hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3d851cd9c71be09e0a6c57c8a32fceaf1783d2 | 12,793 | py | Python | structuretimers/admin.py | buahaha/aa-structuretimers | fbc2752d442795c0803aa419a58ebd1cfd33c66d | [
"MIT"
] | null | null | null | structuretimers/admin.py | buahaha/aa-structuretimers | fbc2752d442795c0803aa419a58ebd1cfd33c66d | [
"MIT"
] | null | null | null | structuretimers/admin.py | buahaha/aa-structuretimers | fbc2752d442795c0803aa419a58ebd1cfd33c66d | [
"MIT"
] | null | null | null | from typing import Any, Dict, Optional
from django import forms
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.db.models.functions import Lower
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from allianceauth.eveonline.models import EveAllianceInfo, EveCorporationInfo
from . import tasks
from .models import (
DiscordWebhook,
NotificationRule,
ScheduledNotification,
StagingSystem,
Timer,
)
@admin.register(DiscordWebhook)
@admin.register(NotificationRule)
@admin.register(ScheduledNotification)
# @admin.register(Timer)
# class TimerAdmin(admin.ModelAdmin):
# list_select_related = ("eve_solar_system", "structure_type", "user")
# list_filter = (
# "timer_type",
# ("eve_solar_system", admin.RelatedOnlyFieldListFilter),
# ("structure_type", admin.RelatedOnlyFieldListFilter),
# "objective",
# "owner_name",
# ("user", admin.RelatedOnlyFieldListFilter),
# "is_opsec",
# )
# ordering = ("-date",)
# autocomplete_fields = ["eve_solar_system", "structure_type"]
# """
# def _scheduled_notifications(self, obj):
# return sorted(
# [
# x["notification_date"].strftime(DATETIME_FORMAT)
# for x in ScheduledNotification.objects.filter(
# timer=obj, notification_date__gt=now()
# ).values("notification_date", "notification_rule_id")
# ]
# )
# """
# actions = ["send_test_notification"]
# def send_test_notification(self, request, queryset):
# for timer in queryset:
# for webhook in DiscordWebhook.objects.filter(is_enabled=True):
# timer.send_notification(
# webhook=webhook,
# content=f"Test notification sent by **{request.user}**",
# )
# self.message_user(
# request, f"Initiated sending test notification for timer: {timer}"
# )
# for webhook in DiscordWebhook.objects.filter(is_enabled=True):
# tasks.send_messages_for_webhook.delay(webhook.pk)
# send_test_notification.short_description = (
# "Send test notification for selected timers to all enabled webhooks"
# )
@admin.register(StagingSystem)
| 34.575676 | 88 | 0.60197 | from typing import Any, Dict, Optional
from django import forms
from django.contrib import admin
from django.core.exceptions import ValidationError
from django.db.models.functions import Lower
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from allianceauth.eveonline.models import EveAllianceInfo, EveCorporationInfo
from . import tasks
from .models import (
DiscordWebhook,
NotificationRule,
ScheduledNotification,
StagingSystem,
Timer,
)
@admin.register(DiscordWebhook)
class DiscordWebhookAdmin(admin.ModelAdmin):
list_display = ("name", "is_enabled", "_messages_in_queue")
list_filter = ("is_enabled",)
ordering = ("name",)
def _messages_in_queue(self, obj):
return obj.queue_size()
actions = ["send_test_message", "purge_messages"]
def purge_messages(self, request, queryset):
actions_count = 0
killmails_deleted = 0
for webhook in queryset:
killmails_deleted += webhook.clear_queue()
actions_count += 1
self.message_user(
request,
f"Purged queued messages for {actions_count} webhooks, "
f"deleting a total of {killmails_deleted} messages.",
)
purge_messages.short_description = "Purge queued messages of selected webhooks"
def send_test_message(self, request, queryset):
actions_count = 0
for webhook in queryset:
tasks.send_test_message_to_webhook.delay(webhook.pk, request.user.pk)
actions_count += 1
self.message_user(
request,
f"Initiated sending of {actions_count} test messages to "
f"selected webhooks. You will receive a notification with the result.",
)
send_test_message.short_description = "Send test message to selected webhooks"
def field_nice_display(name: str) -> str:
return name.replace("_", " ").capitalize()
class NotificationRuleAdminForm(forms.ModelForm):
def clean(self) -> Dict[str, Any]:
cleaned_data = super().clean()
self._validate_not_same_options_chosen(
cleaned_data,
"require_timer_types",
"exclude_timer_types",
lambda x: NotificationRule.get_multiselect_display(x, Timer.Type.choices),
)
self._validate_not_same_options_chosen(
cleaned_data,
"require_objectives",
"exclude_objectives",
lambda x: NotificationRule.get_multiselect_display(
x, Timer.Objective.choices
),
)
self._validate_not_same_options_chosen(
cleaned_data,
"require_visibility",
"exclude_visibility",
lambda x: NotificationRule.get_multiselect_display(
x, Timer.Visibility.choices
),
)
self._validate_not_same_options_chosen(
cleaned_data,
"require_corporations",
"exclude_corporations",
)
self._validate_not_same_options_chosen(
cleaned_data,
"require_alliances",
"exclude_alliances",
)
if (
cleaned_data["trigger"] == NotificationRule.Trigger.SCHEDULED_TIME_REACHED
and cleaned_data["scheduled_time"] is None
):
raise ValidationError(
{
"scheduled_time": (
"You need to specify scheduled time for "
"the `Scheduled time reached` trigger"
)
}
)
if cleaned_data["trigger"] == NotificationRule.Trigger.NEW_TIMER_CREATED:
cleaned_data["scheduled_time"] = None
return cleaned_data
@staticmethod
def _validate_not_same_options_chosen(
cleaned_data, field_name_1, field_name_2, display_func=lambda x: x
) -> None:
same_options = set(cleaned_data[field_name_1]).intersection(
set(cleaned_data[field_name_2])
)
if same_options:
same_options_text = ", ".join(
map(
str,
[display_func(x) for x in same_options],
)
)
raise ValidationError(
f"Can not choose same options for {field_nice_display(field_name_1)} "
f"& {field_nice_display(field_name_2)}: {same_options_text}"
)
@admin.register(NotificationRule)
class NotificationRuleAdmin(admin.ModelAdmin):
form = NotificationRuleAdminForm
list_display = (
"id",
"is_enabled",
"trigger",
"_time",
"webhook",
"ping_type",
"_timer_clauses",
)
list_filter = ("is_enabled", "trigger")
ordering = ("id",)
def _time(self, obj) -> Optional[str]:
if obj.scheduled_time is None:
return None
else:
return obj.get_scheduled_time_display()
_time.admin_order_field = "scheduled time"
def _timer_clauses(self, obj) -> list:
clauses = list()
for field, func, choices in [
("require_timer_types", self._add_to_clauses_1, Timer.Type.choices),
("exclude_timer_types", self._add_to_clauses_1, Timer.Type.choices),
("require_objectives", self._add_to_clauses_1, Timer.Objective.choices),
("exclude_objectives", self._add_to_clauses_1, Timer.Objective.choices),
("require_visibility", self._add_to_clauses_1, Timer.Visibility.choices),
("exclude_visibility", self._add_to_clauses_1, Timer.Visibility.choices),
("require_corporations", self._add_to_clauses_2, None),
("exclude_corporations", self._add_to_clauses_2, None),
("require_alliances", self._add_to_clauses_2, None),
("exclude_alliances", self._add_to_clauses_2, None),
("is_important", self._add_to_clauses_3, None),
("is_opsec", self._add_to_clauses_3, None),
]:
func(clauses, obj, field, choices)
return mark_safe("<br>".join(clauses)) if clauses else None
def _add_to_clauses_1(self, clauses, obj, field, choices):
if getattr(obj, field):
text = ", ".join(
map(
str,
[
NotificationRule.get_multiselect_display(x, choices)
for x in getattr(obj, field)
],
)
)
self._append_field_to_clauses(clauses, field, text)
def _add_to_clauses_2(self, clauses, obj, field, choices=None):
if getattr(obj, field).count() > 0:
text = ", ".join(map(str, getattr(obj, field).all()))
self._append_field_to_clauses(clauses, field, text)
def _add_to_clauses_3(self, clauses, obj, field, choices=None):
if getattr(obj, field) != NotificationRule.Clause.ANY:
text = getattr(obj, f"get_{field}_display")()
self._append_field_to_clauses(clauses, field, text)
def _append_field_to_clauses(self, clauses, field, text):
clauses.append(f"{field_nice_display(field)} = {text}")
actions = ["enable_rule", "disable_rule"]
def enable_rule(self, request, queryset):
queryset.update(is_enabled=True)
self.message_user(request, f"Enabled {queryset.count()} notification rules.")
enable_rule.short_description = "Enable selected notification rules"
def disable_rule(self, request, queryset):
queryset.update(is_enabled=False)
self.message_user(request, f"Disabled {queryset.count()} notification rules.")
disable_rule.short_description = "Disable selected notification rules"
filter_horizontal = (
"require_alliances",
"exclude_alliances",
"require_corporations",
"exclude_corporations",
)
fieldsets = (
(
None,
{
"fields": (
"trigger",
"scheduled_time",
"webhook",
"ping_type",
"is_enabled",
)
},
),
(
"Timer clauses",
{
"classes": ("collapse",),
"fields": (
"require_timer_types",
"exclude_timer_types",
"require_objectives",
"exclude_objectives",
"require_corporations",
"exclude_corporations",
"require_alliances",
"exclude_alliances",
"require_visibility",
"exclude_visibility",
"is_important",
"is_opsec",
),
},
),
)
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""overriding this formfield to have sorted lists in the form"""
if db_field.name in {"require_alliances", "exclude_alliances"}:
kwargs["queryset"] = EveAllianceInfo.objects.order_by(
Lower("alliance_name")
)
elif db_field.name in {"require_corporations", "exclude_corporations"}:
kwargs["queryset"] = EveCorporationInfo.objects.order_by(
Lower("corporation_name")
)
return super().formfield_for_manytomany(db_field, request, **kwargs)
@admin.register(ScheduledNotification)
class ScheduledNotificationAdmin(admin.ModelAdmin):
list_select_related = ("timer", "notification_rule")
list_display = ("notification_date", "timer", "notification_rule", "celery_task_id")
list_filter = ("notification_rule",)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.filter(notification_date__gt=now()).order_by("notification_date")
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return False
# @admin.register(Timer)
# class TimerAdmin(admin.ModelAdmin):
# list_select_related = ("eve_solar_system", "structure_type", "user")
# list_filter = (
# "timer_type",
# ("eve_solar_system", admin.RelatedOnlyFieldListFilter),
# ("structure_type", admin.RelatedOnlyFieldListFilter),
# "objective",
# "owner_name",
# ("user", admin.RelatedOnlyFieldListFilter),
# "is_opsec",
# )
# ordering = ("-date",)
# autocomplete_fields = ["eve_solar_system", "structure_type"]
# """
# def _scheduled_notifications(self, obj):
# return sorted(
# [
# x["notification_date"].strftime(DATETIME_FORMAT)
# for x in ScheduledNotification.objects.filter(
# timer=obj, notification_date__gt=now()
# ).values("notification_date", "notification_rule_id")
# ]
# )
# """
# actions = ["send_test_notification"]
# def send_test_notification(self, request, queryset):
# for timer in queryset:
# for webhook in DiscordWebhook.objects.filter(is_enabled=True):
# timer.send_notification(
# webhook=webhook,
# content=f"Test notification sent by **{request.user}**",
# )
# self.message_user(
# request, f"Initiated sending test notification for timer: {timer}"
# )
# for webhook in DiscordWebhook.objects.filter(is_enabled=True):
# tasks.send_messages_for_webhook.delay(webhook.pk)
# send_test_notification.short_description = (
# "Send test notification for selected timers to all enabled webhooks"
# )
@admin.register(StagingSystem)
class StagingSystemAdmin(admin.ModelAdmin):
list_display = ("eve_solar_system", "_region", "is_main")
list_select_related = (
"eve_solar_system",
"eve_solar_system__eve_constellation__eve_region",
)
autocomplete_fields = ["eve_solar_system"]
ordering = ("eve_solar_system__name",)
def _region(self, obj) -> str:
return obj.eve_solar_system.eve_constellation.eve_region.name
_region.admin_order_field = "eve_solar_system__eve_constellation__eve_region"
actions = ["_recalc_timers"]
def _recalc_timers(self, request, queryset):
for obj in queryset:
tasks.calc_staging_system.delay(obj.pk, force_update=True)
self.message_user(
request, f"{obj}: Started to update timers for staging system..."
)
_recalc_timers.short_description = "Recalc timers for selected staging system"
| 6,368 | 3,894 | 134 |
142371552bb211f737bd8dc3be97787cf8599854 | 476 | py | Python | app/models/base.py | Chaoyingz/paper_trading | cd3af81c932e8f4b1586f2b9bf86b5b252bec896 | [
"MIT"
] | null | null | null | app/models/base.py | Chaoyingz/paper_trading | cd3af81c932e8f4b1586f2b9bf86b5b252bec896 | [
"MIT"
] | null | null | null | app/models/base.py | Chaoyingz/paper_trading | cd3af81c932e8f4b1586f2b9bf86b5b252bec896 | [
"MIT"
] | null | null | null | from datetime import datetime, timezone
from bson import ObjectId
from pydantic import BaseModel, Field
from app.models.types import PyObjectId
| 23.8 | 65 | 0.792017 | from datetime import datetime, timezone
from bson import ObjectId
from pydantic import BaseModel, Field
from app.models.types import PyObjectId
def get_utc_now():
return datetime.now(timezone.utc)
class DateTimeModelMixin(BaseModel):
created_at: datetime = Field(default_factory=get_utc_now)
updated_at: datetime = Field(default_factory=get_utc_now)
class DBModelMixin(DateTimeModelMixin):
id: PyObjectId = Field(default_factory=ObjectId, alias="_id")
| 35 | 223 | 69 |
5354bf2eb0b6402c52bb66eebb35355e3a2c29a4 | 357 | py | Python | script.py | f1amingo/logparser | 65f077a78a974a50e0fff792257fb6fea0a86821 | [
"MIT"
] | null | null | null | script.py | f1amingo/logparser | 65f077a78a974a50e0fff792257fb6fea0a86821 | [
"MIT"
] | null | null | null | script.py | f1amingo/logparser | 65f077a78a974a50e0fff792257fb6fea0a86821 | [
"MIT"
] | null | null | null | from logparser.ADC.ADC_Fast import log_split, log_similarity
# '<$>()<-1> : getImeiV2 memory:868404020067521'
tem = ['<$>', '(', '', ')', '<-1>', ' ', '', ':', '', ' ', '<$>', '<$>', '<$>', '<$>', '<$>']
log = ['<$>', '(', '', ')', '<-1>', ' ', '', ':', '', ' ', 'getImeiV2', ' ', 'memory', ':', '868404020067521']
a = log_similarity(tem, log, 5)
print(a)
| 44.625 | 110 | 0.420168 | from logparser.ADC.ADC_Fast import log_split, log_similarity
# '<$>()<-1> : getImeiV2 memory:868404020067521'
tem = ['<$>', '(', '', ')', '<-1>', ' ', '', ':', '', ' ', '<$>', '<$>', '<$>', '<$>', '<$>']
log = ['<$>', '(', '', ')', '<-1>', ' ', '', ':', '', ' ', 'getImeiV2', ' ', 'memory', ':', '868404020067521']
a = log_similarity(tem, log, 5)
print(a)
| 0 | 0 | 0 |
16eca7bda6e12e4d2913a6a98b2a3352ed2ed698 | 386 | py | Python | experiments/chatterbot.py | wmodes/chickenrobot | b1a903f48a667a295a7be5c026ededb6f20ade36 | [
"MIT"
] | null | null | null | experiments/chatterbot.py | wmodes/chickenrobot | b1a903f48a667a295a7be5c026ededb6f20ade36 | [
"MIT"
] | null | null | null | experiments/chatterbot.py | wmodes/chickenrobot | b1a903f48a667a295a7be5c026ededb6f20ade36 | [
"MIT"
] | null | null | null | from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
chatbot = ChatBot('Ron Obvious')
# Create a new trainer for the chatbot
trainer = ChatterBotCorpusTrainer(chatbot)
# Train the chatbot based on the english corpus
trainer.train("chatterbot.corpus.english")
# Get a response to an input statement
chatbot.get_response("Hello, how are you today?")
| 27.571429 | 55 | 0.803109 | from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
chatbot = ChatBot('Ron Obvious')
# Create a new trainer for the chatbot
trainer = ChatterBotCorpusTrainer(chatbot)
# Train the chatbot based on the english corpus
trainer.train("chatterbot.corpus.english")
# Get a response to an input statement
chatbot.get_response("Hello, how are you today?")
| 0 | 0 | 0 |
aba10f8d8eff32b9423a552b3f2ba0b280c6670c | 236 | py | Python | drlgeb/example.py | mikuh/drlgeb | 5b70834fba6c550f319ea202a691394c2e99e8b5 | [
"MIT"
] | null | null | null | drlgeb/example.py | mikuh/drlgeb | 5b70834fba6c550f319ea202a691394c2e99e8b5 | [
"MIT"
] | null | null | null | drlgeb/example.py | mikuh/drlgeb | 5b70834fba6c550f319ea202a691394c2e99e8b5 | [
"MIT"
] | null | null | null | from drlgeb.ac import A3C
if __name__ == '__main__':
env_id = "SpaceInvaders-v0"
agent = A3C(env_id=env_id)
# train
agent.learn()
# test
model_path = "..."
agent.play(episodes=5, model_path=model_path)
| 14.75 | 49 | 0.622881 | from drlgeb.ac import A3C
if __name__ == '__main__':
env_id = "SpaceInvaders-v0"
agent = A3C(env_id=env_id)
# train
agent.learn()
# test
model_path = "..."
agent.play(episodes=5, model_path=model_path)
| 0 | 0 | 0 |
6a63c85a82799a6fc6dd7884c13a0a1e90d300b1 | 2,406 | py | Python | horarios/migrations/0002_auto__chg_field_subject_name.py | xyos/horarios | f77cdcb3e9865389c4cb0cba8a41c087bffc88eb | [
"MIT"
] | 2 | 2015-01-04T17:20:58.000Z | 2016-01-08T17:20:47.000Z | horarios/migrations/0002_auto__chg_field_subject_name.py | xyos/horarios | f77cdcb3e9865389c4cb0cba8a41c087bffc88eb | [
"MIT"
] | 8 | 2015-01-08T18:36:04.000Z | 2015-05-25T02:44:26.000Z | horarios/migrations/0002_auto__chg_field_subject_name.py | xyos/horarios | f77cdcb3e9865389c4cb0cba8a41c087bffc88eb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 48.12 | 146 | 0.57606 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Subject.name'
db.alter_column(u'horarios_subject', 'name', self.gf('django.db.models.fields.CharField')(max_length=120))
def backwards(self, orm):
# Changing field 'Subject.name'
db.alter_column(u'horarios_subject', 'name', self.gf('django.db.models.fields.CharField')(max_length=100))
models = {
u'horarios.group': {
'Meta': {'object_name': 'Group'},
'code': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['horarios.Profession']", 'symmetrical': 'False'}),
'schedule': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '100'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['horarios.Subject']"}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['horarios.Teacher']"})
},
u'horarios.profession': {
'Meta': {'object_name': 'Profession'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'horarios.subject': {
'Meta': {'object_name': 'Subject'},
'code': ('django.db.models.fields.IntegerField', [], {}),
'credits': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'stype': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'horarios.teacher': {
'Meta': {'object_name': 'Teacher'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['horarios'] | 319 | 1,899 | 23 |
546df6e9ef21d349caa0a7c75b2aa1e5f16fd0b2 | 223 | py | Python | data/masif_site/nn_models/all_feat_1l/custom_params.py | NBDsoftware/masif | 2a370518e0d0d0b0d6f153f2f10f6630ae91f149 | [
"Apache-2.0"
] | 309 | 2019-04-11T20:20:12.000Z | 2022-03-31T16:32:17.000Z | data/masif_site/nn_models/all_feat_1l/custom_params.py | NBDsoftware/masif | 2a370518e0d0d0b0d6f153f2f10f6630ae91f149 | [
"Apache-2.0"
] | 41 | 2019-03-31T06:44:46.000Z | 2022-03-13T16:08:56.000Z | data/masif_site/nn_models/all_feat_1l/custom_params.py | NBDsoftware/masif | 2a370518e0d0d0b0d6f153f2f10f6630ae91f149 | [
"Apache-2.0"
] | 90 | 2019-04-20T11:06:11.000Z | 2022-03-24T16:22:22.000Z | custom_params = {}
custom_params['model_dir'] = 'nn_models/all_feat_1l/model_data/'
custom_params['out_dir'] = 'output/all_feat_1l/'
custom_params['feat_mask'] = [1.0, 1.0, 1.0, 1.0, 1.0]
custom_params['n_conv_layers'] = 1
| 37.166667 | 64 | 0.721973 | custom_params = {}
custom_params['model_dir'] = 'nn_models/all_feat_1l/model_data/'
custom_params['out_dir'] = 'output/all_feat_1l/'
custom_params['feat_mask'] = [1.0, 1.0, 1.0, 1.0, 1.0]
custom_params['n_conv_layers'] = 1
| 0 | 0 | 0 |
0b82ddb45eaa0f0a0ba412992da31cf583976f8a | 2,433 | py | Python | pynini/examples/g2p.py | Freddy-pp/pynini | 12587a4a3056931640dc741526225a0a5f02ca2f | [
"Apache-2.0"
] | 62 | 2019-02-16T17:21:15.000Z | 2022-03-25T04:50:58.000Z | pynini/examples/g2p.py | Freddy-pp/pynini | 12587a4a3056931640dc741526225a0a5f02ca2f | [
"Apache-2.0"
] | 44 | 2019-02-07T13:47:22.000Z | 2022-02-04T14:46:33.000Z | pynini/examples/g2p.py | kylebgorman/pynini | a573bb49a4f307f5e920570e45d517065bd2b7cc | [
"Apache-2.0"
] | 9 | 2019-05-06T09:09:36.000Z | 2022-01-26T16:18:38.000Z | # Copyright 2016-2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For general information on the Pynini grammar compilation library, see
# pynini.opengrm.org.
"""Sketch of Spanish grapheme-to-phoneme conversion.
The dialect transcribed is roughly standard Mexican Spanish.
"""
import pynini
from pynini.lib import pynutil
from pynini.lib import rewrite
# Inventories.
_g = pynini.union("a", "á", "b", "c", "d", "e", "é", "f", "g", "h", "i", "í",
"j", "k", "l", "m", "n", "ñ", "o", "ó", "p", "q", "r", "s",
"t", "u", "ú", "ü", "v", "w", "x", "y", "z")
_p = pynini.union("a", "b", "d", "e", "f", "g", "i", "j", "k", "l", "ʝ", "m",
"n", "ɲ", "o", "p", "r", "ɾ", "s", "ʃ", "t", "u", "w", "x",
"z")
_sigma_star = pynini.union(_g, _p).closure().optimize()
# Rules.
_r1 = pynini.cdrewrite(
pynini.string_map([
("ch", "tʃ"),
("ll", "ʝ"),
("qu", "k"),
("j", "x"),
("ñ", "ɲ"),
("v", "b"),
("x", "s"),
("y", "j"),
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("ü", "w"),
]),
"",
"",
_sigma_star,
).optimize()
_r2 = pynini.cdrewrite(pynutil.delete("h"), "", "", _sigma_star).optimize()
_v = pynini.union("a", "e", "i", "o", "u")
_r3 = pynini.cdrewrite(pynini.cross("r", "ɾ"), _v, _v, _sigma_star).optimize()
_r4 = pynini.cdrewrite(pynini.cross("rr", "r"), "", "", _sigma_star).optimize()
_r5 = pynini.cdrewrite(
pynini.string_map([("c", "s"), ("g", "x")]), "", pynini.union("i", "e"),
_sigma_star).optimize()
_r6 = pynini.cdrewrite(pynini.cross("c", "k"), "", "", _sigma_star).optimize()
_rules = _r1 @ _r2 @ _r3 @ _r4 @ _r5 @ _r6
_g2p = pynini.closure(_g) @ _rules @ pynini.closure(_p)
_g2p.optimize()
# Functions.
| 32.013158 | 79 | 0.549527 | # Copyright 2016-2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For general information on the Pynini grammar compilation library, see
# pynini.opengrm.org.
"""Sketch of Spanish grapheme-to-phoneme conversion.
The dialect transcribed is roughly standard Mexican Spanish.
"""
import pynini
from pynini.lib import pynutil
from pynini.lib import rewrite
# Inventories.
_g = pynini.union("a", "á", "b", "c", "d", "e", "é", "f", "g", "h", "i", "í",
"j", "k", "l", "m", "n", "ñ", "o", "ó", "p", "q", "r", "s",
"t", "u", "ú", "ü", "v", "w", "x", "y", "z")
_p = pynini.union("a", "b", "d", "e", "f", "g", "i", "j", "k", "l", "ʝ", "m",
"n", "ɲ", "o", "p", "r", "ɾ", "s", "ʃ", "t", "u", "w", "x",
"z")
_sigma_star = pynini.union(_g, _p).closure().optimize()
# Rules.
_r1 = pynini.cdrewrite(
pynini.string_map([
("ch", "tʃ"),
("ll", "ʝ"),
("qu", "k"),
("j", "x"),
("ñ", "ɲ"),
("v", "b"),
("x", "s"),
("y", "j"),
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("ü", "w"),
]),
"",
"",
_sigma_star,
).optimize()
_r2 = pynini.cdrewrite(pynutil.delete("h"), "", "", _sigma_star).optimize()
_v = pynini.union("a", "e", "i", "o", "u")
_r3 = pynini.cdrewrite(pynini.cross("r", "ɾ"), _v, _v, _sigma_star).optimize()
_r4 = pynini.cdrewrite(pynini.cross("rr", "r"), "", "", _sigma_star).optimize()
_r5 = pynini.cdrewrite(
pynini.string_map([("c", "s"), ("g", "x")]), "", pynini.union("i", "e"),
_sigma_star).optimize()
_r6 = pynini.cdrewrite(pynini.cross("c", "k"), "", "", _sigma_star).optimize()
_rules = _r1 @ _r2 @ _r3 @ _r4 @ _r5 @ _r6
_g2p = pynini.closure(_g) @ _rules @ pynini.closure(_p)
_g2p.optimize()
# Functions.
def g2p(string: str) -> str:
return rewrite.one_top_rewrite(string, _g2p)
| 54 | 0 | 23 |
98f64fad3e877fc69d88e7f67a0d9ef1252d5d20 | 1,100 | py | Python | skompiler/toskast/sklearn/linear_model/base.py | odinsemvosem/SKompiler | e46264796c8695497f43f6653688f5bcdbc0cfae | [
"MIT"
] | 112 | 2018-12-12T03:54:28.000Z | 2022-01-14T14:18:42.000Z | skompiler/toskast/sklearn/linear_model/base.py | odinsemvosem/SKompiler | e46264796c8695497f43f6653688f5bcdbc0cfae | [
"MIT"
] | 10 | 2018-12-20T17:21:09.000Z | 2022-03-24T19:31:55.000Z | skompiler/toskast/sklearn/linear_model/base.py | odinsemvosem/SKompiler | e46264796c8695497f43f6653688f5bcdbc0cfae | [
"MIT"
] | 7 | 2019-02-05T05:20:05.000Z | 2021-03-21T16:31:38.000Z | """
SKLearn linear model to SKAST.
"""
from skompiler.dsl import const
def linear_model(coef, intercept, inputs):
"""
Linear regression.
Depending on the shape of the coef and intercept, produces either a single-valued
linear model (w @ x + b) or a multi-valued one (M @ x + b_vec)
Args:
coef (np.array): A vector (1D array, for single-valued model) or a matrix (2D array, for multi-valued one) for the model.
intercept: a number (for single-valued) or a 1D array (for multi-valued regression).
inputs: a list of AST nodes to be used as the input vector to the model or a single node, corresponding to a vector.
"""
single_valued = (coef.ndim == 1)
if single_valued and hasattr(intercept, '__iter__'):
raise ValueError("Single-valued linear model must have a single value for the intercept")
elif not single_valued and (coef.ndim != 2 or intercept.ndim != 1):
raise ValueError("Multi-valued linear model must have a 2D coefficient matrix and a 1D intercept vector")
return const(coef) @ inputs + const(intercept)
| 42.307692 | 129 | 0.691818 | """
SKLearn linear model to SKAST.
"""
from skompiler.dsl import const
def linear_model(coef, intercept, inputs):
"""
Linear regression.
Depending on the shape of the coef and intercept, produces either a single-valued
linear model (w @ x + b) or a multi-valued one (M @ x + b_vec)
Args:
coef (np.array): A vector (1D array, for single-valued model) or a matrix (2D array, for multi-valued one) for the model.
intercept: a number (for single-valued) or a 1D array (for multi-valued regression).
inputs: a list of AST nodes to be used as the input vector to the model or a single node, corresponding to a vector.
"""
single_valued = (coef.ndim == 1)
if single_valued and hasattr(intercept, '__iter__'):
raise ValueError("Single-valued linear model must have a single value for the intercept")
elif not single_valued and (coef.ndim != 2 or intercept.ndim != 1):
raise ValueError("Multi-valued linear model must have a 2D coefficient matrix and a 1D intercept vector")
return const(coef) @ inputs + const(intercept)
| 0 | 0 | 0 |
cb9c843981e48fb47efbd0dce57789a730182e3a | 55 | py | Python | howl/roomsensor/__init__.py | volzotan/django-howl | 3b11c530da95d152844934da09592619b3d4497f | [
"MIT"
] | null | null | null | howl/roomsensor/__init__.py | volzotan/django-howl | 3b11c530da95d152844934da09592619b3d4497f | [
"MIT"
] | null | null | null | howl/roomsensor/__init__.py | volzotan/django-howl | 3b11c530da95d152844934da09592619b3d4497f | [
"MIT"
] | null | null | null | default_app_config = 'roomsensor.apps.RoomsensorConfig' | 55 | 55 | 0.872727 | default_app_config = 'roomsensor.apps.RoomsensorConfig' | 0 | 0 | 0 |
21e507e7ab4bcd8fb87c931f3aa580cb59331a62 | 1,553 | py | Python | src/TASK_train_1dCNN_ER_EC/model_cnn1d_cmu_classification_stage2.py | haoqi/emotions_as_primitives_towards_behavior_understanding | 5d82bb0265e585da1cd0144bb93b28dc5cb0e710 | [
"0BSD"
] | 2 | 2020-08-13T18:26:46.000Z | 2021-04-07T18:58:48.000Z | src/TASK_train_1dCNN_ER_EC/model_cnn1d_cmu_classification_stage2.py | haoqi/emotions_as_primitives_towards_behavior_understanding | 5d82bb0265e585da1cd0144bb93b28dc5cb0e710 | [
"0BSD"
] | null | null | null | src/TASK_train_1dCNN_ER_EC/model_cnn1d_cmu_classification_stage2.py | haoqi/emotions_as_primitives_towards_behavior_understanding | 5d82bb0265e585da1cd0144bb93b28dc5cb0e710 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 21:10:20 2018
@author: haoqi
"""
import os
import torch
import torch.nn as nn
class Classification_Base_1D_NN_fixed_seq_len_1s_majvote_v2(nn.Module):
'''
'''
| 31.06 | 110 | 0.509981 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 21:10:20 2018
@author: haoqi
"""
import os
import torch
import torch.nn as nn
class Classification_Base_1D_NN_fixed_seq_len_1s_majvote_v2(nn.Module):
'''
'''
def __init__(self, in_channels_num):
super(Classification_Base_1D_NN_fixed_seq_len_1s_majvote_v2, self).__init__()
self.num_out_beh = 6
self.cnn_1d=nn.Sequential(
nn.Conv1d(in_channels_num, 96, kernel_size=10, stride=2, padding=0), # input_len=1*100, out 46
nn.ReLU(),
nn.Conv1d(96, 96, kernel_size=5, stride=2, padding=0), # in 46, out 21
nn.ReLU(),
nn.Conv1d(96, 96, kernel_size=5, stride=2, padding=0), # in 21, out 9
nn.ReLU(),
nn.Conv1d(96, 128, kernel_size=3, stride=2, padding=0),# in 9, out 4
nn.ReLU(),
nn.AdaptiveMaxPool1d(1)
)
self.out1 = nn.Sequential(
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU()
)
self.out2 = nn.Sequential(
nn.Linear(128,64),
nn.PReLU(),
nn.Linear(64, 64),
nn.PReLU(),
nn.Linear(64, 2)
)
def forward(self, x_input):
x = self.cnn_1d(x_input)
x = x.view(x.size(0), -1)
output1 = self.out1(x)
output = self.out2(output1)
return output
| 1,258 | 0 | 52 |
ad5aac441f52450806160ebb3f673743c409340e | 18,905 | py | Python | btcde.py | rundekugel/btcde | f18bc844a92d8dc932332a2d9b479487b158c9d1 | [
"MIT"
] | null | null | null | btcde.py | rundekugel/btcde | f18bc844a92d8dc932332a2d9b479487b158c9d1 | [
"MIT"
] | null | null | null | btcde.py | rundekugel/btcde | f18bc844a92d8dc932332a2d9b479487b158c9d1 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""API Wrapper for Bitcoin.de Trading API."""
import requests
import time
import json
import hmac
import hashlib
import logging
import codecs
import decimal
import inspect
import urllib
from urllib.parse import urlencode
logging.basicConfig()
log = logging.getLogger(__name__)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.propagate = True
__version__ = '4.0'
class ParameterBuilder(object):
'''To verify given parameters for API.'''
TRADING_PAIRS = ['btceur', 'bcheur', 'etheur', 'btgeur', 'bsveur', 'ltceur',
'iotabtc', 'dashbtc', 'gntbtc', 'ltcbtc']
ORDER_TYPES = ['buy', 'sell']
CURRENCIES = ['btc', 'bch', 'eth', 'btg', 'bsv', 'ltc',
'iota', 'dash', 'gnt']
BANK_SEATS = ['AT', 'BE', 'BG', 'CH', 'CY', 'CZ',
'DE', 'DK', 'EE', 'ES', 'FI', 'FR',
'GB', 'GR', 'HR', 'HU', 'IE', 'IS',
'IT', 'LI', 'LT', 'LU', 'LV', 'MT',
'MQ', 'NL', 'NO', 'PL', 'PT', 'RO',
'SE', 'SI', 'SK']
TRUST_LEVELS = ['bronze', 'silver', 'gold', 'platin']
TRADE_STATES = [-1, 0, 1]
ORDER_STATES = [-2, -1, 0]
PAYMENT_OPTIONS = [1, 2, 3]
TRADE_TYPES = ['all', 'buy', 'sell', 'inpayment',
'payout', 'affiliate', 'welcome_btc',
'buy_yubikey', 'buy_goldshop',
'buy_diamondshop', 'kickback',
'outgoing_fee_voluntary']
def HandleRequestsException(e):
"""Handle Exception from request."""
log.warning(e)
def HandleAPIErrors(r):
"""To handle Errors from BTCDE API."""
valid_status_codes = [200, 201, 204]
if r.status_code not in valid_status_codes:
content = r.json()
errors = content.get('errors')
log.warning('API Error Code: {}'.format(str(errors[0]['code'])))
log.warning('API Error Message: {}'.format(errors[0]['message']))
log.warning('API Error URL: {}'.format(r.url))
return False
else:
return True
class Connection(object):
"""To provide connection credentials to the trading API"""
def APIConnect(self, method, params):
"""Transform Parameters to URL"""
header = self.set_header(params.url, method,
params.encoded_string)
log.debug('Set Header: {}'.format(header))
try:
r = self.send_request(params.url, method, header,
params.encoded_string)
# Handle API Errors
if HandleAPIErrors(r):
# get results
result = r.json(parse_float=decimal.Decimal)
else:
result = {}
except requests.exceptions.RequestException as e:
HandleRequestsException(e)
result = {}
return result
def addToAddressPool(self, currency, address, **args):
"""Add address to pool"""
uri = f'{self.apibase}{currency}/address'
params = {'address': address}
params.update(args)
avail_params = ['address', 'amount_usages', 'comment']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('POST', p)
def removeFromAddressPool(self, currency, address):
"""Remove address from pool"""
uri = f'{self.apibase}{currency}/address/{address}'
params = {'currency': currency, 'address': address}
avail_params = ['currency', 'address']
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('DELETE', p)
def listAddressPool(self, currency, **args):
"""List address pool"""
uri = f'{self.apibase}{currency}/address'
params = args
avail_params = ['usable', 'comment', 'page']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showOrderbook(self, order_type, trading_pair, **args):
"""Search Orderbook for offers."""
uri = f'{self.apibase}{trading_pair}/orderbook'
params = {'type': order_type}
params.update(args)
avail_params = ['type', 'trading_pair', 'amount_currency_to_trade', 'price',
'order_requirements_fullfilled',
'only_kyc_full', 'only_express_orders', 'payment_option',
'sepa_option', 'only_same_bankgroup', 'only_same_bic',
'seat_of_bank', 'page_size']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showOrderDetails(self, trading_pair, order_id):
"""Show details for an offer."""
uri = f'{self.apibase}{trading_pair}/orders/public/details/{order_id}'
params = {'trading_pair': trading_pair, 'order_id': order_id}
avail_params = ['trading_pair', 'order_id']
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def createOrder(self, order_type, trading_pair, max_amount_currency_to_trade, price, **args):
"""Create a new Order."""
uri = f'{self.apibase}{trading_pair}/orders'
# Build parameters
params = {'type': order_type,
'max_amount_currency_to_trade': max_amount_currency_to_trade,
'price': price}
params.update(args)
avail_params = ['type', 'max_amount_currency_to_trade', 'price',
'min_amount_currency_to_trade', 'end_datetime',
'new_order_for_remaining_amount', 'trading_pair',
'min_trust_level', 'only_kyc_full', 'payment_option',
'sepa_option', 'seat_of_bank']
p = ParameterBuilder(avail_params, params, uri)
p.verify_keys_and_values(avail_params, {'trading_pair': trading_pair})
return self.APIConnect('POST', p)
def deleteOrder(self, order_id, trading_pair):
"""Delete an Order."""
# Build parameters
uri = f'{self.apibase}{trading_pair}/orders/{order_id}'
avail_params = ['order_id', 'trading_pair']
params = { 'order_id': order_id, 'trading_pair': trading_pair}
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('DELETE', p)
def showMyOrders(self, **args):
"""Query and Filter own Orders."""
# Build parameters
params = args
avail_params = ['type', 'trading_pair', 'state',
'date_start', 'date_end', 'page']
if params.get("trading_pair"):
uri = f'{self.apibase}{params["trading_pair"]}/orders'
del params["trading_pair"]
else:
uri = f'{self.apibase}orders'
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showMyOrderDetails(self, trading_pair, order_id):
"""Details to an own Order."""
uri = f'{self.apibase}{trading_pair}/orders/{order_id}'
p = ParameterBuilder({}, {}, uri)
return self.APIConnect('GET', p)
def executeTrade(self, trading_pair, order_id, order_type, amount):
"""Buy/Sell on a specific Order."""
uri = f'{self.apibase}{trading_pair}/trades/{order_id}'
params = { 'type': order_type,
'amount_currency_to_trade': amount}
avail_params = ['type', 'amount_currency_to_trade']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('POST', p)
def showMyTrades(self, **args):
"""Query and Filter on past Trades."""
# Build parameters
params = args
avail_params = ['type', 'trading_pair', 'state',
'only_trades_with_action_for_payment_or_transfer_required',
'payment_method', 'date_start', 'date_end', 'page']
if params.get("trading_pair"):
uri = f'{self.apibase}{params["trading_pair"]}/trades'
del params["trading_pair"]
else:
uri = f'{self.apibase}trades'
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showMyTradeDetails(self, trading_pair, trade_id):
"""Details to a specific Trade."""
params = {'trading_pair': trading_pair, 'trade_id': trade_id}
avail_params = [ 'trading_pair', 'trade_id' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}'
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def markCoinsAsTransferred(self, trading_pair, trade_id, amount_currency_to_trade_after_fee):
"""Mark trade as transferred."""
params = {'amount_currency_to_trade_after_fee': amount_currency_to_trade_after_fee,
'trading_pair': trading_pair, 'trade_id': trade_id}
avail_params = [ 'trading_pair', 'trade_id', 'amount_currency_to_trade_after_fee' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/mark_coins_as_transferred'
p = ParameterBuilder(avail_params,
{'amount_currency_to_trade_after_fee': amount_currency_to_trade_after_fee}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def markTradeAsPaid(self, trading_pair, trade_id, volume_currency_to_pay_after_fee):
"""Mark traded as paid."""
params = {'volume_currency_to_pay_after_fee': volume_currency_to_pay_after_fee,
'trading_pair': trading_pair, 'trade_id': trade_id}
avail_params = [ 'trading_pair', 'trade_id', 'volume_currency_to_pay_after_fee' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/mark_trade_as_paid'
p = ParameterBuilder(avail_params,
{'volume_currency_to_pay_after_fee': volume_currency_to_pay_after_fee}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def markCoinsAsReceived(self, trading_pair, trade_id, amount_currency_to_trade_after_fee, rating):
"""Mark coins as received."""
params = {'amount_currency_to_trade_after_fee': amount_currency_to_trade_after_fee,
'trading_pair': trading_pair, 'trade_id': trade_id, 'rating': rating}
params_post = {'amount_currency_to_trade_after_fee': amount_currency_to_trade_after_fee,
'rating': rating}
avail_params = [ 'trading_pair', 'trade_id', 'amount_currency_to_trade_after_fee', 'rating' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/mark_coins_as_received'
p = ParameterBuilder(avail_params, params_post, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def markTradeAsPaymentReceived(self, trading_pair, trade_id,
volume_currency_to_pay_after_fee, rating,
is_paid_from_correct_bank_account):
"""Mark coins as received."""
params = {'volume_currency_to_pay_after_fee': volume_currency_to_pay_after_fee,
'trading_pair': trading_pair, 'trade_id': trade_id, 'rating': rating}
params_post = {'volume_currency_to_pay_after_fee': volume_currency_to_pay_after_fee,
'rating': rating,
'is_paid_from_correct_bank_account': is_paid_from_correct_bank_account}
avail_params = [ 'trading_pair', 'trade_id', 'volume_currency_to_pay_after_fee',
'rating', 'is_paid_from_correct_bank_account' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/mark_trade_as_payment_received'
p = ParameterBuilder(avail_params, params_post, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def addTradeRating(self, trading_pair, trade_id, rating):
"""Mark coins as received."""
params = {'trading_pair': trading_pair, 'trade_id': trade_id, 'rating': rating}
params_post = {'rating': rating}
avail_params = [ 'trading_pair', 'trade_id', 'rating' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/add_trade_rating'
p = ParameterBuilder(avail_params, params_post, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def showAccountInfo(self):
"""Query on Account Infos."""
uri = f'{self.apibase}account'
p = ParameterBuilder({}, {}, uri)
return self.APIConnect('GET', p)
def showOrderbookCompact(self, trading_pair):
"""Bids and Asks in compact format."""
params = {'trading_pair': trading_pair}
avail_params = ['trading_pair']
uri = f'{self.apibase}{trading_pair}/orderbook/compact'
# Build parameters
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def showPublicTradeHistory(self, trading_pair, **args):
"""All successful trades of the last 24 hours."""
params = { 'trading_pair': trading_pair }
params.update(args)
avail_params = ['trading_pair', 'since_tid']
uri = f'{self.apibase}{trading_pair}/trades/history'
if params.get('since_tid'):
del params["trading_pair"]
p = ParameterBuilder(avail_params, params, uri)
else:
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def showRates(self, trading_pair):
"""Query of the average rate last 3 and 12 hours."""
uri = f'{self.apibase}{trading_pair}/rates'
params = {'trading_pair': trading_pair}
avail_params = ['trading_pair']
# Build parameters
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def showAccountLedger(self, currency, **args):
"""Query on Account statement."""
params = {'currency': currency}
params.update(args)
uri = f'{self.apibase}{currency}/account/ledger'
avail_params = ['currency', 'type',
'datetime_start', 'datetime_end', 'page']
p = ParameterBuilder(avail_params, params, uri)
del params['currency']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showPermissions(self):
"""Show permissions that are allowed for used API key"""
uri = f'{self.apibase}permissions'
p = ParameterBuilder({}, {}, uri)
return self.APIConnect('GET', p) | 44.798578 | 129 | 0.606982 | #! /usr/bin/env python
"""API Wrapper for Bitcoin.de Trading API."""
import requests
import time
import json
import hmac
import hashlib
import logging
import codecs
import decimal
import inspect
import urllib
from urllib.parse import urlencode
logging.basicConfig()
log = logging.getLogger(__name__)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.propagate = True
__version__ = '4.0'
class ParameterBuilder(object):
    '''Validates request parameters and builds the query URL for the API.'''

    TRADING_PAIRS = ['btceur', 'bcheur', 'etheur', 'btgeur', 'bsveur', 'ltceur',
                     'iotabtc', 'dashbtc', 'gntbtc', 'ltcbtc']
    ORDER_TYPES = ['buy', 'sell']
    CURRENCIES = ['btc', 'bch', 'eth', 'btg', 'bsv', 'ltc',
                  'iota', 'dash', 'gnt']
    BANK_SEATS = ['AT', 'BE', 'BG', 'CH', 'CY', 'CZ',
                  'DE', 'DK', 'EE', 'ES', 'FI', 'FR',
                  'GB', 'GR', 'HR', 'HU', 'IE', 'IS',
                  'IT', 'LI', 'LT', 'LU', 'LV', 'MT',
                  'MQ', 'NL', 'NO', 'PL', 'PT', 'RO',
                  'SE', 'SI', 'SK']
    TRUST_LEVELS = ['bronze', 'silver', 'gold', 'platin']
    TRADE_STATES = [-1, 0, 1]
    ORDER_STATES = [-2, -1, 0]
    PAYMENT_OPTIONS = [1, 2, 3]
    TRADE_TYPES = ['all', 'buy', 'sell', 'inpayment',
                   'payout', 'affiliate', 'welcome_btc',
                   'buy_yubikey', 'buy_goldshop',
                   'buy_diamondshop', 'kickback',
                   'outgoing_fee_voluntary']

    def __init__(self, avail_params, given_params, uri):
        # Validate first so that an invalid parameter never yields a URL.
        self.verify_keys_and_values(avail_params, given_params)
        self.params = given_params
        self.create_url(uri)

    def verify_keys_and_values(self, avail_params, given_params):
        """Raise KeyError for unknown names, ValueError for bad values."""
        for key, value in given_params.items():
            if key not in avail_params:
                raise KeyError("{} is not any of {}".format(key, ', '.join(avail_params)))
            if key == 'trading_pair':
                self.error_on_invalid_value(value, self.TRADING_PAIRS)
            elif key == 'type':
                self.error_on_invalid_value(value, self.TRADE_TYPES)
            elif key == 'currency':
                self.error_on_invalid_value(value, self.CURRENCIES)
            elif key == 'seat_of_bank':
                self.error_on_invalid_value(value, self.BANK_SEATS)
            elif key in ('min_trust_level', 'trust_level'):
                self.error_on_invalid_value(value, self.TRUST_LEVELS)
            elif key == 'payment_option':
                self.error_on_invalid_value(value, self.PAYMENT_OPTIONS)
            elif key == 'state':
                # The valid state set depends on which API wrapper invoked
                # us, two frames up the stack (kept inline so the frame
                # depth of inspect.stack()[2] stays correct).
                caller = inspect.stack()[2][3]
                if caller in ("showMyOrders", "showMyOrderDetails"):
                    self.error_on_invalid_value(value, self.ORDER_STATES)
                elif caller in ("showMyTrades", "showMyTradesDetails"):
                    self.error_on_invalid_value(value, self.TRADE_STATES)

    def error_on_invalid_value(self, value, list):
        """Raise ValueError unless *value* is contained in *list*."""
        if value in list:
            return
        raise ValueError("{} is not any of {}".format(value, ', '.join(str(x) for x in list)))

    def create_url(self, uri):
        """Store the sorted, URL-encoded query string and the final URL."""
        if not self.params:
            self.encoded_string = ''
            self.url = uri
        else:
            self.encoded_string = urlencode(sorted(self.params.items()))
            self.url = '{}?{}'.format(uri, self.encoded_string)
def HandleRequestsException(e):
    """Log an exception raised by the requests library as a warning.

    Called from Connection.APIConnect, which swallows the error and
    falls back to returning an empty result dict.
    """
    log.warning(e)
def HandleAPIErrors(r):
    """Return True for a successful BTCDE API response, else log and return False.

    On failure the first entry of the response's JSON ``errors`` list is
    logged together with the request URL.
    """
    if r.status_code in (200, 201, 204):
        return True
    first_error = r.json().get('errors')[0]
    log.warning('API Error Code: {}'.format(str(first_error['code'])))
    log.warning('API Error Message: {}'.format(first_error['message']))
    log.warning('API Error URL: {}'.format(r.url))
    return False
class Connection(object):
"""To provide connection credentials to the trading API"""
def __init__(self, api_key, api_secret):
self.api_key = api_key
self.api_secret = api_secret
# set initial self.nonce
self.nonce = int(time.time() * 1000000)
# Bitcoin.de API URI
self.apihost = 'https://api.bitcoin.de'
self.apiversion = 'v4'
self.apibase = f'{self.apihost}/{self.apiversion}/'
self.verify = True # avoid warnings for ssl-cert
def build_hmac_sign(self, md5string, method, url):
hmac_data = '#'.join([method, url, self.api_key, str(self.nonce), md5string])
hmac_signed = hmac.new(bytearray(self.api_secret.encode()), msg=hmac_data.encode(), digestmod=hashlib.sha256).hexdigest()
return hmac_signed
def set_header(self, url, method, encoded_string):
# raise self.nonce before using
self.nonce = int(time.time() * 1000000)
if method == 'POST':
md5_encoded_query_string = hashlib.md5(encoded_string.encode()).hexdigest()
else:
md5_encoded_query_string = hashlib.md5(b'').hexdigest()
hmac_signed = self.build_hmac_sign(md5_encoded_query_string,
method, url)
# set header
header = {'content-type':
'application/x-www-form-urlencoded; charset=utf-8',
'X-API-KEY': self.api_key,
'X-API-NONCE': str(self.nonce),
'X-API-SIGNATURE': hmac_signed }
return header
def send_request(self, url, method, header, encoded_string, verify=None):
if not verify:
verify=self.verify
if method == 'GET':
r = requests.get(url, headers=(header),
stream=True, verify=verify)
elif method == 'POST':
r = requests.post(url, headers=(header), data=encoded_string,
stream=True, verify=verify)
elif method == 'DELETE':
r = requests.delete(url, headers=(header),
stream=True, verify=verify)
return r
def APIConnect(self, method, params):
"""Transform Parameters to URL"""
header = self.set_header(params.url, method,
params.encoded_string)
log.debug('Set Header: {}'.format(header))
try:
r = self.send_request(params.url, method, header,
params.encoded_string)
# Handle API Errors
if HandleAPIErrors(r):
# get results
result = r.json(parse_float=decimal.Decimal)
else:
result = {}
except requests.exceptions.RequestException as e:
HandleRequestsException(e)
result = {}
return result
def addToAddressPool(self, currency, address, **args):
"""Add address to pool"""
uri = f'{self.apibase}{currency}/address'
params = {'address': address}
params.update(args)
avail_params = ['address', 'amount_usages', 'comment']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('POST', p)
def removeFromAddressPool(self, currency, address):
"""Remove address from pool"""
uri = f'{self.apibase}{currency}/address/{address}'
params = {'currency': currency, 'address': address}
avail_params = ['currency', 'address']
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('DELETE', p)
def listAddressPool(self, currency, **args):
"""List address pool"""
uri = f'{self.apibase}{currency}/address'
params = args
avail_params = ['usable', 'comment', 'page']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showOrderbook(self, order_type, trading_pair, **args):
"""Search Orderbook for offers."""
uri = f'{self.apibase}{trading_pair}/orderbook'
params = {'type': order_type}
params.update(args)
avail_params = ['type', 'trading_pair', 'amount_currency_to_trade', 'price',
'order_requirements_fullfilled',
'only_kyc_full', 'only_express_orders', 'payment_option',
'sepa_option', 'only_same_bankgroup', 'only_same_bic',
'seat_of_bank', 'page_size']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showOrderDetails(self, trading_pair, order_id):
"""Show details for an offer."""
uri = f'{self.apibase}{trading_pair}/orders/public/details/{order_id}'
params = {'trading_pair': trading_pair, 'order_id': order_id}
avail_params = ['trading_pair', 'order_id']
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def createOrder(self, order_type, trading_pair, max_amount_currency_to_trade, price, **args):
"""Create a new Order."""
uri = f'{self.apibase}{trading_pair}/orders'
# Build parameters
params = {'type': order_type,
'max_amount_currency_to_trade': max_amount_currency_to_trade,
'price': price}
params.update(args)
avail_params = ['type', 'max_amount_currency_to_trade', 'price',
'min_amount_currency_to_trade', 'end_datetime',
'new_order_for_remaining_amount', 'trading_pair',
'min_trust_level', 'only_kyc_full', 'payment_option',
'sepa_option', 'seat_of_bank']
p = ParameterBuilder(avail_params, params, uri)
p.verify_keys_and_values(avail_params, {'trading_pair': trading_pair})
return self.APIConnect('POST', p)
def deleteOrder(self, order_id, trading_pair):
"""Delete an Order."""
# Build parameters
uri = f'{self.apibase}{trading_pair}/orders/{order_id}'
avail_params = ['order_id', 'trading_pair']
params = { 'order_id': order_id, 'trading_pair': trading_pair}
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('DELETE', p)
def showMyOrders(self, **args):
"""Query and Filter own Orders."""
# Build parameters
params = args
avail_params = ['type', 'trading_pair', 'state',
'date_start', 'date_end', 'page']
if params.get("trading_pair"):
uri = f'{self.apibase}{params["trading_pair"]}/orders'
del params["trading_pair"]
else:
uri = f'{self.apibase}orders'
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showMyOrderDetails(self, trading_pair, order_id):
"""Details to an own Order."""
uri = f'{self.apibase}{trading_pair}/orders/{order_id}'
p = ParameterBuilder({}, {}, uri)
return self.APIConnect('GET', p)
def executeTrade(self, trading_pair, order_id, order_type, amount):
"""Buy/Sell on a specific Order."""
uri = f'{self.apibase}{trading_pair}/trades/{order_id}'
params = { 'type': order_type,
'amount_currency_to_trade': amount}
avail_params = ['type', 'amount_currency_to_trade']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('POST', p)
def showMyTrades(self, **args):
"""Query and Filter on past Trades."""
# Build parameters
params = args
avail_params = ['type', 'trading_pair', 'state',
'only_trades_with_action_for_payment_or_transfer_required',
'payment_method', 'date_start', 'date_end', 'page']
if params.get("trading_pair"):
uri = f'{self.apibase}{params["trading_pair"]}/trades'
del params["trading_pair"]
else:
uri = f'{self.apibase}trades'
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showMyTradeDetails(self, trading_pair, trade_id):
"""Details to a specific Trade."""
params = {'trading_pair': trading_pair, 'trade_id': trade_id}
avail_params = [ 'trading_pair', 'trade_id' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}'
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def markCoinsAsTransferred(self, trading_pair, trade_id, amount_currency_to_trade_after_fee):
"""Mark trade as transferred."""
params = {'amount_currency_to_trade_after_fee': amount_currency_to_trade_after_fee,
'trading_pair': trading_pair, 'trade_id': trade_id}
avail_params = [ 'trading_pair', 'trade_id', 'amount_currency_to_trade_after_fee' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/mark_coins_as_transferred'
p = ParameterBuilder(avail_params,
{'amount_currency_to_trade_after_fee': amount_currency_to_trade_after_fee}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def markTradeAsPaid(self, trading_pair, trade_id, volume_currency_to_pay_after_fee):
"""Mark traded as paid."""
params = {'volume_currency_to_pay_after_fee': volume_currency_to_pay_after_fee,
'trading_pair': trading_pair, 'trade_id': trade_id}
avail_params = [ 'trading_pair', 'trade_id', 'volume_currency_to_pay_after_fee' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/mark_trade_as_paid'
p = ParameterBuilder(avail_params,
{'volume_currency_to_pay_after_fee': volume_currency_to_pay_after_fee}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def markCoinsAsReceived(self, trading_pair, trade_id, amount_currency_to_trade_after_fee, rating):
"""Mark coins as received."""
params = {'amount_currency_to_trade_after_fee': amount_currency_to_trade_after_fee,
'trading_pair': trading_pair, 'trade_id': trade_id, 'rating': rating}
params_post = {'amount_currency_to_trade_after_fee': amount_currency_to_trade_after_fee,
'rating': rating}
avail_params = [ 'trading_pair', 'trade_id', 'amount_currency_to_trade_after_fee', 'rating' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/mark_coins_as_received'
p = ParameterBuilder(avail_params, params_post, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def markTradeAsPaymentReceived(self, trading_pair, trade_id,
volume_currency_to_pay_after_fee, rating,
is_paid_from_correct_bank_account):
"""Mark coins as received."""
params = {'volume_currency_to_pay_after_fee': volume_currency_to_pay_after_fee,
'trading_pair': trading_pair, 'trade_id': trade_id, 'rating': rating}
params_post = {'volume_currency_to_pay_after_fee': volume_currency_to_pay_after_fee,
'rating': rating,
'is_paid_from_correct_bank_account': is_paid_from_correct_bank_account}
avail_params = [ 'trading_pair', 'trade_id', 'volume_currency_to_pay_after_fee',
'rating', 'is_paid_from_correct_bank_account' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/mark_trade_as_payment_received'
p = ParameterBuilder(avail_params, params_post, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def addTradeRating(self, trading_pair, trade_id, rating):
"""Mark coins as received."""
params = {'trading_pair': trading_pair, 'trade_id': trade_id, 'rating': rating}
params_post = {'rating': rating}
avail_params = [ 'trading_pair', 'trade_id', 'rating' ]
uri = f'{self.apibase}{trading_pair}/trades/{trade_id}/add_trade_rating'
p = ParameterBuilder(avail_params, params_post, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('POST', p)
def showAccountInfo(self):
"""Query on Account Infos."""
uri = f'{self.apibase}account'
p = ParameterBuilder({}, {}, uri)
return self.APIConnect('GET', p)
def showOrderbookCompact(self, trading_pair):
"""Bids and Asks in compact format."""
params = {'trading_pair': trading_pair}
avail_params = ['trading_pair']
uri = f'{self.apibase}{trading_pair}/orderbook/compact'
# Build parameters
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def showPublicTradeHistory(self, trading_pair, **args):
"""All successful trades of the last 24 hours."""
params = { 'trading_pair': trading_pair }
params.update(args)
avail_params = ['trading_pair', 'since_tid']
uri = f'{self.apibase}{trading_pair}/trades/history'
if params.get('since_tid'):
del params["trading_pair"]
p = ParameterBuilder(avail_params, params, uri)
else:
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def showRates(self, trading_pair):
"""Query of the average rate last 3 and 12 hours."""
uri = f'{self.apibase}{trading_pair}/rates'
params = {'trading_pair': trading_pair}
avail_params = ['trading_pair']
# Build parameters
p = ParameterBuilder({}, {}, uri)
p.verify_keys_and_values(avail_params, params)
return self.APIConnect('GET', p)
def showAccountLedger(self, currency, **args):
"""Query on Account statement."""
params = {'currency': currency}
params.update(args)
uri = f'{self.apibase}{currency}/account/ledger'
avail_params = ['currency', 'type',
'datetime_start', 'datetime_end', 'page']
p = ParameterBuilder(avail_params, params, uri)
del params['currency']
p = ParameterBuilder(avail_params, params, uri)
return self.APIConnect('GET', p)
def showPermissions(self):
"""Show permissions that are allowed for used API key"""
uri = f'{self.apibase}permissions'
p = ParameterBuilder({}, {}, uri)
return self.APIConnect('GET', p) | 3,798 | 0 | 214 |
8c080f7a67d89084acf08fc4b0d24de5c243f0ba | 749 | py | Python | Section3/L6 Saving loading of file/saving_loading_arrays2.py | Mohit-Sharma1/Takenmind_Internship_assignments | 7099ae3a70fca009f6298482e90e988124868148 | [
"MIT"
] | null | null | null | Section3/L6 Saving loading of file/saving_loading_arrays2.py | Mohit-Sharma1/Takenmind_Internship_assignments | 7099ae3a70fca009f6298482e90e988124868148 | [
"MIT"
] | null | null | null | Section3/L6 Saving loading of file/saving_loading_arrays2.py | Mohit-Sharma1/Takenmind_Internship_assignments | 7099ae3a70fca009f6298482e90e988124868148 | [
"MIT"
] | null | null | null | import numpy as np
arr=np.arange(10)
print arr
#saving single array
np.save('saved_array',arr)
#now_file_is_created = saved_array.npy
new_array=np.load('saved_array.npy')
print new_array
#save multiple array
array_1=np.arange(25)
array_2=np.arange(30)
np.savez('saved_archieve.npz',x=array_1,y=array_2)
load_archieve=np.load('saved_archieve.npz')
print 'load_archieve[x] is'
print load_archieve['x']
print 'load_archieve[y] is'
print load_archieve['y']
#save to textfile
np.savetxt('notepadfile.txt',array_1,delimiter = ',') # delimeter is a new function wwhich used with the to file generation of text file.
#loading of txt file
load_txt_file=np.loadtxt('notepadfile.txt',delimiter=',')
print "load_txt_file is"
print load_txt_file
| 18.725 | 137 | 0.766355 | import numpy as np
arr=np.arange(10)
print arr
# --- Save a single array to a binary .npy file (Python 2 demo script) ---
np.save('saved_array',arr)
# np.save appends the extension itself, creating saved_array.npy
new_array=np.load('saved_array.npy')
print new_array
# --- Save multiple arrays into one .npz archive, keyed by keyword name ---
array_1=np.arange(25)
array_2=np.arange(30)
np.savez('saved_archieve.npz',x=array_1,y=array_2)
load_archieve=np.load('saved_archieve.npz')
print 'load_archieve[x] is'
print load_archieve['x']
print 'load_archieve[y] is'
print load_archieve['y']
# --- Save one array to a plain-text file ---
np.savetxt('notepadfile.txt',array_1,delimiter = ',') # 'delimiter' sets the column separator used in the text file
# --- Load the text file back with the same delimiter ---
load_txt_file=np.loadtxt('notepadfile.txt',delimiter=',')
print "load_txt_file is"
print load_txt_file
| 0 | 0 | 0 |
76d2e76305c2d3d979ffcd7fdf87223ff0b7649d | 840 | py | Python | caravel_test/test_frequency_counter.py | mattvenn/wrapped_frequency_counter | e3a0c328ed4b4882601dba04e3694d1dc0b70a52 | [
"Apache-2.0"
] | 1 | 2022-03-17T00:17:06.000Z | 2022-03-17T00:17:06.000Z | caravel_test/test_frequency_counter.py | mattvenn/wrapped_frequency_counter | e3a0c328ed4b4882601dba04e3694d1dc0b70a52 | [
"Apache-2.0"
] | null | null | null | caravel_test/test_frequency_counter.py | mattvenn/wrapped_frequency_counter | e3a0c328ed4b4882601dba04e3694d1dc0b70a52 | [
"Apache-2.0"
] | null | null | null | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
import random
from test.test_encoder import Encoder
clocks_per_phase = 10
# takes ~60 seconds on my PC
@cocotb.test()
| 22.702703 | 67 | 0.671429 | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
import random
from test.test_encoder import Encoder
clocks_per_phase = 10
# takes ~60 seconds on my PC
@cocotb.test()
async def test_start(dut):
    """Power-up and reset bring-up sequence for the Caravel test harness.

    Starts a 25 ns clock, raises the four power rails one at a time,
    releases reset, then waits until the wrapped project reports active.
    """
    clock = Clock(dut.clock, 25, units="ns")
    cocotb.fork(clock.start())
    # hold reset low while the rails come up
    dut.RSTB <= 0
    dut.power1 <= 0;
    dut.power2 <= 0;
    dut.power3 <= 0;
    dut.power4 <= 0;
    # stagger the power rails, 8 clock cycles apart
    await ClockCycles(dut.clock, 8)
    dut.power1 <= 1;
    await ClockCycles(dut.clock, 8)
    dut.power2 <= 1;
    await ClockCycles(dut.clock, 8)
    dut.power3 <= 1;
    await ClockCycles(dut.clock, 8)
    dut.power4 <= 1;
    await ClockCycles(dut.clock, 80)
    # release reset after the rails have settled
    dut.RSTB <= 1
    # wait for the project to become active
    await RisingEdge(dut.uut.mprj.wrapped_frequency_counter.active)
| 586 | 0 | 22 |
cf18605741f7a9144acd9f9a39605a2fd243f40c | 5,454 | py | Python | lemonadefashion_flask_monitoringdashboard/controllers/requests.py | us88/LF_Flask-MonitoringDashboard | 5917543fe480a3d46b52663d6937558078e9f705 | [
"MIT"
] | null | null | null | lemonadefashion_flask_monitoringdashboard/controllers/requests.py | us88/LF_Flask-MonitoringDashboard | 5917543fe480a3d46b52663d6937558078e9f705 | [
"MIT"
] | null | null | null | lemonadefashion_flask_monitoringdashboard/controllers/requests.py | us88/LF_Flask-MonitoringDashboard | 5917543fe480a3d46b52663d6937558078e9f705 | [
"MIT"
] | null | null | null | import datetime
import numpy
from sqlalchemy import func, and_
from lemonadefashion_flask_monitoringdashboard.core.timezone import to_utc_datetime, to_local_datetime
from lemonadefashion_flask_monitoringdashboard.database import Request
from lemonadefashion_flask_monitoringdashboard.database.count_group import count_requests_per_day, get_value
from lemonadefashion_flask_monitoringdashboard.database.endpoint import get_endpoints, get_num_requests
from lemonadefashion_flask_monitoringdashboard.database.request import create_time_based_sample_criterion
def get_num_requests_data(session, start_date, end_date):
    """Collect per-endpoint request counts for every day in the range.

    :param session: session for the database
    :param start_date: datetime object
    :param end_date: datetime object and: end_date >= start_date
    :return: dict with 'days' (ISO date strings) and 'data' (one entry per
        endpoint holding its name and a count for each day)
    """
    day_count = (end_date - start_date).days + 1
    day_list = [start_date + datetime.timedelta(days=offset) for offset in range(day_count)]
    # One aggregate per day; each endpoint's count is looked up by its id.
    daily_hits = count_requests_per_day(session, day_list)
    data = []
    for endpoint in get_endpoints(session):
        counts = [get_value(day_hits, endpoint.id) for day_hits in daily_hits]
        data.append({'name': endpoint.name, 'values': counts})
    return {'days': [day.strftime('%Y-%m-%d') for day in day_list], 'data': data}
def get_all_request_status_code_counts(session, endpoint_id):
    """Aggregate how often each status code occurred for one endpoint.

    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :return: A list of tuples in the form of `(status_code, count)`
    """
    # Rows with a NULL status code are excluded from the aggregation.
    query = session.query(Request.status_code, func.count(Request.status_code))
    query = query.filter(Request.endpoint_id == endpoint_id, Request.status_code.isnot(None))
    return query.group_by(Request.status_code).all()
def get_status_code_distribution(session, endpoint_id):
    """Compute the fraction of requests per status code for one endpoint.

    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :return: dict mapping status code -> fraction of requests, e.g.
        ``{200: 0.92, 404: 0.08}`` means 92% of requests returned 200 and
        8% returned 404. Empty when the endpoint has no counted requests.
    """
    counts = get_all_request_status_code_counts(session, endpoint_id)
    total = 0
    for _, frequency in counts:
        total += frequency
    # No division happens for an empty result, so total == 0 is safe here.
    distribution = {}
    for status_code, frequency in counts:
        distribution[status_code] = frequency / total
    return distribution
def get_status_code_frequencies(session, endpoint_id, *criterion):
    """Count how often each status code was returned by an endpoint.

    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :param criterion: optional extra filter clauses applied to the query
    :return: dict mapping status code -> number of requests, e.g.
        ``{200: 105, 404: 3}`` means 200 was returned 105 times and 404
        was returned 3 times.
    """
    counted = func.count(Request.status_code)
    query = session.query(Request.status_code, counted)
    query = query.filter(
        Request.endpoint_id == endpoint_id,
        Request.status_code.isnot(None),
        *criterion
    )
    return dict(query.group_by(Request.status_code).all())
def get_error_requests(session, endpoint_id, *criterion):
    """Fetch all requests of an endpoint whose status code is 400-599.

    :param session: session for the database
    :param endpoint_id: ID of the endpoint to be queried
    :param criterion: optional extra filter clauses applied to the query
    :return: list of matching Request rows
    """
    # 4xx and 5xx responses count as errors; NULL status codes are skipped.
    error_range = and_(
        Request.endpoint_id == endpoint_id,
        Request.status_code.isnot(None),
        Request.status_code >= 400,
        Request.status_code <= 599,
    )
    query = session.query(Request).filter(error_range, *criterion)
    return query.all()
def get_hourly_load(session, endpoint_id, start_date, end_date):
    """
    Build a 24 x numdays heatmap of request counts for one endpoint.

    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :param start_date: datetime object
    :param end_date: datetime object and: end_date >= start_date
    :return: dict with 'days' (ISO date strings) and 'data' (24 rows, one
        per hour, each with one count per day)
    """
    numdays = (end_date - start_date).days + 1
    # list of hours: 0:00 - 23:00
    hours = ['0{}:00'.format(h) for h in range(0, 10)] + ['{}:00'.format(h) for h in range(10, 24)]
    heatmap_data = numpy.zeros((len(hours), numdays))
    # Query window: whole days converted to UTC.
    start_datetime = to_utc_datetime(
        datetime.datetime.combine(start_date, datetime.time(0, 0, 0, 0))
    )
    end_datetime = to_utc_datetime(datetime.datetime.combine(end_date, datetime.time(23, 59, 59)))
    for time, count in get_num_requests(session, endpoint_id, start_datetime, end_datetime):
        parsed_time = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
        # NOTE(review): the day bucket is derived from the UTC window while the
        # hour bucket uses local time -- looks intentional, but confirm against
        # the format produced by get_num_requests.
        day_index = (parsed_time - start_datetime).days
        hour_index = int(to_local_datetime(parsed_time).strftime('%H'))
        heatmap_data[hour_index][day_index] = count
    return {
        'days': [
            (start_date + datetime.timedelta(days=i)).strftime('%Y-%m-%d') for i in range(numdays)
        ],
        "data": heatmap_data.tolist(),
    }
| 39.521739 | 119 | 0.713055 | import datetime
import numpy
from sqlalchemy import func, and_
from lemonadefashion_flask_monitoringdashboard.core.timezone import to_utc_datetime, to_local_datetime
from lemonadefashion_flask_monitoringdashboard.database import Request
from lemonadefashion_flask_monitoringdashboard.database.count_group import count_requests_per_day, get_value
from lemonadefashion_flask_monitoringdashboard.database.endpoint import get_endpoints, get_num_requests
from lemonadefashion_flask_monitoringdashboard.database.request import create_time_based_sample_criterion
def get_num_requests_data(session, start_date, end_date):
    """
    Collect per-endpoint request counts for every day in [start_date, end_date].

    :param session: session for the database
    :param start_date: datetime object
    :param end_date: datetime object and: end_date >= start_date
    :return: a list of the number of requests for each endpoint and on which day
    """
    numdays = (end_date - start_date).days + 1
    # One entry per day in the requested range, inclusive of both endpoints.
    days = [start_date + datetime.timedelta(days=i) for i in range(numdays)]
    hits = count_requests_per_day(session, days)
    endpoints = get_endpoints(session)
    # For every endpoint, look up its hit count in each day's aggregate.
    data = [
        {'name': end.name, 'values': [get_value(hits_day, end.id) for hits_day in hits]}
        for end in endpoints
    ]
    return {'days': [d.strftime('%Y-%m-%d') for d in days], 'data': data}
def get_all_request_status_code_counts(session, endpoint_id):
    """
    Gets all the request status code counts.

    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :return: A list of tuples in the form of `(status_code, count)`
    """
    # Rows with a NULL status code are excluded from the aggregation.
    return (
        session.query(Request.status_code, func.count(Request.status_code))
        .filter(Request.endpoint_id == endpoint_id, Request.status_code.isnot(None))
        .group_by(Request.status_code)
        .all()
    )
def get_status_code_distribution(session, endpoint_id):
    """
    Gets the distribution of status codes returned by the given endpoint.

    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :return: A dict where the key is the status code and the value is the fraction of requests
    that returned the status
    code. Example: a return value of `{ 200: 0.92, 404: 0.08 }` means that status code 200 was
    returned on 92% of the
    requests. 8% of the requests returned a 404 status code.
    """
    status_code_counts = get_all_request_status_code_counts(session, endpoint_id)
    total_count = sum(frequency for (_, frequency) in status_code_counts)
    # An empty result yields an empty dict; the division only runs when at
    # least one row exists, so total_count == 0 cannot raise here.
    return {status_code: frequency / total_count for (status_code, frequency) in status_code_counts}
def get_status_code_frequencies(session, endpoint_id, *criterion):
    """
    Gets the frequencies of each status code.

    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :param criterion: Optional criteria used to filter the requests.
    :return: A dict where the key is the status code and the value is the fraction of requests that returned the status
    code. Example: a return value of `{ 200: 105, 404: 3 }` means that status code 200 was returned 105 times and
    404 was returned 3 times.
    """
    # Extra filter clauses (if any) are unpacked straight into the query.
    status_code_counts = session.query(Request.status_code, func.count(Request.status_code)) \
        .filter(Request.endpoint_id == endpoint_id, Request.status_code.isnot(None), *criterion) \
        .group_by(Request.status_code).all()
    return dict(status_code_counts)
def get_error_requests(session, endpoint_id, *criterion):
    """
    Gets all requests that returned an error status code (400-599).

    :param session: session for the database
    :param endpoint_id: ID of the endpoint to be queried
    :param criterion: Optional criteria used to filter the requests.
    :return: list of matching Request rows
    """
    # 4xx (client) and 5xx (server) responses count as errors; NULL codes skip.
    criteria = and_(
        Request.endpoint_id == endpoint_id,
        Request.status_code.isnot(None),
        Request.status_code >= 400,
        Request.status_code <= 599,
    )
    return session.query(Request).filter(criteria, *criterion).all()
def get_status_code_frequencies_in_interval(session, endpoint_id, criterion):
    """Thin wrapper around :func:`get_status_code_frequencies`.

    ``criterion`` is an iterable of filter clauses; it is unpacked into the
    wrapped call.
    """
    return get_status_code_frequencies(session, endpoint_id, *criterion)
def get_hourly_load(session, endpoint_id, start_date, end_date):
    """
    Build a per-hour, per-day heatmap of request counts for one endpoint.

    :param session: session for the database
    :param endpoint_id: id for the endpoint
    :param start_date: datetime object
    :param end_date: datetime object and: end_date >= start_date
    :return: dict with 'days' (ISO date strings) and 'data' (24 x numdays counts)
    """
    numdays = (end_date - start_date).days + 1
    # list of hours: 0:00 - 23:00
    hours = ['0{}:00'.format(h) for h in range(0, 10)] + ['{}:00'.format(h) for h in range(10, 24)]
    heatmap_data = numpy.zeros((len(hours), numdays))
    # Whole-day query window converted to UTC.
    start_datetime = to_utc_datetime(
        datetime.datetime.combine(start_date, datetime.time(0, 0, 0, 0))
    )
    end_datetime = to_utc_datetime(datetime.datetime.combine(end_date, datetime.time(23, 59, 59)))
    for time, count in get_num_requests(session, endpoint_id, start_datetime, end_datetime):
        parsed_time = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
        # NOTE(review): day index is computed in UTC but the hour index in
        # local time; verify this matches what get_num_requests emits.
        day_index = (parsed_time - start_datetime).days
        hour_index = int(to_local_datetime(parsed_time).strftime('%H'))
        heatmap_data[hour_index][day_index] = count
    return {
        'days': [
            (start_date + datetime.timedelta(days=i)).strftime('%Y-%m-%d') for i in range(numdays)
        ],
        "data": heatmap_data.tolist(),
    }
| 129 | 0 | 23 |
8561e6fbacbb534b3ebce62f6af578c0af3f291e | 5,992 | py | Python | src/myth.py | gupta-siddhartha/MYTH | 4a57536ba91e77687fa86e714c06821b275fef7a | [
"MIT"
] | null | null | null | src/myth.py | gupta-siddhartha/MYTH | 4a57536ba91e77687fa86e714c06821b275fef7a | [
"MIT"
] | null | null | null | src/myth.py | gupta-siddhartha/MYTH | 4a57536ba91e77687fa86e714c06821b275fef7a | [
"MIT"
] | null | null | null | #======================================================================
# MYTH : Multipurpose code compiles YT-rendering for Hydro-simulations
#
# Author: Siddhartha Gupta
# contact: gsiddhartha@uchicago.edu
#
# Last modified on 17 July 2020
#======================================================================
from headers import *
from users_input import *
import timeit
# Wall-clock timer for the total runtime report printed at the end.
start = timeit.default_timer()
print(CodeInfo)
########################################
# Creating Output directory
########################################
dirName = "%s" % (OutputPath)
if not os.path.exists(dirName):
    os.makedirs(dirName)
    print(">> Creating directory: %s\n" % (dirName))
else:
    print(">> Directory: ", dirName , "already exists\n")
# Optionally redirect all subsequent prints into a log file.
# NOTE(review): the original stdout is never restored and the file never closed.
if PrintLog == 'yes':
    sys.stdout = open("%s/myth.log"%OutputPath, 'w')
########################################
# Input data file main loop starts
########################################
for infilenumber in range(File_Start,File_End+1,1):
    #**************************************
    # Input data files
    #**************************************
    print(">> Input File Number:%d"%infilenumber)
    nx,ny,nz = Nx1, Nx2, Nx3
    #**************************************
    # Volrender info
    #**************************************
    Zoomedinout = ZoomedOperation + [infilenumber] + ZoomingInfo
    RenderingOperation=Zoomedinout+Rotation
    #**************************************
    # Reading input data file
    #**************************************
    if FreshRead == 'yes':
        data = readfile.Read_InputFile(simulation, 'data', nx,ny,nz,infilenumber,InputPath)
        X1,X2,X3 = readfile.Read_InputFile(simulation, 'grid', nx,ny,nz,infilenumber,InputPath)
        if AddParticles == 'yes':
            Particles = readfile.Read_Particles(infilenumber,InputPath)
            # Keep columns 1-3 only -- presumably particle x,y,z; TODO confirm.
            Particles=Particles[:,[1,2,3]]
        else:
            Particles = [0]
    #**************************************
    # Check a data slice
    #**************************************
    if (FreshRead == 'yes' and CheckInputData == 'yes'):
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        fig,ax = plt.subplots(1,1)
        # NOTE(review): the repeated add_subplot/aspect calls below look
        # redundant; kept as-is.
        ax = fig.add_subplot(111,aspect='equal')
        ax = fig.add_subplot(111,aspect=1.0)
        ax.set_aspect('equal')
        plt.axes().set_aspect('equal')
        color_map_name = 'magma'
        if Check_slice == 'xy':
            print(data.shape)
            plt.pcolormesh(X1,X2,np.log10(data[len(X3)//2,:,:]), cmap=color_map_name)
            plt.colorbar()
        if AddParticles == 'yes':
            plt.scatter(Particles[:,0],Particles[:,1])
        print(">> Please check at %s file:checkinput%04d.png" % (OutputPath,infilenumber))
        plt.savefig("%s/checkinput%04d.png"%(OutputPath,infilenumber))
        plt.close()
    #**************************************
    # Convert data to uniform grid
    #**************************************
    if FreshRead == 'yes':
        #
        if ConverToUniformGrid == 'yes':
            unidata = tools.Interpolate(Resol_x1,Resol_x2,Resol_x3,Interpolationbox,X1,X2,X3,data)
        #
        else:
            unidata = data
        dumpfilename="%s/unigriddata%04d.txt" % (OutputPath,infilenumber)
        print(">>Dumping data into file=%s" % dumpfilename)
        # NOTE(review): 'file' shadows the builtin and is not closed on error;
        # a 'with' block would be safer.
        file = open(dumpfilename, "w")
        for k in range((Resol_x3)):
            for j in range((Resol_x2)):
                for i in range((Resol_x1)):
                    file.write("%e\n"% (unidata[k,j,i]))
                file.write("\n")
            file.write("\n")
        file.close()
    else:
        # Reuse the uniform-grid dump produced by an earlier run.
        dumpfilename="%s/unigriddata%04d.txt" % (OutputPath,infilenumber)
        print(">>Reading data from the file dumped previously (%s)"%(dumpfilename))
        data = np.loadtxt(dumpfilename)
        data = np.reshape(data,(Resol_x3,Resol_x2,Resol_x1))
        unidata = data
    #**************************************
    # Reshaping data for the yt project
    #**************************************
    #unidata = np.moveaxis(unidata.reshape(Resol_x3,Resol_x2,Resol_x1),0,-2)
    x1beg, x1end = Interpolationbox[1], Interpolationbox[2]
    x2beg, x2end = Interpolationbox[3], Interpolationbox[4]
    x3beg, x3end = Interpolationbox[5], Interpolationbox[6]
    x1 = np.linspace(x1beg, x1end, Resol_x1)
    x2 = np.linspace(x2beg, x2end, Resol_x2)
    x3 = np.linspace(x3beg, x3end, Resol_x3)
    #**************************************
    # Checking if interpolation is correct
    # Please see figure in output dir
    # checkdum.filenumber.png
    #**************************************
    if CheckInterpolation == 'yes':
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        fig,ax = plt.subplots(1,1)
        ax = fig.add_subplot(111,aspect='equal')
        ax = fig.add_subplot(111,aspect=1.0)
        ax.set_aspect('equal')
        plt.axes().set_aspect('equal')
        print(unidata.shape)
        color_map_name = 'magma'
        if Check_slice == 'xy':
            #plt.pcolormesh(x1,x2,np.log10(unidata[Check_slice_number,:,:], cmap=color_map_name)
            #vmin=np.log10(colorbound[0]),vmax=np.log10(colorbound[1]))
            plt.pcolormesh(x1,x2,np.log10(unidata[Check_slice_number,:,:]), cmap=color_map_name)
            plt.colorbar()
        if AddParticles == 'yes':
            plt.scatter(Particles[:,0],Particles[:,1])
        print(">> Please check at %s file:checkdump%04d.png" % (OutputPath,infilenumber))
        plt.savefig("%s/checkdump%04d.png"%(OutputPath,infilenumber))
        plt.close()
    #**************************************
    # Volume rendering
    #**************************************
    # yt reads d[nx,ny,nz] instead d[nz,ny,nz], so we reshape data below
    B=np.moveaxis(unidata,2,0)
    unidata=np.moveaxis(B,1,-1)
    #Ready for performing yt-rendering
    outputfilename = "%s/volren%04d" % (OutputPath,infilenumber)
    ytrendering.VolumeRender(colorbound, RenderingOperation, outputfilename, x1,x2,x3,unidata,Particles)
    #**************************************
    # Job completed for filenumber
    #**************************************
    print(">> File: %d Completed! cheers!!!"%infilenumber)
stop = timeit.default_timer()
tools.RuntimeCalculation(start, stop)
print(">> Completed! cheers!!!")
print(Completion)
########################################
# Input data file main loop ends
########################################
| 34.436782 | 101 | 0.563919 | #======================================================================
# MYTH : Multipurpose code compiles YT-rendering for Hydro-simulations
#
# Author: Siddhartha Gupta
# contact: gsiddhartha@uchicago.edu
#
# Last modified on 17 July 2020
#======================================================================
from headers import *
from users_input import *
import timeit
# Start the wall-clock timer used for the final runtime report.
start = timeit.default_timer()
print(CodeInfo)
########################################
# Creating Output directory
########################################
dirName = "%s" % (OutputPath)
if not os.path.exists(dirName):
    os.makedirs(dirName)
    print(">> Creating directory: %s\n" % (dirName))
else:
    print(">> Directory: ", dirName , "already exists\n")
# Optionally send all prints to a log file (stream left open/unrestored).
if PrintLog == 'yes':
    sys.stdout = open("%s/myth.log"%OutputPath, 'w')
########################################
# Input data file main loop starts
########################################
for infilenumber in range(File_Start,File_End+1,1):
    #**************************************
    # Input data files
    #**************************************
    print(">> Input File Number:%d"%infilenumber)
    nx,ny,nz = Nx1, Nx2, Nx3
    #**************************************
    # Volrender info
    #**************************************
    Zoomedinout = ZoomedOperation + [infilenumber] + ZoomingInfo
    RenderingOperation=Zoomedinout+Rotation
    #**************************************
    # Reading input data file
    #**************************************
    if FreshRead == 'yes':
        data = readfile.Read_InputFile(simulation, 'data', nx,ny,nz,infilenumber,InputPath)
        X1,X2,X3 = readfile.Read_InputFile(simulation, 'grid', nx,ny,nz,infilenumber,InputPath)
        if AddParticles == 'yes':
            Particles = readfile.Read_Particles(infilenumber,InputPath)
            # Columns 1-3 are assumed to be particle positions -- TODO confirm.
            Particles=Particles[:,[1,2,3]]
        else:
            Particles = [0]
    #**************************************
    # Check a data slice
    #**************************************
    if (FreshRead == 'yes' and CheckInputData == 'yes'):
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        fig,ax = plt.subplots(1,1)
        ax = fig.add_subplot(111,aspect='equal')
        ax = fig.add_subplot(111,aspect=1.0)
        ax.set_aspect('equal')
        plt.axes().set_aspect('equal')
        color_map_name = 'magma'
        if Check_slice == 'xy':
            print(data.shape)
            plt.pcolormesh(X1,X2,np.log10(data[len(X3)//2,:,:]), cmap=color_map_name)
            plt.colorbar()
        if AddParticles == 'yes':
            plt.scatter(Particles[:,0],Particles[:,1])
        print(">> Please check at %s file:checkinput%04d.png" % (OutputPath,infilenumber))
        plt.savefig("%s/checkinput%04d.png"%(OutputPath,infilenumber))
        plt.close()
    #**************************************
    # Convert data to uniform grid
    #**************************************
    if FreshRead == 'yes':
        #
        if ConverToUniformGrid == 'yes':
            unidata = tools.Interpolate(Resol_x1,Resol_x2,Resol_x3,Interpolationbox,X1,X2,X3,data)
        #
        else:
            unidata = data
        dumpfilename="%s/unigriddata%04d.txt" % (OutputPath,infilenumber)
        print(">>Dumping data into file=%s" % dumpfilename)
        # NOTE(review): 'file' shadows the builtin; prefer a 'with' block.
        file = open(dumpfilename, "w")
        for k in range((Resol_x3)):
            for j in range((Resol_x2)):
                for i in range((Resol_x1)):
                    file.write("%e\n"% (unidata[k,j,i]))
                file.write("\n")
            file.write("\n")
        file.close()
    else:
        # Load the uniform-grid dump written by a previous run instead.
        dumpfilename="%s/unigriddata%04d.txt" % (OutputPath,infilenumber)
        print(">>Reading data from the file dumped previously (%s)"%(dumpfilename))
        data = np.loadtxt(dumpfilename)
        data = np.reshape(data,(Resol_x3,Resol_x2,Resol_x1))
        unidata = data
    #**************************************
    # Reshaping data for the yt project
    #**************************************
    #unidata = np.moveaxis(unidata.reshape(Resol_x3,Resol_x2,Resol_x1),0,-2)
    x1beg, x1end = Interpolationbox[1], Interpolationbox[2]
    x2beg, x2end = Interpolationbox[3], Interpolationbox[4]
    x3beg, x3end = Interpolationbox[5], Interpolationbox[6]
    x1 = np.linspace(x1beg, x1end, Resol_x1)
    x2 = np.linspace(x2beg, x2end, Resol_x2)
    x3 = np.linspace(x3beg, x3end, Resol_x3)
    #**************************************
    # Checking if interpolation is correct
    # Please see figure in output dir
    # checkdum.filenumber.png
    #**************************************
    if CheckInterpolation == 'yes':
        import matplotlib.pyplot as plt
        import matplotlib as mpl
        fig,ax = plt.subplots(1,1)
        ax = fig.add_subplot(111,aspect='equal')
        ax = fig.add_subplot(111,aspect=1.0)
        ax.set_aspect('equal')
        plt.axes().set_aspect('equal')
        print(unidata.shape)
        color_map_name = 'magma'
        if Check_slice == 'xy':
            #plt.pcolormesh(x1,x2,np.log10(unidata[Check_slice_number,:,:], cmap=color_map_name)
            #vmin=np.log10(colorbound[0]),vmax=np.log10(colorbound[1]))
            plt.pcolormesh(x1,x2,np.log10(unidata[Check_slice_number,:,:]), cmap=color_map_name)
            plt.colorbar()
        if AddParticles == 'yes':
            plt.scatter(Particles[:,0],Particles[:,1])
        print(">> Please check at %s file:checkdump%04d.png" % (OutputPath,infilenumber))
        plt.savefig("%s/checkdump%04d.png"%(OutputPath,infilenumber))
        plt.close()
    #**************************************
    # Volume rendering
    #**************************************
    # yt reads d[nx,ny,nz] instead d[nz,ny,nz], so we reshape data below
    B=np.moveaxis(unidata,2,0)
    unidata=np.moveaxis(B,1,-1)
    #Ready for performing yt-rendering
    outputfilename = "%s/volren%04d" % (OutputPath,infilenumber)
    ytrendering.VolumeRender(colorbound, RenderingOperation, outputfilename, x1,x2,x3,unidata,Particles)
    #**************************************
    # Job completed for filenumber
    #**************************************
    print(">> File: %d Completed! cheers!!!"%infilenumber)
stop = timeit.default_timer()
tools.RuntimeCalculation(start, stop)
print(">> Completed! cheers!!!")
print(Completion)
########################################
# Input data file main loop ends
########################################
| 0 | 0 | 0 |
4faa90fa574ae7c5351d0d865e9c4fefeaa8b91e | 3,954 | py | Python | azure-mgmt-batchai/azure/mgmt/batchai/models/cluster_create_parameters.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-batchai/azure/mgmt/batchai/models/cluster_create_parameters.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure-mgmt-batchai/azure/mgmt/batchai/models/cluster_create_parameters.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClusterCreateParameters(Model):
"""Parameters supplied to the Create operation.
:param location: The region in which to create the cluster.
:type location: str
:param tags: The user specified tags associated with the Cluster.
:type tags: dict
:param vm_size: The size of the virtual machines in the cluster. All
virtual machines in a cluster are the same size. For information about
available VM sizes for clusters using images from the Virtual Machines
Marketplace (see Sizes for Virtual Machines (Linux) or Sizes for Virtual
Machines (Windows). Batch AI service supports all Azure VM sizes except
STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and
STANDARD_DSV2 series).
:type vm_size: str
:param vm_priority: dedicated or lowpriority. Default is dedicated.
Possible values include: 'dedicated', 'lowpriority'. Default value:
"dedicated" .
:type vm_priority: str or :class:`VmPriority
<azure.mgmt.batchai.models.VmPriority>`
:param scale_settings: Desired scale for the cluster.
:type scale_settings: :class:`ScaleSettings
<azure.mgmt.batchai.models.ScaleSettings>`
:param virtual_machine_configuration: Settings for OS image and mounted
data volumes.
:type virtual_machine_configuration: :class:`VirtualMachineConfiguration
<azure.mgmt.batchai.models.VirtualMachineConfiguration>`
:param node_setup: Setup to be done on all compute nodes in the cluster.
:type node_setup: :class:`NodeSetup <azure.mgmt.batchai.models.NodeSetup>`
:param user_account_settings: Settings for user account that will be
created on all compute nodes of the cluster.
:type user_account_settings: :class:`UserAccountSettings
<azure.mgmt.batchai.models.UserAccountSettings>`
:param subnet: Specifies the identifier of the subnet. .
:type subnet: :class:`ResourceId <azure.mgmt.batchai.models.ResourceId>`
"""
_validation = {
'location': {'required': True},
'vm_size': {'required': True},
'user_account_settings': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'vm_size': {'key': 'properties.vmSize', 'type': 'str'},
'vm_priority': {'key': 'properties.vmPriority', 'type': 'VmPriority'},
'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'},
'virtual_machine_configuration': {'key': 'properties.virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'node_setup': {'key': 'properties.nodeSetup', 'type': 'NodeSetup'},
'user_account_settings': {'key': 'properties.userAccountSettings', 'type': 'UserAccountSettings'},
'subnet': {'key': 'properties.subnet', 'type': 'ResourceId'},
}
| 49.425 | 188 | 0.6826 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClusterCreateParameters(Model):
    """Parameters supplied to the Create operation.
    :param location: The region in which to create the cluster.
    :type location: str
    :param tags: The user specified tags associated with the Cluster.
    :type tags: dict
    :param vm_size: The size of the virtual machines in the cluster. All
    virtual machines in a cluster are the same size. For information about
    available VM sizes for clusters using images from the Virtual Machines
    Marketplace (see Sizes for Virtual Machines (Linux) or Sizes for Virtual
    Machines (Windows). Batch AI service supports all Azure VM sizes except
    STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and
    STANDARD_DSV2 series).
    :type vm_size: str
    :param vm_priority: dedicated or lowpriority. Default is dedicated.
    Possible values include: 'dedicated', 'lowpriority'. Default value:
    "dedicated" .
    :type vm_priority: str or :class:`VmPriority
    <azure.mgmt.batchai.models.VmPriority>`
    :param scale_settings: Desired scale for the cluster.
    :type scale_settings: :class:`ScaleSettings
    <azure.mgmt.batchai.models.ScaleSettings>`
    :param virtual_machine_configuration: Settings for OS image and mounted
    data volumes.
    :type virtual_machine_configuration: :class:`VirtualMachineConfiguration
    <azure.mgmt.batchai.models.VirtualMachineConfiguration>`
    :param node_setup: Setup to be done on all compute nodes in the cluster.
    :type node_setup: :class:`NodeSetup <azure.mgmt.batchai.models.NodeSetup>`
    :param user_account_settings: Settings for user account that will be
    created on all compute nodes of the cluster.
    :type user_account_settings: :class:`UserAccountSettings
    <azure.mgmt.batchai.models.UserAccountSettings>`
    :param subnet: Specifies the identifier of the subnet. .
    :type subnet: :class:`ResourceId <azure.mgmt.batchai.models.ResourceId>`
    """
    # Required fields, enforced by msrest model validation.
    _validation = {
        'location': {'required': True},
        'vm_size': {'required': True},
        'user_account_settings': {'required': True},
    }
    # Attribute -> JSON wire path/type mapping (auto-generated; keep exact).
    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'vm_size': {'key': 'properties.vmSize', 'type': 'str'},
        'vm_priority': {'key': 'properties.vmPriority', 'type': 'VmPriority'},
        'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'},
        'virtual_machine_configuration': {'key': 'properties.virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
        'node_setup': {'key': 'properties.nodeSetup', 'type': 'NodeSetup'},
        'user_account_settings': {'key': 'properties.userAccountSettings', 'type': 'UserAccountSettings'},
        'subnet': {'key': 'properties.subnet', 'type': 'ResourceId'},
    }
    def __init__(self, location, vm_size, user_account_settings, tags=None, vm_priority="dedicated", scale_settings=None, virtual_machine_configuration=None, node_setup=None, subnet=None):
        """Store the supplied values; see the class docstring for semantics."""
        self.location = location
        self.tags = tags
        self.vm_size = vm_size
        self.vm_priority = vm_priority
        self.scale_settings = scale_settings
        self.virtual_machine_configuration = virtual_machine_configuration
        self.node_setup = node_setup
        self.user_account_settings = user_account_settings
        self.subnet = subnet
| 536 | 0 | 27 |
6e6d727791f38eb8a21eb1a8ac25979239e65594 | 1,457 | py | Python | manager_app/apis/manage_carousel_api.py | syz247179876/e_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
] | 7 | 2021-04-10T13:20:56.000Z | 2022-03-29T15:00:29.000Z | manager_app/apis/manage_carousel_api.py | syz247179876/E_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
] | 9 | 2021-05-11T03:53:31.000Z | 2022-03-12T00:58:03.000Z | manager_app/apis/manage_carousel_api.py | syz247179876/E_mall | f94e39e091e098242342f532ae371b8ff127542f | [
"Apache-2.0"
] | 2 | 2020-11-24T08:59:22.000Z | 2020-11-24T14:10:59.000Z | # -*- coding: utf-8 -*-
# @Time : 2021/4/6 下午8:51
# @Author : 司云中
# @File : manage_carousel_api.py
# @Software: Pycharm
from rest_framework.response import Response
from Emall.base_api import BackendGenericApiView
from Emall.decorator import validate_url_data
from Emall.response_code import response_code, DELETE_CAROUSEL, ADD_CAROUSEL
from manager_app.serializers.carousel_serializers import ManagerCarouselSerializer,DeleteCarouselSerializer
class ManageCarouselApiView(BackendGenericApiView):
    """Admin API for managing homepage carousel images (CRUD)."""
    serializer_class = ManagerCarouselSerializer
    serializer_delete_class = DeleteCarouselSerializer
    def post(self, request):
        """Create a carousel entry; delegates validation/saving to the base view."""
        super().post(request)
        return Response(response_code.result(ADD_CAROUSEL, '添加成功'))
    def delete(self, request):
        """Delete carousel entries; reports "no data" when nothing matched."""
        # Fix: removed leftover debug print(rows) that leaked row counts to stdout.
        rows = super().delete(request)
        return Response(response_code.result(DELETE_CAROUSEL, '删除成功' if rows > 0 else '无数据操作'))
    @validate_url_data('carousel', 'pk')
    def put(self, request):
        """Modify an existing carousel entry identified by ``pk``."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        rows = serializer.modify()
        # NOTE(review): DELETE_CAROUSEL is reused as the status code for a
        # modify operation (as in the original); confirm a dedicated code exists.
        return Response(response_code.result(DELETE_CAROUSEL, '修改成功' if rows > 0 else '无数据操作'))
    @validate_url_data('carousel', 'pk', null=True)
    def get(self, request):
        """Retrieve carousel entries (all of them when no ``pk`` is given)."""
        return super().get(request)
| 31 | 107 | 0.702128 | # -*- coding: utf-8 -*-
# @Time : 2021/4/6 下午8:51
# @Author : 司云中
# @File : manage_carousel_api.py
# @Software: Pycharm
from rest_framework.response import Response
from Emall.base_api import BackendGenericApiView
from Emall.decorator import validate_url_data
from Emall.response_code import response_code, DELETE_CAROUSEL, ADD_CAROUSEL
from manager_app.serializers.carousel_serializers import ManagerCarouselSerializer,DeleteCarouselSerializer
class ManageCarouselApiView(BackendGenericApiView):
    """Admin API for managing homepage carousel images (add/list/modify/delete)."""

    # Serializer used for create payloads and listing.
    serializer_class = ManagerCarouselSerializer
    # Serializer used to validate delete payloads.
    serializer_delete_class = DeleteCarouselSerializer

    def post(self, request):
        """Add a carousel image; validation/creation is delegated to the base view."""
        super().post(request)
        return Response(response_code.result(ADD_CAROUSEL, '添加成功'))

    def delete(self, request):
        """Delete carousel image(s), reporting whether any rows were affected."""
        rows = super().delete(request)
        print(rows)  # NOTE(review): debug leftover — consider removing
        return Response(response_code.result(DELETE_CAROUSEL, '删除成功' if rows > 0 else '无数据操作'))

    @validate_url_data('carousel', 'pk')
    def put(self, request):
        """Modify the carousel image identified by ``pk`` in the URL."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        rows = serializer.modify()
        # NOTE(review): DELETE_CAROUSEL is reused as the status code for a
        # modification — confirm whether a dedicated modify code exists.
        return Response(response_code.result(DELETE_CAROUSEL, '修改成功' if rows > 0 else '无数据操作'))

    @validate_url_data('carousel', 'pk', null=True)
    def get(self, request):
        """List carousel images."""
        return super().get(request)
| 0 | 0 | 0 |
25eb94215850c29794e09342a0067104f51af52e | 3,678 | py | Python | tests/test_user_def_template.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | tests/test_user_def_template.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | tests/test_user_def_template.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django_tally.data.models import Data
from django_tally.user_def.lang import parse
from django_tally.user_def.lang.json import encode
from django_tally.user_def.models import UserDefTemplate
from .testapp.models import Foo
AGGREGATE_PARAMS = {
'required': ['get_tally', 'add', 'sub'],
'optional': [
'base', 'get_value', 'get_nonexisting_value',
'filter_value', 'transform', 'get_group',
],
}
AGGREGATE_TEMPLATE = list(parse("""
(do
(def agg_base '(do
(defn agg_sub [tally value]
(unquote sub))
(defn agg_add [tally value]
(unquote add))
(defn agg_trans [value]
(unquote (if (def? transform)
transform
'value)))))
(if (def? base)
(def agg_base (cat
'(do (unquote base))
(slice agg_base 1 null))))
(def res #{
'base
agg_base
'handle_change
'(-> tally
(agg_sub (agg_trans old_value))
(agg_add (agg_trans new_value)))})
(for key [
'get_tally
'get_value
'get_nonexisting_value
'filter_value
'get_group]
(if (eval '(def? (unquote key)))
(put res key (eval key))))
res)
"""))[0]
SUM_PARAMS = {'optional': AGGREGATE_PARAMS['optional']}
SUM_TEMPLATE = list(parse("""(do
(def res #{
'get_tally '0
'add '(+ tally value)
'sub '(- tally value)})
(for key [
'base
'get_value
'get_nonexisting_value
'filter_value
'get_group]
(if (eval '(def? (unquote key)))
(put res key (eval key))))
res)
"""))[0]
| 25.191781 | 67 | 0.54323 | from django.test import TestCase
from django_tally.data.models import Data
from django_tally.user_def.lang import parse
from django_tally.user_def.lang.json import encode
from django_tally.user_def.models import UserDefTemplate
from .testapp.models import Foo
AGGREGATE_PARAMS = {
'required': ['get_tally', 'add', 'sub'],
'optional': [
'base', 'get_value', 'get_nonexisting_value',
'filter_value', 'transform', 'get_group',
],
}
AGGREGATE_TEMPLATE = list(parse("""
(do
(def agg_base '(do
(defn agg_sub [tally value]
(unquote sub))
(defn agg_add [tally value]
(unquote add))
(defn agg_trans [value]
(unquote (if (def? transform)
transform
'value)))))
(if (def? base)
(def agg_base (cat
'(do (unquote base))
(slice agg_base 1 null))))
(def res #{
'base
agg_base
'handle_change
'(-> tally
(agg_sub (agg_trans old_value))
(agg_add (agg_trans new_value)))})
(for key [
'get_tally
'get_value
'get_nonexisting_value
'filter_value
'get_group]
(if (eval '(def? (unquote key)))
(put res key (eval key))))
res)
"""))[0]
SUM_PARAMS = {'optional': AGGREGATE_PARAMS['optional']}
SUM_TEMPLATE = list(parse("""(do
(def res #{
'get_tally '0
'add '(+ tally value)
'sub '(- tally value)})
(for key [
'base
'get_value
'get_nonexisting_value
'filter_value
'get_group]
(if (eval '(def? (unquote key)))
(put res key (eval key))))
res)
"""))[0]
class TestSimpleCounter(TestCase):
    """End-to-end tests for a user-defined "sum" tally template."""

    def setUp(self):
        # Persist the generic aggregate template, then a sum template that
        # inherits from it via ``parent``.
        self.aggregate_template = UserDefTemplate(
            params=AGGREGATE_PARAMS,
            template=encode(AGGREGATE_TEMPLATE),
        )
        self.aggregate_template.save()
        self.sum_template = UserDefTemplate(
            params=SUM_PARAMS,
            template=encode(SUM_TEMPLATE),
            parent=self.aggregate_template,
        )
        self.sum_template.save()

    def test_sum_template_transform(self):
        """transform() should expand sum params through the parent template."""
        values = self.sum_template.transform({
            'get_value': 1,
            'get_nonexisting_value': 0,
        })
        self.maxDiff = None  # show the full dict diff on failure
        self.assertEqual(values, {
            'base': list(parse("""
                (do
                    (defn agg_sub [tally value]
                        (- tally value))
                    (defn agg_add [tally value]
                        (+ tally value))
                    (defn agg_trans [value]
                        value))
            """))[0],
            'handle_change': list(parse("""
                (-> tally
                    (agg_sub (agg_trans old_value))
                    (agg_add (agg_trans new_value)))
            """))[0],
            'get_value': 1,
            'get_nonexisting_value': 0,
            'get_tally': 0,
        })

    def test_sum_template(self):
        """A sum tally subscribed to Foo should count live Foo instances."""
        counter = self.sum_template(
            get_value=1,
            get_nonexisting_value=0,
            db_name='counter',
            save=True,
        )
        # While subscribed, each Foo save/delete updates the stored tally.
        with counter.as_tally().on(Foo):
            self.assertStored('counter', 0)
            foo1 = Foo()
            foo1.save()
            self.assertStored('counter', 1)
            foo2 = Foo()
            foo2.save()
            self.assertStored('counter', 2)
            foo1.delete()
            self.assertStored('counter', 1)

    def assertStored(self, db_name, value):
        """Assert that the Data row named *db_name* currently holds *value*."""
        try:
            data = Data.objects.get(name=db_name)
        except Data.DoesNotExist:
            self.fail('No data associated with {}'.format(db_name))
        else:
            self.assertEqual(data.value, value)
| 1,964 | 13 | 131 |
2d6b79d9f4841f82e28b707c6ae7d9435b499d27 | 7,381 | py | Python | ml_models/RF.py | lackeylela/openASO | 20dddb35f226e42dfd6da5c0fe1cf7196795d33d | [
"BSD-3-Clause"
] | 3 | 2020-11-19T14:51:15.000Z | 2022-01-29T02:14:18.000Z | ml_models/RF.py | lackeylela/openASO | 20dddb35f226e42dfd6da5c0fe1cf7196795d33d | [
"BSD-3-Clause"
] | 1 | 2020-05-24T00:15:49.000Z | 2020-10-30T15:59:22.000Z | ml_models/RF.py | lackeylela/openASO | 20dddb35f226e42dfd6da5c0fe1cf7196795d33d | [
"BSD-3-Clause"
] | 4 | 2020-05-22T17:56:59.000Z | 2021-01-13T03:51:53.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat May 23 12:02:26 2020
@author: Chung
"""
import numpy as np
import pandas as pd
from sklearn import svm
import sys
import argparse
from sklearn.linear_model import Perceptron
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
#from sklearn.ensemble import VotingRegressor
from itertools import combinations_with_replacement
from itertools import permutations
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
#####args.input.name = 'example_data/Processed_IDT_ASO_Data'
#sys.argv = ['RF.py', '../example_data/Processed_IDT_ASO_Data']
sys.argv = ['RF.py', '../example_data/Mockup_UltimateDataSet.csv']
args = parse_args()
ASO_score_data = get_input_file()
#####build location info features
location_info = ASO_score_data["ASOseq"].apply(lambda x: one_hot(split(x)))
location_info = location_info.apply(lambda l: [item for sublist in l for item in sublist])
#build gene info features
structure_features = []
for i in range(len(ASO_score_data.columns)):
if ASO_score_data.columns[i].find('RNAstructScore') !=-1:
structure_features.append(ASO_score_data.columns[i])
#build structure info features
gene_pool = list(set(ASO_score_data["GeneID"]))[1:]
gene_info = ASO_score_data["GeneID"].apply(lambda x: one_hot_gene(x, gene_pool))
#####build kmers(2-5 mer) features
symbol_2mers = get_kmers(2)
symbol_3mers = get_kmers(3)
symbol_4mers = get_kmers(4)
symbol_5mers = get_kmers(5)
features_2mer = get_features(ASO_score_data["ASOseq"], symbol_2mers)
features_3mer = get_features(ASO_score_data["ASOseq"], symbol_3mers)
features_4mer = get_features(ASO_score_data["ASOseq"], symbol_4mers)
features_5mer = get_features(ASO_score_data["ASOseq"], symbol_5mers)
#####construct X
X = location_info.to_list()
X = combine_features(X, gene_info.to_list())
X = combine_features(X, features_2mer)
X = combine_features(X, features_3mer)
X = combine_features(X, features_4mer)
X = combine_features(X, features_5mer)
X = combine_features(X, ASO_score_data["chr"].to_list())
X = combine_features(X, ASO_score_data["AtoIeditingScore1"].to_list())
X = combine_features(X, ASO_score_data["RBPscore1"].to_list())
for i in range(len(structure_features)):
X = combine_features(X, ASO_score_data[structure_features[i]].to_list())
#####construct Y
Y = ASO_score_data["ASOeffective"]
#####constrcut feature names
ATCG_identity = ['A','C','G','T']
features = ["1"]*4*22
for i in range(len(features)):
base = i%4
features[i] = 'if position ' + str(i//4) +' is ' + ATCG_identity[base]
#features = features + gene_pool + symbol_2mers + symbol_3mers
features = features + gene_pool + symbol_2mers + symbol_3mers +symbol_4mers + symbol_5mers\
+ ['chr', "AtoIeditingScore1", "RBPscore1"] + structure_features
features = np.array(features)
#####split test/train
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
X_train = np.array(X_train)
X_test = np.array(X_test)
Y_train = np.array(Y_train)
Y_test = np.array(Y_test)
#####training model
tfbs_classifier = RandomForestRegressor(n_estimators=100)
tfbs_classifier.fit(X_train, Y_train)
Y_pred = tfbs_classifier.predict(X_test)
#####evaluate the model
error = mean_squared_error(Y_test, Y_pred)
print('mean_squared_error: ', error)
#####analyze the importance of each feature
importances = tfbs_classifier.feature_importances_
std = np.std([tree.feature_importances_ for tree in tfbs_classifier.estimators_],
axis=0)
indices = np.argsort(importances)
indices = indices[-50:]
print("Feature ranking:")
for f in range(len(indices)):
print("%d. feature %s (%f)" % (f + 1, features[indices[f]], importances[indices[f]]))
##### Plot feature importance
fig, ax = plt.subplots(figsize=(15,15))
plt.title("Feature importances")
#ax.barh(range(len(indices)), importances[indices], color="r", xerr=std[indices], align="center")
ax.barh(range(len(indices)), importances[indices], color="r", align="center")
ax.set_yticks(range(len(indices)))
ax.set_yticklabels(features[indices])
fig.savefig('../figure/feature_importance.png', bbox_inches='tight', dpi=200)
#plt.show()
'''
#visual inspection to the predicted data
plt.plot(Y_test, Y_pred, '.')
plt.xlabel('Real RNA effective')
plt.show()
''' | 29.762097 | 97 | 0.702886 | # -*- coding: utf-8 -*-
"""
Created on Sat May 23 12:02:26 2020
@author: Chung
"""
import numpy as np
import pandas as pd
from sklearn import svm
import sys
import argparse
from sklearn.linear_model import Perceptron
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
#from sklearn.ensemble import VotingRegressor
from itertools import combinations_with_replacement
from itertools import permutations
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
def parse_args():
    """Parse the command line; requires one readable input-file argument."""
    parser = argparse.ArgumentParser(
        description="Run regression on tab file")
    required = parser.add_argument_group("Required arguments to run")
    required.add_argument("input", type=argparse.FileType('r'))
    return parser.parse_args()
def get_input_file(path=None):
    """Load the ASO score table as a pandas DataFrame.

    path: optional CSV path.  Defaults to the file named on the command
    line (module-level ``args`` from parse_args()), so existing zero-arg
    callers are unaffected; passing a path makes the function testable
    without global state.
    Exits the process with status 1 if the file cannot be read (CLI tool
    behaviour preserved from the original).
    """
    if path is None:
        path = args.input.name
    try:
        return pd.read_csv(path, delimiter=",")
    except Exception as err:  # best-effort CLI: report and bail out
        print("Error opening file.")
        print(err)
        sys.exit(1)
def split(word):
    """Return the characters of *word* as a list."""
    return [ch for ch in word]
def one_hot(nuc_list, max_len=22):
    """One-hot encode a nucleotide list over ``max_len`` positions.

    nuc_list: sequence of single-character nucleotide symbols.
    max_len: number of encoded positions (default 22, the longest ASO in
        the dataset; generalized from the original hard-coded constant).
    Returns a list of ``max_len`` four-element indicator lists in A/C/G/T
    order; padding positions and unknown symbols encode as all zeros.
    """
    encoding = {
        'A': [1, 0, 0, 0],
        'C': [0, 1, 0, 0],
        'G': [0, 0, 1, 0],
        'T': [0, 0, 0, 1],
    }
    # Pad short sequences with 'X' (all-zero encoding), as the original did.
    padded = list(nuc_list) + ['X'] * (max_len - len(nuc_list))
    # Copy each row so callers can mutate one position without aliasing.
    return [list(encoding.get(nuc, [0, 0, 0, 0])) for nuc in padded]
def one_hot_gene(gene, gene_pool):
    """One-hot encode *gene* against the ordered list *gene_pool*."""
    return [1 if candidate == gene else 0 for candidate in gene_pool]
def get_kmers(kmer_length):
    """Return all 4**kmer_length DNA k-mers over the alphabet ATCG.

    Fix: the original enumerated base multisets and de-duplicated their
    permutations through a set, so the ordering depended on Python's
    randomized string hashing — feature-column order differed between
    runs.  itertools.product yields the same k-mers directly, in a
    deterministic order.
    """
    from itertools import product  # local import: file only from-imports other itertools names
    return ["".join(kmer) for kmer in product("ATCG", repeat=kmer_length)]
def get_features(data, kmer_list):
    """Count occurrences of each k-mer in every sequence.

    data: iterable of sequences (e.g. a pandas Series of ASO strings).
    kmer_list: k-mers of equal length; returned counts follow this order.
    Returns one count vector (list of ints) per sequence.

    Fixes relative to the original:
    - the sliding-window bound was taken from ``len(data[0])``, so
      sequences longer or shorter than the first entry were mis-scanned;
      each entry is now scanned over its own length;
    - k-mer lookup uses a dict (O(1)) instead of ``list.index`` (O(n)),
      and windows containing symbols outside the k-mer alphabet are
      skipped instead of raising ValueError.
    """
    kmer_length = len(kmer_list[0])
    index_of = {kmer: i for i, kmer in enumerate(kmer_list)}
    features = []
    for entry in data:
        counts = [0] * len(kmer_list)
        for start in range(len(entry) - kmer_length + 1):
            window = entry[start:start + kmer_length]
            if window in index_of:
                counts[index_of[window]] += 1
        features.append(counts)
    return features
def combine_features(*args):
    """Row-wise concatenation of feature columns.

    Each argument is a per-sample sequence whose elements are either a
    list of feature values or a single scalar feature.  Returns one flat
    feature list per sample.

    Bug fix: for scalar columns the original appended ``list(a)`` — the
    entire column — to every row, instead of that row's single value
    ``a[index]``.
    """
    combined = []
    num_samples = len(args[0])
    for index in range(num_samples):
        row = []
        for column in args:
            value = column[index]
            if isinstance(value, list):
                row.extend(value)
            else:
                row.append(value)
        combined.append(row)
    return combined
#####args.input.name = 'example_data/Processed_IDT_ASO_Data'
#sys.argv = ['RF.py', '../example_data/Processed_IDT_ASO_Data']
# NOTE(review): sys.argv is overwritten here, so the real command line is
# ignored — presumably a development shortcut; remove for production use.
sys.argv = ['RF.py', '../example_data/Mockup_UltimateDataSet.csv']
args = parse_args()
ASO_score_data = get_input_file()
#####build location info features
# One-hot encode each ASO sequence (22 positions x 4 bases), then flatten
# the per-position lists into one vector per sequence.
location_info = ASO_score_data["ASOseq"].apply(lambda x: one_hot(split(x)))
location_info = location_info.apply(lambda l: [item for sublist in l for item in sublist])
# build structure info features: collect every RNAstructScore* column name
structure_features = []
for i in range(len(ASO_score_data.columns)):
    if ASO_score_data.columns[i].find('RNAstructScore') !=-1:
        structure_features.append(ASO_score_data.columns[i])
# build gene info features
# NOTE(review): list(set(...)) has nondeterministic order and [1:] drops
# one arbitrary gene from the pool — confirm this is intentional.
gene_pool = list(set(ASO_score_data["GeneID"]))[1:]
gene_info = ASO_score_data["GeneID"].apply(lambda x: one_hot_gene(x, gene_pool))
#####build kmers(2-5 mer) features
symbol_2mers = get_kmers(2)
symbol_3mers = get_kmers(3)
symbol_4mers = get_kmers(4)
symbol_5mers = get_kmers(5)
features_2mer = get_features(ASO_score_data["ASOseq"], symbol_2mers)
features_3mer = get_features(ASO_score_data["ASOseq"], symbol_3mers)
features_4mer = get_features(ASO_score_data["ASOseq"], symbol_4mers)
features_5mer = get_features(ASO_score_data["ASOseq"], symbol_5mers)
#####construct X (row-wise concatenation of all feature columns)
X = location_info.to_list()
X = combine_features(X, gene_info.to_list())
X = combine_features(X, features_2mer)
X = combine_features(X, features_3mer)
X = combine_features(X, features_4mer)
X = combine_features(X, features_5mer)
X = combine_features(X, ASO_score_data["chr"].to_list())
X = combine_features(X, ASO_score_data["AtoIeditingScore1"].to_list())
X = combine_features(X, ASO_score_data["RBPscore1"].to_list())
for i in range(len(structure_features)):
    X = combine_features(X, ASO_score_data[structure_features[i]].to_list())
#####construct Y (regression target)
Y = ASO_score_data["ASOeffective"]
#####construct feature names (one per X column, same order as above)
ATCG_identity = ['A','C','G','T']
features = ["1"]*4*22
for i in range(len(features)):
    base = i%4
    features[i] = 'if position ' + str(i//4) +' is ' + ATCG_identity[base]
#features = features + gene_pool + symbol_2mers + symbol_3mers
features = features + gene_pool + symbol_2mers + symbol_3mers +symbol_4mers + symbol_5mers\
    + ['chr', "AtoIeditingScore1", "RBPscore1"] + structure_features
features = np.array(features)
#####split test/train (fixed seed for reproducibility)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
X_train = np.array(X_train)
X_test = np.array(X_test)
Y_train = np.array(Y_train)
Y_test = np.array(Y_test)
#####training model
tfbs_classifier = RandomForestRegressor(n_estimators=100)
tfbs_classifier.fit(X_train, Y_train)
Y_pred = tfbs_classifier.predict(X_test)
#####evaluate the model
error = mean_squared_error(Y_test, Y_pred)
print('mean_squared_error: ', error)
#####analyze the importance of each feature (top 50, ascending)
importances = tfbs_classifier.feature_importances_
std = np.std([tree.feature_importances_ for tree in tfbs_classifier.estimators_],
             axis=0)
indices = np.argsort(importances)
indices = indices[-50:]
print("Feature ranking:")
for f in range(len(indices)):
    print("%d. feature %s (%f)" % (f + 1, features[indices[f]], importances[indices[f]]))
##### Plot feature importance
fig, ax = plt.subplots(figsize=(15,15))
plt.title("Feature importances")
#ax.barh(range(len(indices)), importances[indices], color="r", xerr=std[indices], align="center")
ax.barh(range(len(indices)), importances[indices], color="r", align="center")
ax.set_yticks(range(len(indices)))
ax.set_yticklabels(features[indices])
fig.savefig('../figure/feature_importance.png', bbox_inches='tight', dpi=200)
#plt.show()
'''
#visual inspection to the predicted data
plt.plot(Y_test, Y_pred, '.')
plt.xlabel('Real RNA effective')
plt.show()
'''
ab84c8185e73685f87a8bbf6bdde4531e8020b45 | 23,589 | py | Python | tally_ho/apps/tally/tests/views/test_quality_control.py | crononauta/tally-ho | ba2207bfaef27bee3ff13a393983ca493f767238 | [
"Apache-2.0"
] | null | null | null | tally_ho/apps/tally/tests/views/test_quality_control.py | crononauta/tally-ho | ba2207bfaef27bee3ff13a393983ca493f767238 | [
"Apache-2.0"
] | null | null | null | tally_ho/apps/tally/tests/views/test_quality_control.py | crononauta/tally-ho | ba2207bfaef27bee3ff13a393983ca493f767238 | [
"Apache-2.0"
] | null | null | null | from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from tally_ho.apps.tally.views import quality_control as views
from tally_ho.apps.tally.models.quality_control import QualityControl
from tally_ho.apps.tally.models.result_form import ResultForm
from tally_ho.libs.models.enums.form_state import FormState
from tally_ho.libs.permissions import groups
from tally_ho.libs.tests.test_base import create_candidates,\
create_reconciliation_form, create_recon_forms, create_result_form,\
create_center, create_station, TestBase
| 45.189655 | 80 | 0.666455 | from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from tally_ho.apps.tally.views import quality_control as views
from tally_ho.apps.tally.models.quality_control import QualityControl
from tally_ho.apps.tally.models.result_form import ResultForm
from tally_ho.libs.models.enums.form_state import FormState
from tally_ho.libs.permissions import groups
from tally_ho.libs.tests.test_base import create_candidates,\
create_reconciliation_form, create_recon_forms, create_result_form,\
create_center, create_station, TestBase
def create_quality_control(result_form, user):
    """Create and return a QualityControl owned by *user* for *result_form*."""
    return QualityControl.objects.create(
        user=user,
        result_form=result_form,
    )
class TestQualityControl(TestBase):
def setUp(self):
self.factory = RequestFactory()
self._create_permission_groups()
def _common_view_tests(self, view):
request = self.factory.get('/')
request.user = AnonymousUser()
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertEqual('/accounts/login/?next=/', response['location'])
self._create_and_login_user()
request.user = self.user
with self.assertRaises(PermissionDenied):
view(request)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
response = view(request)
response.render()
self.assertIn('/accounts/logout/', response.content)
return response
def test_quality_control_get(self):
response = self._common_view_tests(views.QualityControlView.as_view())
self.assertContains(response, 'Quality Control')
self.assertIn('<form id="result_form"', response.content)
def test_quality_control_post(self):
barcode = '123456789'
create_result_form(barcode,
form_state=FormState.QUALITY_CONTROL)
self._create_and_login_user()
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlView.as_view()
data = {'barcode': barcode, 'barcode_copy': barcode}
request = self.factory.post('/', data=data)
request.user = self.user
request.session = {}
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertIn('quality-control/dashboard',
response['location'])
result_form = ResultForm.objects.get(barcode=barcode)
self.assertEqual(result_form.form_state, FormState.QUALITY_CONTROL)
self.assertEqual(result_form.qualitycontrol.user, self.user)
def test_dashboard_abort_post(self):
barcode = '123456789'
create_result_form(barcode,
form_state=FormState.QUALITY_CONTROL)
self._create_and_login_user()
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
result_form = ResultForm.objects.get(barcode=barcode)
create_quality_control(result_form, self.user)
view = views.QualityControlDashboardView.as_view()
data = {'result_form': result_form.pk,
'abort': 1}
request = self.factory.post('/', data=data)
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
quality_control = result_form.qualitycontrol_set.all()[0]
self.assertEqual(quality_control.active, False)
self.assertEqual(response.status_code, 302)
self.assertIn('quality-control/home',
response['location'])
self.assertEqual(request.session, {})
def test_dashboard_submit_post(self):
barcode = '123456789'
create_result_form(barcode,
form_state=FormState.QUALITY_CONTROL)
self._create_and_login_user()
result_form = ResultForm.objects.get(barcode=barcode)
quality_control = create_quality_control(result_form, self.user)
quality_control.passed_general = True
quality_control.passed_reconciliation = True
quality_control.passed_women = True
quality_control.save()
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
data = {'result_form': result_form.pk,
'correct': 1}
request = self.factory.post('/', data=data)
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
result_form = ResultForm.objects.get(pk=result_form.pk)
self.assertEqual(response.status_code, 302)
self.assertIn('quality-control/success',
response['location'])
self.assertEqual(result_form.form_state, FormState.ARCHIVING)
def test_dashboard_get_double_recon(self):
barcode = '123456789'
self._create_and_login_user()
center = create_center()
station = create_station(center=center)
result_form = create_result_form(barcode,
center=center,
station_number=station.station_number,
form_state=FormState.QUALITY_CONTROL)
create_reconciliation_form(result_form, self.user)
create_reconciliation_form(result_form, self.user)
create_candidates(result_form, self.user)
create_quality_control(result_form, self.user)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
request = self.factory.get('/')
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'General Results Section')
self.assertContains(response, 'Abort')
def test_dashboard_get_double_recon_raise(self):
barcode = '123456789'
self._create_and_login_user()
center = create_center()
station = create_station(center=center)
result_form = create_result_form(barcode,
center=center,
station_number=station.station_number,
form_state=FormState.QUALITY_CONTROL)
create_reconciliation_form(result_form, self.user)
create_reconciliation_form(result_form, self.user,
ballot_number_from=2)
create_candidates(result_form, self.user)
create_quality_control(result_form, self.user)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
request = self.factory.get('/')
request.user = self.user
request.session = {'result_form': result_form.pk}
with self.assertRaises(SuspiciousOperation):
view(request)
def test_dashboard_get(self):
barcode = '123456789'
self._create_and_login_user()
center = create_center()
station = create_station(center=center)
create_result_form(barcode,
center=center,
station_number=station.station_number,
form_state=FormState.QUALITY_CONTROL)
result_form = ResultForm.objects.get(barcode=barcode)
create_candidates(result_form, self.user)
create_quality_control(result_form, self.user)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
request = self.factory.get('/')
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
self.assertEqual(response.status_code, 200)
# gender has been removed
# self.assertContains(response, str(result_form.gender_name))
self.assertContains(response, 'General Results Section')
self.assertNotContains(response, 'Reconciliation')
self.assertContains(response, 'Abort')
def test_reconciliation_get(self):
barcode = '123456789'
create_result_form(barcode,
form_state=FormState.QUALITY_CONTROL)
result_form = ResultForm.objects.get(barcode=barcode)
self._create_and_login_user()
create_reconciliation_form(result_form, self.user)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
request = self.factory.get('/')
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
response.render()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Reconciliation')
self.assertContains(response, 'Total number of the sorted and counted')
def test_reconciliation_post_correct(self):
self._create_and_login_user()
barcode = '123456789'
create_result_form(barcode,
form_state=FormState.QUALITY_CONTROL)
result_form = ResultForm.objects.get(barcode=barcode)
create_quality_control(result_form, self.user)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
data = {'result_form': result_form.pk, 'correct': 1}
request = self.factory.post('/', data=data)
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
result_form.reload()
self.assertEqual(response.status_code, 302)
self.assertIn('quality-control/success',
response['location'])
quality_control = QualityControl.objects.get(
pk=result_form.qualitycontrol.pk)
self.assertEqual(result_form.form_state, FormState.ARCHIVING)
self.assertTrue(quality_control.passed_reconciliation)
def test_reconciliation_post_incorrect(self):
self._create_and_login_user()
barcode = '123456789'
create_result_form(barcode,
form_state=FormState.QUALITY_CONTROL)
result_form = ResultForm.objects.get(barcode=barcode)
create_recon_forms(result_form, self.user)
create_candidates(result_form, self.user)
create_quality_control(result_form, self.user)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
data = {'result_form': result_form.pk, 'incorrect': 1}
request = self.factory.post('/', data=data)
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertIn('quality-control/reject',
response['location'])
self.assertEqual(result_form.rejected_count, 0)
result_form = ResultForm.objects.get(pk=result_form.pk)
quality_control = result_form.qualitycontrol_set.all()[0]
results = result_form.results.all()
self.assertTrue(len(results) > 0)
for result in results:
self.assertEqual(result.active, False)
recon_forms = result_form.reconciliationform_set.all()
self.assertTrue(len(recon_forms) > 0)
for recon in recon_forms:
self.assertEqual(recon.active, False)
self.assertEqual(result_form.form_state, FormState.DATA_ENTRY_1)
self.assertEqual(result_form.rejected_count, 1)
self.assertEqual(quality_control.active, False)
self.assertEqual(quality_control.passed_reconciliation, False)
def test_reconciliation_post_abort(self):
self._create_and_login_user()
barcode = '123456789'
create_result_form(barcode,
form_state=FormState.QUALITY_CONTROL)
result_form = ResultForm.objects.get(barcode=barcode)
create_quality_control(result_form, self.user)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
data = {'result_form': result_form.pk, 'abort': 1}
request = self.factory.post('/', data=data)
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
self.assertEqual(response.status_code, 302)
self.assertIn('quality-control/home',
response['location'])
quality_control = result_form.qualitycontrol_set.all()[0]
self.assertEqual(quality_control.active, False)
def test_general_get(self):
barcode = '123456789'
center = create_center()
station = create_station(center=center)
create_result_form(barcode,
center=center,
station_number=station.station_number,
form_state=FormState.QUALITY_CONTROL)
result_form = ResultForm.objects.get(barcode=barcode)
self._create_and_login_user()
name = 'the candidate name'
women_name = 'women candidate name'
votes = 123
create_candidates(result_form, self.user, name, votes, women_name)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
request = self.factory.get('/')
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
response.render()
self.assertEqual(response.status_code, 200)
# gender has been removed
# self.assertContains(response, str(result_form.gender_name))
self.assertContains(response, 'General')
self.assertContains(response, name)
self.assertContains(response, women_name)
self.assertContains(response, str(votes))
def test_general_post_correct(self):
self._create_and_login_user()
barcode = '123456789'
create_result_form(barcode,
form_state=FormState.QUALITY_CONTROL)
result_form = ResultForm.objects.get(barcode=barcode)
create_quality_control(result_form, self.user)
self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
view = views.QualityControlDashboardView.as_view()
data = {'result_form': result_form.pk, 'correct': 1}
request = self.factory.post('/', data=data)
request.user = self.user
request.session = {'result_form': result_form.pk}
response = view(request)
result_form.reload()
self.assertEqual(response.status_code, 302)
self.assertIn('quality-control/success',
response['location'])
quality_control = QualityControl.objects.get(
pk=result_form.qualitycontrol.pk)
self.assertEqual(result_form.form_state, FormState.ARCHIVING)
self.assertTrue(quality_control.passed_general)
    def test_general_post_incorrect(self):
        """POSTing 'incorrect' rejects the form: all results and the QC record are
        deactivated, the form returns to DATA_ENTRY_1 and rejected_count increments."""
        self._create_and_login_user()
        barcode = '123456789'
        create_result_form(barcode,
                           form_state=FormState.QUALITY_CONTROL)
        result_form = ResultForm.objects.get(barcode=barcode)
        create_quality_control(result_form, self.user)
        create_candidates(result_form, self.user)
        self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
        view = views.QualityControlDashboardView.as_view()
        data = {'result_form': result_form.pk, 'incorrect': 1}
        request = self.factory.post('/', data=data)
        request.user = self.user
        request.session = {'result_form': result_form.pk}
        response = view(request)
        self.assertEqual(response.status_code, 302)
        self.assertIn('quality-control/reject',
                      response['location'])
        # The in-memory instance is stale: it still shows the pre-rejection count.
        self.assertEqual(result_form.rejected_count, 0)
        result_form = ResultForm.objects.get(pk=result_form.pk)
        quality_control = result_form.qualitycontrol_set.all()[0]
        results = result_form.results.all()
        self.assertTrue(len(results) > 0)
        for result in results:
            self.assertEqual(result.active, False)
        self.assertEqual(result_form.form_state, FormState.DATA_ENTRY_1)
        self.assertEqual(result_form.rejected_count, 1)
        self.assertEqual(quality_control.active, False)
        self.assertEqual(quality_control.passed_general, False)
    def test_general_post_abort(self):
        """POSTing 'abort' deactivates the QC record and redirects back to the
        quality-control home page without changing the form state."""
        self._create_and_login_user()
        barcode = '123456789'
        create_result_form(barcode,
                           form_state=FormState.QUALITY_CONTROL)
        result_form = ResultForm.objects.get(barcode=barcode)
        create_quality_control(result_form, self.user)
        self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
        view = views.QualityControlDashboardView.as_view()
        data = {'result_form': result_form.pk, 'abort': 1}
        request = self.factory.post('/', data=data)
        request.user = self.user
        request.session = {'result_form': result_form.pk}
        response = view(request)
        self.assertEqual(response.status_code, 302)
        self.assertIn('quality-control/home',
                      response['location'])
        quality_control = result_form.qualitycontrol_set.all()[0]
        self.assertEqual(quality_control.active, False)
    def test_women_get(self):
        """GET as a QC/archive clerk renders the dashboard with the women-race
        section alongside the general candidate data."""
        barcode = '123456789'
        center = create_center()
        station = create_station(center=center)
        create_result_form(barcode,
                           center=center,
                           station_number=station.station_number,
                           form_state=FormState.QUALITY_CONTROL)
        result_form = ResultForm.objects.get(barcode=barcode)
        self._create_and_login_user()
        name = 'general candidate name'
        women_name = 'women candidate name'
        votes = 123
        create_candidates(result_form, self.user, name, votes, women_name)
        self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
        view = views.QualityControlDashboardView.as_view()
        request = self.factory.get('/')
        request.user = self.user
        # The view resolves the form under review from the session, not the URL.
        request.session = {'result_form': result_form.pk}
        response = view(request)
        response.render()
        self.assertEqual(response.status_code, 200)
        # gender has been removed
        # self.assertContains(response, str(result_form.gender_name))
        self.assertContains(response, 'Women')
        self.assertContains(response, women_name)
        self.assertContains(response, name)
        self.assertContains(response, str(votes))
    def test_women_post_correct(self):
        """POSTing 'correct' passes women QC: the form advances to ARCHIVING and
        the QC record has passed_women set."""
        self._create_and_login_user()
        barcode = '123456789'
        create_result_form(barcode,
                           form_state=FormState.QUALITY_CONTROL)
        result_form = ResultForm.objects.get(barcode=barcode)
        create_quality_control(result_form, self.user)
        self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
        view = views.QualityControlDashboardView.as_view()
        data = {'result_form': result_form.pk, 'correct': 1}
        request = self.factory.post('/', data=data)
        request.user = self.user
        request.session = {'result_form': result_form.pk}
        response = view(request)
        # Refresh from the database so the state change made by the view is visible.
        result_form.reload()
        self.assertEqual(response.status_code, 302)
        self.assertIn('quality-control/success',
                      response['location'])
        quality_control = result_form.qualitycontrol_set.all()[0]
        self.assertEqual(result_form.form_state, FormState.ARCHIVING)
        self.assertTrue(quality_control.passed_women)
    def test_women_post_incorrect(self):
        """POSTing 'incorrect' rejects the form on the women race: results and QC
        record deactivated, state back to DATA_ENTRY_1, rejected_count bumped."""
        self._create_and_login_user()
        barcode = '123456789'
        create_result_form(barcode,
                           form_state=FormState.QUALITY_CONTROL)
        result_form = ResultForm.objects.get(barcode=barcode)
        create_quality_control(result_form, self.user)
        create_candidates(result_form, self.user)
        self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
        view = views.QualityControlDashboardView.as_view()
        data = {'result_form': result_form.pk, 'incorrect': 1}
        request = self.factory.post('/', data=data)
        request.user = self.user
        request.session = {'result_form': result_form.pk}
        response = view(request)
        self.assertEqual(response.status_code, 302)
        self.assertIn('quality-control/reject',
                      response['location'])
        # The in-memory instance is stale: it still shows the pre-rejection count.
        self.assertEqual(result_form.rejected_count, 0)
        result_form = ResultForm.objects.get(pk=result_form.pk)
        quality_control = result_form.qualitycontrol_set.all()[0]
        results = result_form.results.all()
        self.assertTrue(len(results) > 0)
        for result in results:
            self.assertEqual(result.active, False)
        self.assertEqual(result_form.form_state, FormState.DATA_ENTRY_1)
        self.assertEqual(result_form.rejected_count, 1)
        self.assertEqual(quality_control.active, False)
        self.assertEqual(quality_control.passed_women, False)
    def test_women_post_abort(self):
        """POSTing 'abort' on the women check deactivates the QC record and
        redirects back to the quality-control home page."""
        self._create_and_login_user()
        barcode = '123456789'
        create_result_form(barcode,
                           form_state=FormState.QUALITY_CONTROL)
        result_form = ResultForm.objects.get(barcode=barcode)
        create_quality_control(result_form, self.user)
        self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
        view = views.QualityControlDashboardView.as_view()
        data = {'result_form': result_form.pk, 'abort': 1}
        request = self.factory.post('/', data=data)
        request.user = self.user
        request.session = {'result_form': result_form.pk}
        response = view(request)
        self.assertEqual(response.status_code, 302)
        self.assertIn('quality-control/home',
                      response['location'])
        quality_control = result_form.qualitycontrol_set.all()[0]
        self.assertEqual(quality_control.active, False)
    def test_confirmation_get(self):
        """The confirmation page renders for a form in ARCHIVING and clears the
        'result_form' key from the session."""
        result_form = create_result_form(form_state=FormState.ARCHIVING)
        self._create_and_login_user()
        self._add_user_to_group(self.user, groups.QUALITY_CONTROL_ARCHIVE_CLERK)
        view = views.ConfirmationView.as_view()
        request = self.factory.get('/')
        request.user = self.user
        request.session = {'result_form': result_form.pk}
        response = view(request)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Archiving')
        self.assertContains(response, reverse('quality-control'))
        # Viewing the confirmation ends the session's review of this form.
        self.assertEqual(request.session.get('result_form'), None)
| 22,244 | 14 | 639 |
5d8655e0db66bd2687b2937d4f861817bc448e33 | 1,072 | py | Python | tests/test_RuleEngine.py | IVIR3zaM/KafkaRulesManager | 848b349e79cfa8f1c9afa79831d1b552b3905410 | [
"MIT"
] | 1 | 2020-10-18T14:33:26.000Z | 2020-10-18T14:33:26.000Z | tests/test_RuleEngine.py | IVIR3zaM/KafkaRulesManager | 848b349e79cfa8f1c9afa79831d1b552b3905410 | [
"MIT"
] | null | null | null | tests/test_RuleEngine.py | IVIR3zaM/KafkaRulesManager | 848b349e79cfa8f1c9afa79831d1b552b3905410 | [
"MIT"
] | null | null | null | from kafka_rules_manager import RuleEngine
| 28.210526 | 78 | 0.758396 | from kafka_rules_manager import RuleEngine
def test_no_match_no_change():
    """A message that matches no rule keeps its status and gets no send time."""
    engine = RuleEngine.Engine((RuleEngine.Rule("lorem"),))
    msg = RuleEngine.Message("Some Ipsum", RuleEngine.MessageStatuses.NEW)
    engine.process_message(msg)
    assert msg.status == RuleEngine.MessageStatuses.NEW
    assert msg.should_send_at is None
def test_match_only_status_change():
    """A matching rule flips the status to READY but leaves should_send_at unset."""
    engine = RuleEngine.Engine((RuleEngine.Rule("ipsum"),))
    msg = RuleEngine.Message("Some Ipsum", RuleEngine.MessageStatuses.NEW)
    engine.process_message(msg)
    assert msg.status == RuleEngine.MessageStatuses.READY
    assert msg.should_send_at is None
def test_match_second_rule_applies():
    """When only the second of two rules matches, it still applies."""
    rule_pair = (RuleEngine.Rule("lorem"), RuleEngine.Rule("ipsum"))
    engine = RuleEngine.Engine(rule_pair)
    msg = RuleEngine.Message("Some Ipsum", RuleEngine.MessageStatuses.NEW)
    engine.process_message(msg)
    assert msg.status == RuleEngine.MessageStatuses.READY
    assert msg.should_send_at is None
| 957 | 0 | 69 |
44c4d063f133515fa09f6e2667424f3d14d11a80 | 8,603 | py | Python | parse_mail.py | ulrichard/vouchergen | 698b60186c44209235c21ad502a66b19d003ef13 | [
"BSD-3-Clause"
] | 1 | 2019-06-24T20:51:06.000Z | 2019-06-24T20:51:06.000Z | parse_mail.py | ulrichard/vouchergen | 698b60186c44209235c21ad502a66b19d003ef13 | [
"BSD-3-Clause"
] | 1 | 2015-03-07T01:01:49.000Z | 2015-03-07T01:01:49.000Z | parse_mail.py | ulrichard/vouchergen | 698b60186c44209235c21ad502a66b19d003ef13 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/python
import email, smtplib, tidy, os, datetime, csv, subprocess, locale, time, inspect, sys
from lxml import etree
from email.mime.text import MIMEText
# allow import from subdirectory
currentDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
btcuDir = currentDir + '/bitcoinutilities'
if btcuDir not in sys.path:
sys.path.append(btcuDir)
import pycoin.key.BIP32Node
import blockchain_info
# see http://docs.python.org/2/library/email-examples.html
# test code
if __name__ == "__main__":
if not os.path.exists('tmp'):
os.mkdir('tmp')
locale.setlocale(locale.LC_TIME, '')
# information gathering
mailer = Mailer()
infos = mailer.Parse("../Ihre_Anfrage.mbox")
overview = Overview('../GutscheineUebersicht.csv')
voucherNumber = overview.findNextVoucherNbr()
infos['VoucherNumber'] = str(voucherNumber)
# find a bitcoin address
bitCoinAddr = BitCoinAddr('../BitCoinXPub.txt').GetNext()
infos['xbtAddress'] = str(bitCoinAddr)
print('*' + infos['xbtAddress'] + '*')
# generate the qr codes
qrInfoString = 'http://paraeasy.ch\n' \
+ 'GutscheinNr: ' + infos['VoucherNumber'] + '\n' \
+ 'FlugTyp: ' + infos['FlightType'] + '\n' \
+ 'Passagier: ' + infos['Name des Beschenkten'] + '\n' \
+ 'BitCoin: ' + infos['xbtAddress'] + '\n'
print qrInfoString
infof = open('../pdf/' + voucherNumber + '_infos.txt', 'wt')
infof.write(qrInfoString)
infof.close()
key_id = os.environ['GPGKEY']
subprocess.call(['gpg', '-u', key_id, '--clearsign', 'pdf/' + voucherNumber + '_infos.txt'], cwd='../')
infofs = open('../pdf/' + voucherNumber + '_infos.txt.asc', 'rt')
qrInfoString = infofs.read()
infofs.close()
os.remove('../pdf/' + voucherNumber + '_infos.txt')
os.remove('../pdf/' + voucherNumber + '_infos.txt.asc')
infos['QrInfoFile'] = 'tmp/qr_' + infos['VoucherNumber'] + '.png'
print('writing ', infos['QrInfoFile'])
p = subprocess.Popen(['qrencode', '-o', infos['QrInfoFile']], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
p.stdin.write(qrInfoString)
p.communicate()[0]
p.stdin.close()
if not os.path.isfile(infos['QrInfoFile']):
raise ValueError('qr image file not written')
# prepare the documents
latex = LaTex(infos, 'tmp')
files = ['Gutschein.tex', 'Rechnung.tex']
for texFile in files:
pdfFile = latex.ToPdf(latex.Prepare(texFile))
subprocess.call(['evince', pdfFile])
subprocess.call(['git', 'add', str(pdfFile)], cwd='../')
# accounting
overview.addEntry(infos)
subprocess.call(['git', 'add', 'GutscheineUebersicht.csv'], cwd='../')
print infos
| 37.732456 | 118 | 0.55748 | #! /usr/bin/python
import email, smtplib, tidy, os, datetime, csv, subprocess, locale, time, inspect, sys
from lxml import etree
from email.mime.text import MIMEText
# allow import from subdirectory
currentDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
btcuDir = currentDir + '/bitcoinutilities'
if btcuDir not in sys.path:
sys.path.append(btcuDir)
import pycoin.key.BIP32Node
import blockchain_info
# see http://docs.python.org/2/library/email-examples.html
class Mailer:
    """Extracts voucher-order data from an HTML form e-mail (Python 2 code)."""
    def Parse(self, mailFile):
        """Parse *mailFile* (mbox with one message) and return an order dict.

        The dict is keyed by the form's field labels (umlauts ASCII-folded),
        plus derived entries FlightType/FlightPrice and the mail Subject.
        """
        fp = open(mailFile, 'rb')
        msg = email.message_from_file(fp)
        cont = msg.get_payload()
        # Run the HTML body through tidy so it becomes well-formed XHTML.
        options = dict(output_xhtml=1, add_xml_decl=0, indent=1, tidy_mark=0)
        td = tidy.parseString(cont, **options)
        ss = str(td)
        # Cut the document down to the first <table>, then drop the trailing
        # rows starting at the first 'colspan' cell (summary/footer rows).
        ss = ss[ss.find('<table '):]
        ss = ss[:ss.find('</table>') + 8]
        ss = ss[:ss.find('colspan')]
        ss = ss[:ss.rfind('<tr>')]
        ss = ss.replace('<tbody>', '');
        # ASCII-fold German umlauts so keys match the lookup tables below.
        ss = ss.replace('ü', 'u');
        ss = ss.replace('ä', 'a');
        ss = ss.replace('ö', 'o');
        ss = ss.replace('Ä', 'A');
        ss = ss.replace('Ö', 'O');
        ss = ss.replace('ü', 'u');
        ss = ss + '</table>'
        #http://stackoverflow.com/questions/6325216/parse-html-table-to-python-list
        table = etree.XML(ss)
        infos = {}
        # direct infos: each row is label / spacer / value
        rows = iter(table)
        for row in rows:
            values = [col.text for col in row]
            key = str(values[0]).replace('\n', '').strip()
            val = str(values[2]).replace('\n', '').strip()
            infos[key] = val
        # processed infos: map the combined "type for price" option to its parts
        flightTypeAndPrice = infos['Bitte wahlen Sie Ihren Flug aus']
        flights = {
            'Schnupperflug fur 160.00 CHF' : ['Schnupperflug', '160'],
            'Genussflug fur 200.00 CHF' : ['Genussflug', '200'],
            'Panoramaflug fur 250.00 CHF' : ['Panoramaflug', '250'],
            'Pilot fur einen Tag fur 520.00 CHF' : ['Pilot 4 a day', '520'],
        }
        infos['FlightType'] = flights[flightTypeAndPrice][0]
        infos['FlightPrice'] = flights[flightTypeAndPrice][1]
        #meta infos
        infos['Subject'] = msg['Subject']
        return infos
    def Send(self):
        # NOTE(review): stub — builds a message but never sends it; presumably
        # unfinished. Confirm before relying on it.
        msg = MIMEText("Hallo")
class LaTex:
    """Fills @Placeholder@ tokens in LaTeX templates and compiles them to PDF."""
    def __init__(self, values, directory):
        # values: the parsed order dict from Mailer.Parse plus generated fields
        # (VoucherNumber, QrInfoFile, xbtAddress, ...).
        self.values = values
        # directory: working directory for the substituted .tex files.
        self.directory = directory
    def Prepare(self, fileName):
        """Substitute all placeholders in template *fileName*; return the new path."""
        fpi = open(fileName, 'rt')
        ss = fpi.read()
        numFlights = 1
        ss = ss.replace('@CustomerName@', self.values['Vor- und Nachname'])
        ss = ss.replace('@CustomerAddress@', self.values['Strasse und Hausnummer'])
        ss = ss.replace('@CustomerZIP@', self.values['Postleitzahl'])
        ss = ss.replace('@CustomerCity@', self.values['Ort'])
        ss = ss.replace('@CustomerEMail@', self.values['Ihre E-Mail'])
        ss = ss.replace('@CustomerPhone@', self.values['Rufnummer (fur Ruckfragen)'])
        ss = ss.replace('@PassengerName@', self.values['Name des Beschenkten'])
        ss = ss.replace('@VoucherNumber@', self.values['VoucherNumber'])
        # Vouchers are valid roughly 13 months from today (400 days).
        ss = ss.replace('@ValidUntil@', (datetime.date.today() + datetime.timedelta(days=400)).strftime('%B %Y'))
        ss = ss.replace('@FlightType@', self.values['FlightType'])
        ss = ss.replace('@FlightCount@', str(numFlights))
        ss = ss.replace('@FlightPrice@', self.values['FlightPrice'])
        ss = ss.replace('@TotalPrice@', str(numFlights * int(self.values['FlightPrice'])))
        ss = ss.replace('@QrInfoFile@', self.values['QrInfoFile'])
        ss = ss.replace('@xbtAddress@', self.values['xbtAddress'])
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        outFileName = self.directory + '/' + fileName
        fpo = open(outFileName, 'wt')
        fpo.write(ss)
        return outFileName
    def ToPdf(self, fileName):
        """Run pdflatex on *fileName*, move the PDF into ../pdf/, return its path."""
        subprocess.call(['pdflatex', fileName])
        fileName = os.path.basename(fileName.replace('.tex', ''))
        # Invoices ('Rechnung') carry the buyer's name, vouchers the passenger's.
        if fileName == 'Rechnung':
            persName = self.values['Vor- und Nachname']
        else:
            persName = self.values['Name des Beschenkten']
        pdfName = os.getcwd() + '/../pdf/' + self.values['VoucherNumber'] + '_' + fileName + '_' + persName + '.pdf'
        os.rename(os.getcwd() + '/' + fileName + '.pdf', pdfName)
        return pdfName
class Overview:
    """CSV-backed ledger of issued vouchers (one semicolon-separated row each)."""
    def __init__(self, fileName):
        # Path of the overview CSV file.
        self.fileName = fileName
    def findNextVoucherNbr(self):
        """Return the next free voucher number for today, formatted YYMMDDNN.

        Scans existing rows for today's date prefix and returns prefix + the
        highest sequence number + 1; starts at 01 if the file is missing/empty.
        """
        today = datetime.date.today()
        pref = '%02d%02d%02d' % (today.year % 1000, today.month, today.day)
        cnt = 0
        try:
            for row in csv.reader(open(self.fileName, 'rt'), delimiter=';'):
                if len(row) >= 6:
                    vfld = row[0]
                    if vfld[0:6] == pref:
                        cnt = int(vfld[6:])
            return pref + ('%02d' % (cnt + 1))
        except Exception as ex:
            # File missing or unreadable: start numbering at 01 for today.
            print(ex)
            return pref + '01'
    def addEntry(self, values):
        """Append one voucher row built from *values* to the overview CSV."""
        csvwriter = csv.writer(open(self.fileName, 'at'), delimiter=';')
        row = [ values['VoucherNumber']
              , values['FlightType']
              , values['Name des Beschenkten']
              , values['Vor- und Nachname'] + ', ' + values['Strasse und Hausnummer']
                + ', ' + values['Postleitzahl'] + ' ' + values['Ort']
              , values['FlightPrice']
              # BUGFIX: previously read the module-global 'infos' instead of the
              # 'values' parameter, which broke every caller outside __main__.
              , values['xbtAddress']
              ]
        csvwriter.writerow(row)
class BitCoinAddr:
    """Derives the next unused receive address from a BIP32 extended public key."""
    def __init__(self, fileName):
        # The file holds a single line: the account xpub (HWIF string).
        fp = open(fileName, 'rt')
        self.xpub = fp.readline().rstrip('\n')
        fp.close()
    def GetNext(self):
        """Return the first derived address (path 0/i) with no transactions yet.

        Queries blockchain.info for each candidate; network access required.
        """
        kk = pycoin.key.BIP32Node.BIP32Node.from_hwif(self.xpub)
        for i in range(99999):
            keypath = "0/%d.pub" % i
            addr = kk.subkey_for_path(keypath).address()
            #print i, j, addr
            ledger = blockchain_info.blockchain(addr, False)
            if ledger.tx_count() == 0:
                return addr
# test code
# End-to-end pipeline: parse the order mail, allocate a voucher number and a
# fresh bitcoin address, build a GPG-signed QR code, compile voucher + invoice
# PDFs, and record the sale in the overview CSV. (Python 2 script.)
if __name__ == "__main__":
    if not os.path.exists('tmp'):
        os.mkdir('tmp')
    locale.setlocale(locale.LC_TIME, '')
    # information gathering
    mailer = Mailer()
    infos = mailer.Parse("../Ihre_Anfrage.mbox")
    overview = Overview('../GutscheineUebersicht.csv')
    voucherNumber = overview.findNextVoucherNbr()
    infos['VoucherNumber'] = str(voucherNumber)
    # find a bitcoin address
    bitCoinAddr = BitCoinAddr('../BitCoinXPub.txt').GetNext()
    infos['xbtAddress'] = str(bitCoinAddr)
    print('*' + infos['xbtAddress'] + '*')
    # generate the qr codes
    qrInfoString = 'http://paraeasy.ch\n' \
                 + 'GutscheinNr: ' + infos['VoucherNumber'] + '\n' \
                 + 'FlugTyp: ' + infos['FlightType'] + '\n' \
                 + 'Passagier: ' + infos['Name des Beschenkten'] + '\n' \
                 + 'BitCoin: ' + infos['xbtAddress'] + '\n'
    print qrInfoString
    infof = open('../pdf/' + voucherNumber + '_infos.txt', 'wt')
    infof.write(qrInfoString)
    infof.close()
    # Clear-sign the voucher info with the key named in $GPGKEY, then embed the
    # signed text (not the plain one) in the QR code.
    key_id = os.environ['GPGKEY']
    subprocess.call(['gpg', '-u', key_id, '--clearsign', 'pdf/' + voucherNumber + '_infos.txt'], cwd='../')
    infofs = open('../pdf/' + voucherNumber + '_infos.txt.asc', 'rt')
    qrInfoString = infofs.read()
    infofs.close()
    os.remove('../pdf/' + voucherNumber + '_infos.txt')
    os.remove('../pdf/' + voucherNumber + '_infos.txt.asc')
    infos['QrInfoFile'] = 'tmp/qr_' + infos['VoucherNumber'] + '.png'
    print('writing ', infos['QrInfoFile'])
    # Pipe the signed text into qrencode to produce the PNG.
    p = subprocess.Popen(['qrencode', '-o', infos['QrInfoFile']], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    p.stdin.write(qrInfoString)
    # NOTE(review): communicate() before stdin.close() is unusual ordering —
    # it works because communicate() closes stdin itself; confirm if touched.
    p.communicate()[0]
    p.stdin.close()
    if not os.path.isfile(infos['QrInfoFile']):
        raise ValueError('qr image file not written')
    # prepare the documents
    latex = LaTex(infos, 'tmp')
    files = ['Gutschein.tex', 'Rechnung.tex']
    for texFile in files:
        pdfFile = latex.ToPdf(latex.Prepare(texFile))
        subprocess.call(['evince', pdfFile])
        subprocess.call(['git', 'add', str(pdfFile)], cwd='../')
    # accounting
    overview.addEntry(infos)
    subprocess.call(['git', 'add', 'GutscheineUebersicht.csv'], cwd='../')
    print infos
| 5,459 | -26 | 359 |
0c6df9cb0d37ac8b13fd3faeca5ef0f44c8698b5 | 1,133 | py | Python | ProjectEuler/p031.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-01-30T13:21:30.000Z | 2018-01-30T13:21:30.000Z | ProjectEuler/p031.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | null | null | null | ProjectEuler/p031.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-08-29T13:26:50.000Z | 2018-08-29T13:26:50.000Z |
# Execution time : 0.001408 seconds
# Solution Explanation
# Let v = { v1, v2, ..., vn } be values
# We want to find in how many ways we can sum s with
# element of v if we can get as many items of v as we need
# ( We can choose as many times and element as we want )
# So, we can define the following recurrence
# sol(i,s) = if s<0 -> 0, if i <= n -> sol(i,s-v[i]) + sol(i+1,s), if i==n -> 1 if s=0 else 0
# So the answer is sol(1,200)
# And we note that there are overlapping cases in this recurrence
# So we can implement it using DP
import time
width = 40
if __name__=="__main__":
start_ = time.time()
print(' Answer -> %s '.center(width,'-') % ( solution() ))
print(' %f seconds '.center(width,'-') % ( time.time() - start_))
| 30.621622 | 93 | 0.595763 |
# Execution time : 0.001408 seconds
# Solution Explanation
# Let v = { v1, v2, ..., vn } be values
# We want to find in how many ways we can sum s with
# element of v if we can get as many items of v as we need
# ( We can choose as many times and element as we want )
# So, we can define the following recurrence
# sol(i,s) = if s<0 -> 0, if i <= n -> sol(i,s-v[i]) + sol(i+1,s), if i==n -> 1 if s=0 else 0
# So the answer is sol(1,200)
# And we note that there are overlapping cases in this recurrence
# So we can implement it using DP
import time
width = 40
def solution():
v = [1,2,5,10,20,50,100,200]
memo = [[-1 for i in range(210)] for j in range(10)]
def ways(idx,s):
if s<0: return 0
if idx==8 and s!=0: return 0
if idx==8 and s==0: return 1
if memo[idx][s]!=-1: return memo[idx][s]
memo[idx][s] = ways(idx,s-v[idx])+ways(idx+1,s)
return memo[idx][s]
return ways(0,200)
if __name__=="__main__":
start_ = time.time()
print(' Answer -> %s '.center(width,'-') % ( solution() ))
print(' %f seconds '.center(width,'-') % ( time.time() - start_))
| 362 | 0 | 23 |
61c2f0b08e78df0660d2c6a39451cea11268969f | 933 | py | Python | The Revolution of Prime Numbers/primenumbers_5.py | mralamdari/python_Adventure | 026372163612aaab4f4908732f2912b8dd5240fb | [
"MIT"
] | 2 | 2021-01-18T14:03:18.000Z | 2021-02-04T09:45:15.000Z | The Revolution of Prime Numbers/primenumbers_5.py | EFA2020/python_Adventure | f98585008ee50867e21f025ce01b68e36123fff3 | [
"MIT"
] | null | null | null | The Revolution of Prime Numbers/primenumbers_5.py | EFA2020/python_Adventure | f98585008ee50867e21f025ce01b68e36123fff3 | [
"MIT"
] | null | null | null | """
This is my fifth prime number project,
it is one of the fastest.
It will find all prime numbers between 2 and 'End'.
1. At first we create an Prime array to N
store True vlaue, and N is End + 1 == size
and another list containing 2 as our main prime list
2.we iterate all odd numbers in range of 3 and END,
if that number's value in prime list is True,
means that we have not used it, so it is prime,
we add it to our prime list then
we make all prime list values false where we find
this number's multiplies untill the END
"""
print(primeNumbers_6(100000))
| 28.272727 | 52 | 0.632369 | """
This is my fifth prime number project,
it is one of the fastest.
It will find all prime numbers between 2 and 'End'.
1. At first we create an Prime array to N
store True vlaue, and N is End + 1 == size
and another list containing 2 as our main prime list
2.we iterate all odd numbers in range of 3 and END,
if that number's value in prime list is True,
means that we have not used it, so it is prime,
we add it to our prime list then
we make all prime list values false where we find
this number's multiplies untill the END
"""
def primeNumbers_6(End):
    """Return all primes from 2 up to and including End (odds-only sieve).

    Fixes: End < 2 now correctly yields an empty list (the original returned
    [2]), and marking starts at i*i with stride 2*i instead of re-marking i
    itself and even multiples the loop never visits.
    """
    size = int(End) + 1
    if size <= 2:
        # No primes <= End when End < 2.
        return []
    is_prime = [True] * size
    primes = [2]
    for i in range(3, size, 2):
        if is_prime[i]:
            primes.append(i)
            # Smaller odd composites were already crossed off by smaller primes,
            # so start at i*i; stride 2*i skips the even multiples.
            for multiple in range(i * i, size, 2 * i):
                is_prime[multiple] = False
    return primes
print(primeNumbers_6(100000))
| 340 | 0 | 23 |
612c9d94cef45f4522dd83452ff711ebfc4e387e | 1,613 | py | Python | src/Filtering/BinaryMathematicalMorphology/ThinImage/Code.py | Leengit/ITKExamples | 9ab3696385a9fe82b4bbbadbdf7d3bb3b7079ec5 | [
"Apache-2.0"
] | null | null | null | src/Filtering/BinaryMathematicalMorphology/ThinImage/Code.py | Leengit/ITKExamples | 9ab3696385a9fe82b4bbbadbdf7d3bb3b7079ec5 | [
"Apache-2.0"
] | null | null | null | src/Filtering/BinaryMathematicalMorphology/ThinImage/Code.py | Leengit/ITKExamples | 9ab3696385a9fe82b4bbbadbdf7d3bb3b7079ec5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itk
import argparse
parser = argparse.ArgumentParser(description="Thin Image.")
parser.add_argument("input_image", nargs="?")
args = parser.parse_args()
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
if args.input_image:
image = itk.imread(args.input_image)
else:
# Create an image
start = itk.Index[Dimension]()
start.Fill(0)
size = itk.Size[Dimension]()
size.Fill(100)
region = itk.ImageRegion[Dimension]()
region.SetIndex(start)
region.SetSize(size)
image = ImageType.New(Regions=region)
image.Allocate()
image.FillBuffer(0)
# Draw a 5 pixel wide line
image[50:55, 20:80] = 255
# Write Image
itk.imwrite(image, "input.png")
image = itk.binary_thinning_image_filter(image)
# Rescale the image so that it can be seen (the output is 0 and 1, we want 0 and 255)
image = itk.rescale_intensity_image_filter(image, output_minimum=0, output_maximum=255)
# Write Image
itk.imwrite(image, "outputPython.png")
| 26.883333 | 87 | 0.725356 | #!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itk
import argparse
# Demonstrates itk.BinaryThinningImageFilter: thin a binary image to a
# one-pixel-wide skeleton and write it out as outputPython.png.
parser = argparse.ArgumentParser(description="Thin Image.")
parser.add_argument("input_image", nargs="?")
args = parser.parse_args()
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
if args.input_image:
    image = itk.imread(args.input_image)
else:
    # Create an image: no input path given, so build a synthetic 100x100
    # test image containing a single thick line.
    start = itk.Index[Dimension]()
    start.Fill(0)
    size = itk.Size[Dimension]()
    size.Fill(100)
    region = itk.ImageRegion[Dimension]()
    region.SetIndex(start)
    region.SetSize(size)
    image = ImageType.New(Regions=region)
    image.Allocate()
    image.FillBuffer(0)
    # Draw a 5 pixel wide line
    image[50:55, 20:80] = 255
    # Write Image
    itk.imwrite(image, "input.png")
# Thinning reduces the foreground to a skeleton of 0/1 pixel values.
image = itk.binary_thinning_image_filter(image)
# Rescale the image so that it can be seen (the output is 0 and 1, we want 0 and 255)
image = itk.rescale_intensity_image_filter(image, output_minimum=0, output_maximum=255)
# Write Image
itk.imwrite(image, "outputPython.png")
| 0 | 0 | 0 |
dabb92efc6b69498e1c95a7d244c96eb89f7d911 | 1,411 | py | Python | 06_project_example/run.py | alexarmstrongvi/Tutorial-Python-Logger | a5d69f05fe2e02cc5bb2d98243bb21d25d801e82 | [
"MIT"
] | null | null | null | 06_project_example/run.py | alexarmstrongvi/Tutorial-Python-Logger | a5d69f05fe2e02cc5bb2d98243bb21d25d801e82 | [
"MIT"
] | null | null | null | 06_project_example/run.py | alexarmstrongvi/Tutorial-Python-Logger | a5d69f05fe2e02cc5bb2d98243bb21d25d801e82 | [
"MIT"
] | null | null | null | import logger
log = logger.get_logger(__name__)
import argparse
import sys
import os
import subprocess
import module as mod
import subpackage.submodule as submod
if __name__ == '__main__':
args = get_args()
if args.log_level:
log.setLevel(args.log_level.upper())
main()
| 25.196429 | 76 | 0.639972 | import logger
log = logger.get_logger(__name__)
import argparse
import sys
import os
import subprocess
import module as mod
import subpackage.submodule as submod
def get_args():
    """Parse command-line options; currently only an optional log level."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-l', '--log-level')  # exposed as args.log_level
    return cli.parse_args()
def main():
    """Exercise the project logger: emit messages from this module, a sibling
    module and a subpackage module, mix in raw stdout/stderr and shell output,
    then finish with a deliberately unhandled exception."""
    log.info('='*40)
    for line in logger.log_summary_str(log).split('\n'):
        log.info(line)
    log.info('Info message')
    log.warning('Warning message')
    print('sys.stdout message')
    print('sys.stderr message', file=sys.stderr)
    # Currently not sure if system messages (i.e. fd 1 & 2) can be captured
    subprocess.run('echo Unix stdout message'.split())
    #subprocess.run('echo "Unix stderr message" >&2'.split()) # Doesn't work
    os.system('echo "Unix stderr message" >&2')
    log.info('\r'+' '*80)
    log.info('='*40)
    for line in logger.log_summary_str(mod.log).split('\n'):
        log.info(line)
    mod.print_messages()
    log.info('\r'+' '*80)
    log.info('='*40)
    for line in logger.log_summary_str(submod.log).split('\n'):
        log.info(line)
    submod.print_messages()
    log.info('\r'+' '*80)
    log.info('='*40)
    # Intentional crash to show how the logger handles unhandled exceptions.
    raise RuntimeError('Test unhandled exception')
    log.info('='*40)  # unreachable by design
args = get_args()
if args.log_level:
log.setLevel(args.log_level.upper())
main()
| 1,071 | 0 | 46 |
b5d3d2cad872fc9129b68a5b4344d6873551b3a7 | 410 | py | Python | support/views.py | chrisdrackett/django-support | a4f29421a31797e0b069637a0afec85328b4f0ca | [
"BSD-3-Clause"
] | null | null | null | support/views.py | chrisdrackett/django-support | a4f29421a31797e0b069637a0afec85328b4f0ca | [
"BSD-3-Clause"
] | null | null | null | support/views.py | chrisdrackett/django-support | a4f29421a31797e0b069637a0afec85328b4f0ca | [
"BSD-3-Clause"
] | null | null | null | from django.template import RequestContext
from django.shortcuts import render_to_response | 29.285714 | 77 | 0.746341 | from django.template import RequestContext
from django.shortcuts import render_to_response
def template(request, template_name, dictionary, *args, **kwargs):
    """Render *template_name* with a RequestContext; an optional 'status'
    kwarg (truthy) overrides the response status code."""
    kwargs['context_instance'] = RequestContext(request)
    status_override = kwargs.pop('status', None)
    rendered = render_to_response(template_name, dictionary, *args, **kwargs)
    if status_override:
        rendered.status_code = status_override
    return rendered
08ebba3f75d96660b3a77a1a0f6521959c1eb659 | 5,224 | py | Python | bot/cogs/commands/bot_channel/capaventura_aux/capaventura_db_connect.py | LeandroLFE/capmon | 9d1200301628ea4fec0e8ed09d5e9b67a426d923 | [
"MIT"
] | null | null | null | bot/cogs/commands/bot_channel/capaventura_aux/capaventura_db_connect.py | LeandroLFE/capmon | 9d1200301628ea4fec0e8ed09d5e9b67a426d923 | [
"MIT"
] | null | null | null | bot/cogs/commands/bot_channel/capaventura_aux/capaventura_db_connect.py | LeandroLFE/capmon | 9d1200301628ea4fec0e8ed09d5e9b67a426d923 | [
"MIT"
] | null | null | null | from db.connect.instanciaAtualDB import atualDB
from db.scripts.script_select.select_canais import select_verifica_canal, select_canais_ativos
from db.scripts.script_select.select_aventureiros import script_select_aventureiro_nome, script_select_aventureiro_id
from db.scripts.script_create_drop.create_tables import create_aventureiros_canal, create_buddies_canal, create_capturados_canal, create_tipo_hordas_canal, create_hordas_canal, create_capboard_dados_canal, create_itens_obtidos_canal
from db.scripts.script_create_drop.drop_tables import drop_table_aventureiros_canal, drop_table_buddies_canal, drop_table_capturados_canal, drop_tipo_hordas_canal,drop_hordas_canal, drop_capboard_dados_canal, drop_itens_obtidos_canal
from db.scripts.script_insert_update_delete.update_insert_canais import script_insert_canais, script_update_canais
from db.scripts.script_insert_update_delete.insert_update_tipo_hordas_canal import script_insert_table_tipo_hordas_canal
from db.scripts.script_insert_update_delete.insert_update_hordas_canal import script_insert_table_hordas_canal
from db.scripts.script_insert_update_delete.insert_aventureiros import script_insert_aventureiros
from db.scripts.script_select.select_idiomas import script_select_todos_idiomas, script_select_idioma_por_nome
from db.scripts.script_insert_update_delete.insert_tipo_itens import script_insert_tipo_itens
from db.scripts.script_select.select_parametros_canal import select_parametros_aventureiros_novo_canal, select_parametros_hordas_canal | 53.85567 | 258 | 0.813361 | from db.connect.instanciaAtualDB import atualDB
from db.scripts.script_select.select_canais import select_verifica_canal, select_canais_ativos
from db.scripts.script_select.select_aventureiros import script_select_aventureiro_nome, script_select_aventureiro_id
from db.scripts.script_create_drop.create_tables import create_aventureiros_canal, create_buddies_canal, create_capturados_canal, create_tipo_hordas_canal, create_hordas_canal, create_capboard_dados_canal, create_itens_obtidos_canal
from db.scripts.script_create_drop.drop_tables import drop_table_aventureiros_canal, drop_table_buddies_canal, drop_table_capturados_canal, drop_tipo_hordas_canal,drop_hordas_canal, drop_capboard_dados_canal, drop_itens_obtidos_canal
from db.scripts.script_insert_update_delete.update_insert_canais import script_insert_canais, script_update_canais
from db.scripts.script_insert_update_delete.insert_update_tipo_hordas_canal import script_insert_table_tipo_hordas_canal
from db.scripts.script_insert_update_delete.insert_update_hordas_canal import script_insert_table_hordas_canal
from db.scripts.script_insert_update_delete.insert_aventureiros import script_insert_aventureiros
from db.scripts.script_select.select_idiomas import script_select_todos_idiomas, script_select_idioma_por_nome
from db.scripts.script_insert_update_delete.insert_tipo_itens import script_insert_tipo_itens
from db.scripts.script_select.select_parametros_canal import select_parametros_aventureiros_novo_canal, select_parametros_hordas_canal
class Capaventura_DB_Connect():
    """Facade over the Capaventura database.

    Each coroutine only builds the SQL for one operation via the
    ``script_*`` / ``select_*`` / ``create_*`` / ``drop_*`` helpers; the
    ``atualDB`` decorator wrapping it is what actually executes the
    statement and shapes the result (one row, many rows, insert, update,
    create/drop).

    Fix over the previous revision: every method used the mutable default
    argument ``dados={}`` (a shared dict across all calls); the default is
    now ``None`` and a fresh dict is created per call.
    """

    def __init__(self) -> None:
        pass

    # ------------------------------------------------------------------
    # SELECT queries
    # ------------------------------------------------------------------
    @atualDB.select_table_many_data
    async def consulta_todos_idiomas(self, dados=None):
        """SQL listing every language."""
        dados = dados if dados is not None else {}
        return script_select_todos_idiomas(dados)

    @atualDB.select_table_one_data
    async def consulta_idioma_por_nome(self, dados=None):
        """SQL fetching one language by name."""
        dados = dados if dados is not None else {}
        return script_select_idioma_por_nome(dados)

    @atualDB.select_table_one_data
    async def consulta_parametros_aventureiros_novo_canal(self, dados=None):
        """SQL for the adventurer parameters of a new channel.

        ``dados`` is accepted for interface symmetry with the other
        methods, but the underlying script takes no arguments.
        """
        return select_parametros_aventureiros_novo_canal()

    @atualDB.select_table_one_data
    async def consulta_aventureiro_por_nome(self, dados=None):
        """SQL fetching one adventurer by name."""
        dados = dados if dados is not None else {}
        return script_select_aventureiro_nome(dados)

    @atualDB.select_table_one_data
    async def consulta_aventureiro_por_id(self, dados=None):
        """SQL fetching one adventurer by id."""
        dados = dados if dados is not None else {}
        return script_select_aventureiro_id(dados)

    @atualDB.select_table_one_data
    async def consulta_parametros_hordas_canal(self, dados=None):
        """SQL for the horde parameters of a channel."""
        dados = dados if dados is not None else {}
        return select_parametros_hordas_canal(dados)

    @atualDB.select_table_many_data
    async def consulta_canais_ativos_existentes(self, dados=None):
        """SQL listing the currently active channels."""
        dados = dados if dados is not None else {}
        return select_canais_ativos(dados)

    @atualDB.select_table_one_data
    async def consulta_canal_dado_um_id(self, dados=None):
        """SQL checking for a channel with the given id."""
        dados = dados if dados is not None else {}
        return select_verifica_canal(dados)

    # ------------------------------------------------------------------
    # UPDATE / INSERT statements
    # ------------------------------------------------------------------
    @atualDB.update_table
    async def update_canais(self, dados=None):
        """SQL updating a channel row."""
        dados = dados if dados is not None else {}
        return script_update_canais(dados)

    @atualDB.insert_table_one_line
    async def insert_canais(self, dados=None):
        """SQL inserting one channel row."""
        dados = dados if dados is not None else {}
        return script_insert_canais(dados)

    @atualDB.insert_table_many_lines
    async def insert_tipo_itens(self, dados=None):
        """SQL inserting the item-type rows."""
        dados = dados if dados is not None else {}
        return script_insert_tipo_itens(dados)

    @atualDB.insert_table_one_line
    async def insert_aventureiro(self, dados=None):
        """SQL inserting one adventurer row."""
        dados = dados if dados is not None else {}
        return script_insert_aventureiros(dados)

    @atualDB.insert_table_many_lines
    async def insert_tipo_hordas_canal(self, dados=None):
        """SQL inserting the horde-type rows of a channel."""
        dados = dados if dados is not None else {}
        return script_insert_table_tipo_hordas_canal(dados)

    @atualDB.insert_table_one_line
    async def insert_hordas_canal(self, dados=None):
        """SQL inserting one horde row for a channel."""
        dados = dados if dados is not None else {}
        return script_insert_table_hordas_canal(dados)

    # ------------------------------------------------------------------
    # DDL for the per-adventurer tables
    # ------------------------------------------------------------------
    @atualDB.create_or_drop_table
    async def create_tables_with_underline_adventurer_name(self, dados=None):
        """Concatenated CREATE statements for one adventurer's tables."""
        dados = dados if dados is not None else {}
        return (create_aventureiros_canal(dados)
                + create_buddies_canal(dados)
                + create_capturados_canal(dados)
                + create_tipo_hordas_canal(dados)
                + create_hordas_canal(dados)
                + create_itens_obtidos_canal(dados)
                + create_capboard_dados_canal(dados))

    @atualDB.create_or_drop_table
    async def drop_tables_with_underline_adventurer_name(self, dados=None):
        """Concatenated DROP statements, exact reverse of the CREATE order."""
        dados = dados if dados is not None else {}
        return (drop_capboard_dados_canal(dados)
                + drop_itens_obtidos_canal(dados)
                + drop_hordas_canal(dados)
                + drop_tipo_hordas_canal(dados)
                + drop_table_capturados_canal(dados)
                + drop_table_buddies_canal(dados)
                + drop_table_aventureiros_canal(dados))
ce93298b8ba681a158cc20994e7461c62aae6ed8 | 2,566 | py | Python | branch/services.py | caioaraujo/bakery_payments_api_v2 | ade365bd2aa9561182be982286caa72923f36e13 | [
"MIT"
] | 4 | 2019-06-01T23:51:20.000Z | 2021-02-24T11:23:31.000Z | branch/services.py | caioaraujo/bakery_payments_api_v2 | ade365bd2aa9561182be982286caa72923f36e13 | [
"MIT"
] | 8 | 2020-06-13T23:10:46.000Z | 2022-02-28T13:58:02.000Z | branch/services.py | caioaraujo/bakery_payments_api_v2 | ade365bd2aa9561182be982286caa72923f36e13 | [
"MIT"
] | 1 | 2022-03-22T04:54:35.000Z | 2022-03-22T04:54:35.000Z | from django.utils.translation import ugettext
from rest_framework.exceptions import NotFound
from commons.decorators import validate_requirements, validate_existance, str_to_boolean
from payment.models import Payment
from .models import Branch
class BranchService:
    """
    General services for branch: create, update, list, fetch and delete
    branches, plus payment lookup for a branch.
    """

    @validate_requirements('name', 'current_balance')
    def insert(self, params):
        """
        Persist a brand new Branch built from the given parameters.

        Args:
            params: dict with 'name' and 'current_balance'

        Returns: Branch instance
        """
        new_branch = Branch(
            name=params['name'],
            current_balance=params['current_balance'],
        )
        new_branch.save()
        return new_branch

    @validate_existance((Branch, 'id'), is_critical=True)
    @validate_requirements('name', 'current_balance')
    def update(self, params):
        """
        Overwrite the name and balance of an existing Branch.

        Args:
            params: dict with 'id', 'name' and 'current_balance'

        Returns: Branch instance
        """
        branch = Branch(
            id=params['id'],
            name=params['name'],
            current_balance=params['current_balance'],
        )
        branch.save(update_fields=['name', 'current_balance'])
        return branch

    def find(self):
        """
        Fetch every registered branch.

        Returns: Branch QuerySet
        """
        return Branch.objects.all()

    def find_by_id(self, branch_id):
        """
        Fetch one branch, raising NotFound when it does not exist.

        Args:
            branch_id: int

        Returns: Branch instance
        """
        try:
            return Branch.objects.get(id=branch_id)
        except Branch.DoesNotExist:
            raise NotFound(detail=ugettext('Branch not found'))

    def delete(self, branch_id):
        """
        Remove one branch, raising NotFound when it does not exist.

        Args:
            branch_id: int
        """
        self.find_by_id(branch_id).delete()

    @validate_existance((Branch, 'branch'), is_critical=True)
    @str_to_boolean('is_paid')
    def find_payments(self, params):
        """
        List the payments of a branch, optionally filtered by paid status.

        Args:
            params: dict - filters ('branch' required, 'is_paid' optional)

        Returns: Payment queryset
        """
        payments = Payment.objects.filter(branch_id=params['branch'])
        paid_flag = params.get('is_paid')
        if paid_flag is None:
            return payments
        return payments.filter(is_paid=paid_flag)
| 23.117117 | 88 | 0.59431 | from django.utils.translation import ugettext
from rest_framework.exceptions import NotFound
from commons.decorators import validate_requirements, validate_existance, str_to_boolean
from payment.models import Payment
from .models import Branch
class BranchService:
    """
    General services for branch: create, update, list, fetch and delete
    branches, plus payment lookup for a branch.
    """
    @validate_requirements('name', 'current_balance')
    def insert(self, params):
        """
        Save a new Branch model.
        Args:
            params: dict with required keys 'name' and 'current_balance'
        Returns: Branch instance
        """
        branch = Branch()
        branch.name = params['name']
        branch.current_balance = params['current_balance']
        branch.save()
        return branch
    @validate_existance((Branch, 'id'), is_critical=True)
    @validate_requirements('name', 'current_balance')
    def update(self, params):
        """
        Update a Branch model (only 'name' and 'current_balance' are written).
        Args:
            params: dict with required keys 'id', 'name', 'current_balance'
        Returns: Branch instance
        """
        branch_id = params['id']
        name = params['name']
        current_balance = params['current_balance']
        branch = Branch(id=branch_id, name=name, current_balance=current_balance)
        branch.save(update_fields=['name', 'current_balance'])
        return branch
    def find(self):
        """
        Return a list of branches.
        Returns: Branch QuerySet
        """
        return Branch.objects.all()
    def find_by_id(self, branch_id):
        """
        Return a single Branch instance.
        Args:
            branch_id: int
        Returns: Branch instance
        Raises:
            NotFound: when no branch has the given id
        """
        try:
            return Branch.objects.get(id=branch_id)
        except Branch.DoesNotExist:
            raise NotFound(detail=ugettext('Branch not found'))
    def delete(self, branch_id):
        """
        Delete a Branch instance.
        Args:
            branch_id: int
        Raises:
            NotFound: when no branch has the given id
        """
        try:
            Branch.objects.get(id=branch_id).delete()
        except Branch.DoesNotExist:
            raise NotFound(detail=ugettext('Branch not found'))
    @validate_existance((Branch, 'branch'), is_critical=True)
    @str_to_boolean('is_paid')
    def find_payments(self, params):
        """
        Find a list of branch payments.
        Args:
            params: dict - 'branch' (required) plus optional 'is_paid' filter
        Returns: Payment queryset
        """
        branch = params['branch']
        is_paid = params.get('is_paid')
        query = Payment.objects.filter(branch_id=branch)
        if is_paid is not None:
            query = query.filter(is_paid=is_paid)
        return query
| 0 | 0 | 0 |
a227747028c6b14641d8f64ac9d748b7af8120d3 | 2,996 | py | Python | lib/surface/certificate_manager/maps/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/certificate_manager/maps/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/certificate_manager/maps/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud certificate-manager maps list` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.certificate_manager import certificate_maps
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.certificate_manager import resource_args
from googlecloudsdk.command_lib.certificate_manager import util
from googlecloudsdk.core.resource import resource_transform
_FORMAT = """\
table(
name.scope(certificateMaps),
gclbTargets.gclbTargetsToString(undefined='-'):label=ENDPOINTS,
description,
createTime.date('%Y-%m-%d %H:%M:%S %Oz', undefined='-')
)
"""
def _TransformGclbTargets(targets, undefined=''):
  r"""Transforms GclbTargets to more compact form.

  It uses following format: IP_1:port_1\nIP_2:port_2\n...IP_n:port_n.

  Args:
    targets: GclbTargets API representation.
    undefined: str, value to be returned if no IP:port pair is found.

  Returns:
    String representation to be shown in table view.
  """
  if not targets:
    return undefined
  result = []
  for target in targets:
    ip_configs = resource_transform.GetKeyValue(target, 'ipConfig', None)
    if ip_configs is None:
      # A target without any ipConfig makes the whole cell undefined.
      return undefined
    for ip_config in ip_configs:
      ip_address = resource_transform.GetKeyValue(ip_config, 'ipAddress', None)
      port = resource_transform.GetKeyValue(ip_config, 'port', None)
      if ip_address is None or port is None:
        # Skip incomplete IP/port pairs instead of failing the transform.
        continue
      result.append('{}:{}'.format(ip_address, port))
  return '\n'.join(result) if result else undefined
# Registered under the name referenced inside _FORMAT.
_TRANSFORMS = {
    'gclbTargetsToString': _TransformGclbTargets,
}
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class List(base.ListCommand):
"""List certificate maps.
List Certificate Manager maps in the project.
## EXAMPLES
To list all certificate maps in the project, run:
$ {command}
"""
@staticmethod
| 31.87234 | 79 | 0.748331 | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud certificate-manager maps list` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.certificate_manager import certificate_maps
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.certificate_manager import resource_args
from googlecloudsdk.command_lib.certificate_manager import util
from googlecloudsdk.core.resource import resource_transform
_FORMAT = """\
table(
name.scope(certificateMaps),
gclbTargets.gclbTargetsToString(undefined='-'):label=ENDPOINTS,
description,
createTime.date('%Y-%m-%d %H:%M:%S %Oz', undefined='-')
)
"""
def _TransformGclbTargets(targets, undefined=''):
  r"""Renders GclbTargets as newline-separated IP:port pairs.

  Output format: IP_1:port_1\nIP_2:port_2\n...IP_n:port_n.

  Args:
    targets: GclbTargets API representation.
    undefined: str, value to be returned if no IP:port pair is found.

  Returns:
    String representation to be shown in table view.
  """
  if not targets:
    return undefined
  pairs = []
  for target in targets:
    ip_configs = resource_transform.GetKeyValue(target, 'ipConfig', None)
    if ip_configs is None:
      # One target without ipConfig invalidates the whole cell.
      return undefined
    for config in ip_configs:
      address = resource_transform.GetKeyValue(config, 'ipAddress', None)
      port = resource_transform.GetKeyValue(config, 'port', None)
      if address is not None and port is not None:
        pairs.append('{}:{}'.format(address, port))
  if not pairs:
    return undefined
  return '\n'.join(pairs)
# Registered under the name referenced inside _FORMAT.
_TRANSFORMS = {
    'gclbTargetsToString': _TransformGclbTargets,
}
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class List(base.ListCommand):
  """List certificate maps.

  List Certificate Manager maps in the project.

  ## EXAMPLES

  To list all certificate maps in the project, run:

    $ {command}
  """
  @staticmethod
  def Args(parser):
    """Registers the location argument and the table/URI output config."""
    resource_args.AddLocationResourceArg(parser, 'to list maps for')
    parser.display_info.AddUriFunc(util.CertificateMapUriFunc)
    parser.display_info.AddFormat(_FORMAT)
    parser.display_info.AddTransforms(_TRANSFORMS)
  def Run(self, args):
    """Lists certificate maps in the parsed location, honoring limit/page size."""
    client = certificate_maps.CertificateMapClient()
    location_ref = args.CONCEPTS.location.Parse()
    return client.List(location_ref, args.limit, args.page_size)
| 389 | 0 | 49 |
e750fd535877fd5891ddba57200668a4010a3aa3 | 938 | py | Python | plugins/hanlp_demo/hanlp_demo/zh/train_sota_bert_pku.py | callzhang/HanLP | f33c7e95b1d30d952d57f50272152f8d3a1740b2 | [
"Apache-2.0"
] | null | null | null | plugins/hanlp_demo/hanlp_demo/zh/train_sota_bert_pku.py | callzhang/HanLP | f33c7e95b1d30d952d57f50272152f8d3a1740b2 | [
"Apache-2.0"
] | null | null | null | plugins/hanlp_demo/hanlp_demo/zh/train_sota_bert_pku.py | callzhang/HanLP | f33c7e95b1d30d952d57f50272152f8d3a1740b2 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-11 02:47
from hanlp.common.dataset import SortingSamplerBuilder
from hanlp.components.tokenizers.transformer import TransformerTaggingTokenizer
from hanlp.datasets.cws.sighan2005.pku import SIGHAN2005_PKU_TRAIN_ALL, SIGHAN2005_PKU_TEST
from tests import cdroot
cdroot()  # chdir to the repo root so the relative paths below resolve
tokenizer = TransformerTaggingTokenizer()
# Output directory; '96.66' presumably records the score this recipe reached
# on PKU — TODO confirm.
save_dir = 'data/model/cws/sighan2005_pku_bert_base_96.66'
# Fine-tune a BERT-based tagging tokenizer on the SIGHAN 2005 PKU corpus.
tokenizer.fit(
    SIGHAN2005_PKU_TRAIN_ALL,
    SIGHAN2005_PKU_TEST,  # Conventionally, no devset is used. See Tian et al. (2020).
    save_dir,
    'bert-base-chinese',  # transformer backbone
    max_seq_len=300,
    char_level=True,  # character-level inputs
    hard_constraint=True,
    sampler_builder=SortingSamplerBuilder(batch_size=32),
    epochs=3,
    adam_epsilon=1e-6,
    warmup_steps=0.1,  # 0.1 — presumably a fraction of total steps; verify
    weight_decay=0.01,
    word_dropout=0.1,
    seed=1609422632,  # fixed seed for reproducibility
)
# Final evaluation on the test split, then report where the model landed.
tokenizer.evaluate(SIGHAN2005_PKU_TEST, save_dir)
print(f'Model saved in {save_dir}')
| 31.266667 | 91 | 0.766525 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-11 02:47
from hanlp.common.dataset import SortingSamplerBuilder
from hanlp.components.tokenizers.transformer import TransformerTaggingTokenizer
from hanlp.datasets.cws.sighan2005.pku import SIGHAN2005_PKU_TRAIN_ALL, SIGHAN2005_PKU_TEST
from tests import cdroot
cdroot()  # chdir to the repo root so the relative paths below resolve
tokenizer = TransformerTaggingTokenizer()
# Output directory; '96.66' presumably records the score this recipe reached
# on PKU — TODO confirm.
save_dir = 'data/model/cws/sighan2005_pku_bert_base_96.66'
# Fine-tune a BERT-based tagging tokenizer on the SIGHAN 2005 PKU corpus.
tokenizer.fit(
    SIGHAN2005_PKU_TRAIN_ALL,
    SIGHAN2005_PKU_TEST,  # Conventionally, no devset is used. See Tian et al. (2020).
    save_dir,
    'bert-base-chinese',  # transformer backbone
    max_seq_len=300,
    char_level=True,  # character-level inputs
    hard_constraint=True,
    sampler_builder=SortingSamplerBuilder(batch_size=32),
    epochs=3,
    adam_epsilon=1e-6,
    warmup_steps=0.1,  # 0.1 — presumably a fraction of total steps; verify
    weight_decay=0.01,
    word_dropout=0.1,
    seed=1609422632,  # fixed seed for reproducibility
)
# Final evaluation on the test split, then report where the model landed.
tokenizer.evaluate(SIGHAN2005_PKU_TEST, save_dir)
print(f'Model saved in {save_dir}')
| 0 | 0 | 0 |
876d5f64c3476f92c01bd76ed23572ca05df5a01 | 4,018 | py | Python | tests/cluster/commands_set_test.py | ProjectHentai/yaaredis | be6fcaf4c66f98272bfdeae33d34bb4e6fc13f1f | [
"MIT"
] | 13 | 2021-06-08T23:44:00.000Z | 2022-03-23T22:48:17.000Z | tests/cluster/commands_set_test.py | talkiq/yaaredis | 01e3fdd5ccf80843c56f5932952eb6ef0a697b33 | [
"MIT"
] | 10 | 2021-06-09T00:03:20.000Z | 2022-03-22T10:37:08.000Z | tests/cluster/commands_set_test.py | ProjectHentai/yaaredis | be6fcaf4c66f98272bfdeae33d34bb4e6fc13f1f | [
"MIT"
] | 1 | 2021-11-26T16:46:31.000Z | 2021-11-26T16:46:31.000Z | import pytest
from yaaredis.utils import b
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| 28.097902 | 73 | 0.553758 | import pytest
from yaaredis.utils import b
@pytest.mark.asyncio
async def test_sadd(r):
    """SADD stores every given member."""
    await r.flushdb()
    members = {b('1'), b('2'), b('3')}
    await r.sadd('a', *members)
    assert await r.smembers('a') == members
@pytest.mark.asyncio
async def test_scard(r):
    """SCARD returns the set's cardinality."""
    await r.flushdb()
    await r.sadd('a', '1', '2', '3')
    assert await r.scard('a') == 3
@pytest.mark.asyncio
async def test_sdiff(r):
    """SDIFF returns members of the first set absent from the second.

    The {foo} hash tag keeps both keys in the same cluster slot.
    """
    await r.flushdb()
    await r.sadd('a{foo}', '1', '2', '3')
    assert await r.sdiff('a{foo}', 'b{foo}') == {b('1'), b('2'), b('3')}
    await r.sadd('b{foo}', '2', '3')
    assert await r.sdiff('a{foo}', 'b{foo}') == {b('1')}
@pytest.mark.asyncio
async def test_sdiffstore(r):
    """SDIFFSTORE writes the difference and returns its cardinality."""
    await r.flushdb()
    await r.sadd('a{foo}', '1', '2', '3')
    assert await r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 3
    assert await r.smembers('c{foo}') == {b('1'), b('2'), b('3')}
    await r.sadd('b{foo}', '2', '3')
    assert await r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 1
    assert await r.smembers('c{foo}') == {b('1')}
    # Diff:s that return empty set should not fail
    assert await r.sdiffstore('d{foo}', 'e{foo}') == 0
@pytest.mark.asyncio
async def test_sinter(r):
    """SINTER of disjoint sets is empty; otherwise the common members."""
    await r.flushdb()
    await r.sadd('a{foo}', '1', '2', '3')
    assert await r.sinter('a{foo}', 'b{foo}') == set()
    await r.sadd('b{foo}', '2', '3')
    assert await r.sinter('a{foo}', 'b{foo}') == {b('2'), b('3')}
@pytest.mark.asyncio
async def test_sinterstore(r):
    """SINTERSTORE writes the intersection and returns its cardinality."""
    await r.flushdb()
    await r.sadd('a{foo}', '1', '2', '3')
    assert await r.sinterstore('c{foo}', 'a{foo}', 'b{foo}') == 0
    assert await r.smembers('c{foo}') == set()
    await r.sadd('b{foo}', '2', '3')
    assert await r.sinterstore('c{foo}', 'a{foo}', 'b{foo}') == 2
    assert await r.smembers('c{foo}') == {b('2'), b('3')}
@pytest.mark.asyncio
async def test_sismember(r):
    """SISMEMBER reports membership for single values."""
    await r.flushdb()
    await r.sadd('a', '1', '2', '3')
    assert await r.sismember('a', '1')
    assert await r.sismember('a', '2')
    assert await r.sismember('a', '3')
    assert not await r.sismember('a', '4')
@pytest.mark.asyncio
async def test_smembers(r):
    """SMEMBERS returns every member of the set."""
    await r.flushdb()
    await r.sadd('a', '1', '2', '3')
    assert await r.smembers('a') == {b('1'), b('2'), b('3')}
@pytest.mark.asyncio
async def test_smove(r):
    """SMOVE transfers a member from one set to another (same {foo} slot)."""
    await r.flushdb()
    await r.sadd('a{foo}', 'a1', 'a2')
    await r.sadd('b{foo}', 'b1', 'b2')
    assert await r.smove('a{foo}', 'b{foo}', 'a1')
    assert await r.smembers('a{foo}') == {b('a2')}
    assert await r.smembers('b{foo}') == {b('b1'), b('b2'), b('a1')}
@pytest.mark.asyncio
async def test_spop(r):
    """SPOP removes and returns one member of the set."""
    await r.flushdb()
    s = [b('1'), b('2'), b('3')]
    await r.sadd('a', *s)
    value = await r.spop('a')
    assert value in s
    assert await r.smembers('a') == set(s) - {value}
@pytest.mark.asyncio
async def test_srandmember(r):
    """SRANDMEMBER yields some member of the set."""
    await r.flushdb()
    s = [b('1'), b('2'), b('3')]
    await r.sadd('a', *s)
    assert await r.srandmember('a') in s
@pytest.mark.asyncio
async def test_srandmember_multi_value(r):
    """SRANDMEMBER with a count returns that many members from the set."""
    await r.flushdb()
    s = [b('1'), b('2'), b('3')]
    await r.sadd('a', *s)
    randoms = await r.srandmember('a', number=2)
    assert len(randoms) == 2
    assert set(randoms).intersection(s) == set(randoms)
@pytest.mark.asyncio
async def test_srem(r):
    """SREM removes the given members, returning how many were removed."""
    await r.flushdb()
    await r.sadd('a', '1', '2', '3', '4')
    assert await r.srem('a', '5') == 0
    assert await r.srem('a', '2', '4') == 2
    assert await r.smembers('a') == {b('1'), b('3')}
@pytest.mark.asyncio
async def test_sunion(r):
    """SUNION returns the combined members of both sets."""
    await r.flushdb()
    await r.sadd('a{foo}', '1', '2')
    await r.sadd('b{foo}', '2', '3')
    assert await r.sunion('a{foo}', 'b{foo}') == {b('1'), b('2'), b('3')}
async def test_sunionstore(r):
await r.flushdb()
await r.sadd('a{foo}', '1', '2')
await r.sadd('b{foo}', '2', '3')
assert await r.sunionstore('c{foo}', 'a{foo}', 'b{foo}') == 3
assert await r.smembers('c{foo}') == {b('1'), b('2'), b('3')}
| 3,299 | 0 | 330 |
11ebadeaa60ed1cc5bbb6ca2726e3438b9721491 | 1,499 | py | Python | bandcamp_player/__init__.py | MonkeysAreEvil/bandcamp-player | f3ecd9d998ff23df858eff755dcc0ac339cea771 | [
"MIT"
] | null | null | null | bandcamp_player/__init__.py | MonkeysAreEvil/bandcamp-player | f3ecd9d998ff23df858eff755dcc0ac339cea771 | [
"MIT"
] | null | null | null | bandcamp_player/__init__.py | MonkeysAreEvil/bandcamp-player | f3ecd9d998ff23df858eff755dcc0ac339cea771 | [
"MIT"
] | 1 | 2019-06-11T14:32:17.000Z | 2019-06-11T14:32:17.000Z | # coding=utf-8
import logging
import sys
import argparse
from bandcamp_parser.album import Album
from bandcamp_parser.tag import Tag
from bandcamp_parser.track import Track
logging.basicConfig(level=logging.INFO)
def main():
    """Run the playback loop; CTRL-C exits cleanly with status 0."""
    try:
        loop()
    except KeyboardInterrupt:
        # CTRL-C is the normal way to stop playback.
        exit(0)
if __name__ == '__main__':
main()
| 26.298246 | 120 | 0.643763 | # coding=utf-8
import logging
import sys
import argparse
from bandcamp_parser.album import Album
from bandcamp_parser.tag import Tag
from bandcamp_parser.track import Track
logging.basicConfig(level=logging.INFO)
def loop():
    """Parse the command line and play the requested content.

    Exactly one of --album / --tag / --track is required (enforced by the
    mutually exclusive argparse group). --tag plays random tracks forever;
    the caller stops it with CTRL-C.
    """
    parser = argparse.ArgumentParser(description="Plays a track, an album, or random tracks from a tag, from Bandcamp.")
    parser.add_argument("--version", action="version", version="'%(prog)s 0.2.1'")
    any_of_required = parser.add_mutually_exclusive_group(required=True)
    any_of_required.add_argument("--album", help="plays an album")
    any_of_required.add_argument("--tag", help="plays random tracks with tag")
    any_of_required.add_argument("--track", help="plays a track")
    args = vars(parser.parse_args())
    album_url = args["album"]
    tag_url = args["tag"]
    track_url = args["track"]
    if album_url:
        album = Album(album_url)
        # Dedicated loop variable: the previous code reused ``track_url``
        # here, which made the trailing ``if track_url`` replay the last
        # album track a second time.
        for url in album.tracks():
            Track(url).play()
    if tag_url:
        # Fixed: previously read ``sys.argv[2]`` directly, which only worked
        # when --tag happened to be the first option on the command line.
        tag = Tag(tag_url)
        while True:
            album = Album(tag.album_random().href)
            Track(album.track_random()).play()
    if track_url:
        Track(track_url).play()
def main():
    """Run the playback loop; CTRL-C exits cleanly with status 0."""
    try:
        loop()
    except KeyboardInterrupt:
        # CTRL-C is the normal way to stop playback.
        exit(0)
if __name__ == '__main__':
main()
| 1,092 | 0 | 23 |
d42c6ad7703b5e148e59e19432bd1b37dbd29b17 | 9,809 | py | Python | parts/broker/pub/hedge.py | coolerking/agent_smith | 1ec8c285fcb3996eaa77869b15af993696e113a8 | [
"MIT"
] | null | null | null | parts/broker/pub/hedge.py | coolerking/agent_smith | 1ec8c285fcb3996eaa77869b15af993696e113a8 | [
"MIT"
] | null | null | null | parts/broker/pub/hedge.py | coolerking/agent_smith | 1ec8c285fcb3996eaa77869b15af993696e113a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
MarvelmindデータをAWS IoT Core へ Publish するパーツクラスを定義するモジュール。
"""
import time
import json
from .base import PublisherBase, to_float, to_str
from .topic import pub_hedge_usnav_json_topic, pub_hedge_usnav_raw_json_topic, pub_hedge_imu_json_topic
class USNavPublisher(PublisherBase):
    """
    Publishes Marvelmind ultrasonic-navigation position samples (dict)
    to AWS IoT Core.
    NOTE(review): relies on self.client, self.topic, self.qos and
    self.debug being prepared by the initializer, which is not part of
    this excerpt — confirm against the full class.
    """
    def run(self, usnav_id, usnav_x, usnav_y, usnav_z, usnav_angle, usnav_timestamp):
        """
        Publish one position sample as a JSON message.
        Args:
            usnav_id:        mobile beacon id
            usnav_x/y/z:     position on each axis
            usnav_angle:     heading
            usnav_timestamp: acquisition time
        Returns:
            None
        """
        ret = self.client.publish(
            self.topic,
            self.to_message(
                usnav_id, usnav_x, usnav_y, usnav_z, usnav_angle, usnav_timestamp),
            self.qos)
        if self.debug:
            # to_message() is rebuilt below purely for the log output.
            print('[USNavPublisher] publish topic={} ret={}'.format(self.topic, str(ret)))
            print('[USNavPublisher] msg={}'.format(self.to_message(
                usnav_id, usnav_x, usnav_y, usnav_z, usnav_angle, usnav_timestamp)))
    def to_message(self, usnav_id, usnav_x, usnav_y, usnav_z,
            usnav_angle, usnav_timestamp):
        """
        Serialize one position sample to a JSON string with 'usnav/...'
        keys; the id goes through to_str, numeric fields through to_float.
        Args:
            usnav_id:        mobile beacon id
            usnav_x/y/z:     position on each axis
            usnav_angle:     heading
            usnav_timestamp: acquisition time
        Returns:
            JSON message string
        """
        message = {
            'usnav/id': to_str(usnav_id),
            'usnav/x': to_float(usnav_x),
            'usnav/y': to_float(usnav_y),
            'usnav/z': to_float(usnav_z),
            'usnav/angle': to_float(usnav_angle),
            'usnav/timestamp': to_float(usnav_timestamp),
        }
        return json.dumps(message)
class USNavRawPublisher(PublisherBase):
    """
    Publishes Marvelmind raw beacon-to-beacon distance samples (dict)
    to AWS IoT Core.
    NOTE(review): relies on self.client, self.topic, self.qos and
    self.debug being prepared by the initializer, which is not part of
    this excerpt — confirm against the full class.
    """
    def run(self, dist_id, dist_b1, dist_b1d, dist_b2, dist_b2d,
            dist_b3, dist_b3d, dist_b4, dist_b4d, dist_timestamp):
        """
        Publish one distance sample as a JSON message.
        Args:
            dist_id:         mobile beacon id
            dist_b1..b4:     ids of the four reference beacons
            dist_b1d..b4d:   distance to each reference beacon
            dist_timestamp:  acquisition time
        Returns:
            None
        """
        ret = self.client.publish(
            self.topic,
            self.to_message(
                dist_id, dist_b1, dist_b1d, dist_b2, dist_b2d,
                dist_b3, dist_b3d, dist_b4, dist_b4d, dist_timestamp),
            self.qos)
        if self.debug:
            # to_message() is rebuilt below purely for the log output.
            print('[USNavRawPublisher] publish topic={} ret={}'.format(self.topic, str(ret)))
            print('[USNavRawPublisher] msg={}'.format(self.to_message(
                dist_id, dist_b1, dist_b1d, dist_b2, dist_b2d,
                dist_b3, dist_b3d, dist_b4, dist_b4d, dist_timestamp)))
    def to_message(self, dist_id, dist_b1, dist_b1d, dist_b2,
            dist_b2d, dist_b3, dist_b3d, dist_b4, dist_b4d, dist_timestamp):
        """
        Serialize one distance sample to a JSON string with 'dist/...'
        keys; beacon ids go through to_str, distances through to_float.
        Args:
            dist_id:         mobile beacon id
            dist_b1..b4:     ids of the four reference beacons
            dist_b1d..b4d:   distance to each reference beacon
            dist_timestamp:  acquisition time
        Returns:
            JSON message string
        """
        message = {
            'dist/id': to_str(dist_id),
            'dist/b1': to_str(dist_b1),
            'dist/b1d': to_float(dist_b1d),
            'dist/b2': to_str(dist_b2),
            'dist/b2d': to_float(dist_b2d),
            'dist/b3': to_str(dist_b3),
            'dist/b3d': to_float(dist_b3d),
            'dist/b4': to_str(dist_b4),
            'dist/b4d': to_float(dist_b4d),
            'dist/timestamp': to_float(dist_timestamp),
        }
        return json.dumps(message)
class IMUPublisher(PublisherBase):
"""
Marvelmindデータ(辞書型、IMUデータのみ)を
AWS IoT CoreへPublishするパーツクラス。
"""
def run(self, imu_x, imu_y, imu_z, imu_qw, imu_qx, imu_qy, imu_qz,
imu_vx, imu_vy, imu_vz, imu_ax, imu_ay, imu_az, imu_gx, imu_gy, imu_gz,
imu_mx, imu_my, imu_mz,
imu_timestamp):
"""
Marvelmindデータ(辞書型、IMUデータのみ)をPublishする。
引数:
imu_x 位置情報(X軸)
imu_y 位置情報(Y軸)
imu_z 位置情報(Z軸)
imu_qw 四元数(Q)
imu_qx 四元数(X)
imu_qy 四元数(Y)
imu_qz 四元数(Z)
imu_vx 速度(X軸)
imu_vy 速度(Y軸)
imu_vz 速度(Z軸)
imu_ax 加速度(X軸)
imu_ay 加速度(Y軸)
imu_az 加速度(Z軸)
imu_gx 角速度(X軸)
imu_gy 角速度(Y軸)
imu_gz 角速度(Z軸)
imu_mx 磁束密度(X軸)
imu_my 磁束密度(Y軸)
imu_mz 磁束密度(Z軸)
imu_timestamp IMUデータ取得時刻
戻り値:
なし
"""
ret = self.client.publish(
self.topic,
self.to_message(
imu_x, imu_y, imu_z, imu_qw, imu_qx, imu_qy, imu_qz,
imu_vx, imu_vy, imu_vz, imu_ax, imu_ay, imu_az,
imu_gx, imu_gy, imu_gz, imu_mx, imu_my, imu_mz,
imu_timestamp),
self.qos)
if self.debug:
print('[IMUPublisher] publish topic={} ret={}'.format(self.topic, str(ret)))
print('[IMUPublisher] msg={}'.format(self.to_message(
imu_x, imu_y, imu_z, imu_qw, imu_qx, imu_qy, imu_qz,
imu_vx, imu_vy, imu_vz, imu_ax, imu_ay, imu_az,
imu_gx, imu_gy, imu_gz, imu_mx, imu_my, imu_mz,
imu_timestamp)))
def to_message(self, imu_x, imu_y, imu_z,
imu_qw, imu_qx, imu_qy, imu_qz,
imu_vx, imu_vy, imu_vz,
imu_ax, imu_ay, imu_az,
imu_gx, imu_gy, imu_gz,
imu_mx, imu_my, imu_mz,
imu_timestamp):
"""
Marvelmindデータ(辞書型、IMUデータのみ)を
メッセージ文字列化する。
引数:
imu_x 位置情報(X軸)
imu_y 位置情報(Y軸)
imu_z 位置情報(Z軸)
imu_qw 四元数(Q)
imu_qx 四元数(X)
imu_qy 四元数(Y)
imu_qz 四元数(Z)
imu_vx 速度(X軸)
imu_vy 速度(Y軸)
imu_vz 速度(Z軸)
imu_ax 加速度(X軸)
imu_ay 加速度(Y軸)
imu_az 加速度(Z軸)
imu_gx 角速度(X軸)
imu_gy 角速度(Y軸)
imu_gz 角速度(Z軸)
imu_mx 磁束密度(X軸)
imu_my 磁束密度(Y軸)
imu_mz 磁束密度(Z軸)
imu_timestamp IMUデータ取得時刻
戻り値:
メッセージ文字列
"""
message = {
'imu/x': to_float(imu_x),
'imu/y': to_float(imu_y),
'imu/z': to_float(imu_z),
'imu/qw': to_float(imu_qw),
'imu/qx': to_float(imu_qx),
'imu/qy': to_float(imu_qy),
'imu/qz': to_float(imu_qz),
'imu/vx': to_float(imu_vx),
'imu/vy': to_float(imu_vy),
'imu/vz': to_float(imu_vz),
'imu/ax': to_float(imu_ax),
'imu/ay': to_float(imu_ay),
'imu/az': to_float(imu_az),
'imu/gx': to_float(imu_gx),
'imu/gy': to_float(imu_gy),
'imu/gz': to_float(imu_gz),
'imu/mx': to_float(imu_mx),
'imu/my': to_float(imu_my),
'imu/mz': to_float(imu_mz),
'imu/timestamp': to_float(imu_timestamp),
}
return json.dumps(message) | 37.582375 | 103 | 0.504741 | # -*- coding: utf-8 -*-
"""
MarvelmindデータをAWS IoT Core へ Publish するパーツクラスを定義するモジュール。
"""
import time
import json
from .base import PublisherBase, to_float, to_str
from .topic import pub_hedge_usnav_json_topic, pub_hedge_usnav_raw_json_topic, pub_hedge_imu_json_topic
class USNavPublisher(PublisherBase):
    """
    Publishes Marvelmind ultrasonic-navigation position samples (dict)
    to AWS IoT Core.
    """
    def __init__(self, aws_iot_client_factory, debug=False):
        """
        Register as the 'USNav' publisher and resolve its topic.
        Args:
            aws_iot_client_factory: factory providing the AWS IoT client
            debug: when True, log the resolved topic and every publish
        """
        super().__init__(aws_iot_client_factory, 'USNav', debug)
        self.topic = pub_hedge_usnav_json_topic(
            self.system, self.thing_type, self.thing_group, self.thing_name)
        if self.debug:
            print('[USNavPublisher] topic name = {}'.format(self.topic))
        self.qos = 0  # QoS 0: fire-and-forget
    def run(self, usnav_id, usnav_x, usnav_y, usnav_z, usnav_angle, usnav_timestamp):
        """
        Publish one position sample as a JSON message.
        Args:
            usnav_id:        mobile beacon id
            usnav_x/y/z:     position on each axis
            usnav_angle:     heading
            usnav_timestamp: acquisition time
        Returns:
            None
        """
        ret = self.client.publish(
            self.topic,
            self.to_message(
                usnav_id, usnav_x, usnav_y, usnav_z, usnav_angle, usnav_timestamp),
            self.qos)
        if self.debug:
            # to_message() is rebuilt below purely for the log output.
            print('[USNavPublisher] publish topic={} ret={}'.format(self.topic, str(ret)))
            print('[USNavPublisher] msg={}'.format(self.to_message(
                usnav_id, usnav_x, usnav_y, usnav_z, usnav_angle, usnav_timestamp)))
    def to_message(self, usnav_id, usnav_x, usnav_y, usnav_z,
            usnav_angle, usnav_timestamp):
        """
        Serialize one position sample to a JSON string with 'usnav/...'
        keys; the id goes through to_str, numeric fields through to_float.
        Args:
            usnav_id:        mobile beacon id
            usnav_x/y/z:     position on each axis
            usnav_angle:     heading
            usnav_timestamp: acquisition time
        Returns:
            JSON message string
        """
        message = {
            'usnav/id': to_str(usnav_id),
            'usnav/x': to_float(usnav_x),
            'usnav/y': to_float(usnav_y),
            'usnav/z': to_float(usnav_z),
            'usnav/angle': to_float(usnav_angle),
            'usnav/timestamp': to_float(usnav_timestamp),
        }
        return json.dumps(message)
class USNavRawPublisher(PublisherBase):
    """
    Publishes Marvelmind raw beacon-to-beacon distance samples (dict)
    to AWS IoT Core.
    """
    def __init__(self, aws_iot_client_factory, debug=False):
        """
        Register as the 'USNavRaw' publisher and resolve its topic.
        Args:
            aws_iot_client_factory: factory providing the AWS IoT client
            debug: when True, log the resolved topic and every publish
        """
        super().__init__(aws_iot_client_factory, 'USNavRaw', debug)
        self.topic = pub_hedge_usnav_raw_json_topic(
            self.system, self.thing_type, self.thing_group, self.thing_name)
        if self.debug:
            print('[USNavRawPublisher] topic name = {}'.format(self.topic))
        self.qos = 0  # QoS 0: fire-and-forget
    def run(self, dist_id, dist_b1, dist_b1d, dist_b2, dist_b2d,
            dist_b3, dist_b3d, dist_b4, dist_b4d, dist_timestamp):
        """
        Publish one distance sample as a JSON message.
        Args:
            dist_id:         mobile beacon id
            dist_b1..b4:     ids of the four reference beacons
            dist_b1d..b4d:   distance to each reference beacon
            dist_timestamp:  acquisition time
        Returns:
            None
        """
        ret = self.client.publish(
            self.topic,
            self.to_message(
                dist_id, dist_b1, dist_b1d, dist_b2, dist_b2d,
                dist_b3, dist_b3d, dist_b4, dist_b4d, dist_timestamp),
            self.qos)
        if self.debug:
            # to_message() is rebuilt below purely for the log output.
            print('[USNavRawPublisher] publish topic={} ret={}'.format(self.topic, str(ret)))
            print('[USNavRawPublisher] msg={}'.format(self.to_message(
                dist_id, dist_b1, dist_b1d, dist_b2, dist_b2d,
                dist_b3, dist_b3d, dist_b4, dist_b4d, dist_timestamp)))
    def to_message(self, dist_id, dist_b1, dist_b1d, dist_b2,
            dist_b2d, dist_b3, dist_b3d, dist_b4, dist_b4d, dist_timestamp):
        """
        Serialize one distance sample to a JSON string with 'dist/...'
        keys; beacon ids go through to_str, distances through to_float.
        Args:
            dist_id:         mobile beacon id
            dist_b1..b4:     ids of the four reference beacons
            dist_b1d..b4d:   distance to each reference beacon
            dist_timestamp:  acquisition time
        Returns:
            JSON message string
        """
        message = {
            'dist/id': to_str(dist_id),
            'dist/b1': to_str(dist_b1),
            'dist/b1d': to_float(dist_b1d),
            'dist/b2': to_str(dist_b2),
            'dist/b2d': to_float(dist_b2d),
            'dist/b3': to_str(dist_b3),
            'dist/b3d': to_float(dist_b3d),
            'dist/b4': to_str(dist_b4),
            'dist/b4d': to_float(dist_b4d),
            'dist/timestamp': to_float(dist_timestamp),
        }
        return json.dumps(message)
class IMUPublisher(PublisherBase):
    """
    Part class that publishes Marvelmind IMU data (as a JSON message built
    from a dict) to AWS IoT Core.
    """

    def __init__(self, aws_iot_client_factory, debug=False):
        """
        Resolve the publish topic from the factory's thing metadata.

        Args:
            aws_iot_client_factory: factory providing the AWS IoT client and
                thing metadata (system / thing type / group / name).
            debug: emit verbose logging when True.
        """
        super().__init__(aws_iot_client_factory, 'IMU', debug)
        self.topic = pub_hedge_imu_json_topic(
            self.system, self.thing_type, self.thing_group, self.thing_name)
        if self.debug:
            print('[IMUPublisher] topic name = {}'.format(self.topic))
        # QoS 0 (at most once): acceptable for high-rate IMU samples.
        self.qos = 0

    def run(self, imu_x, imu_y, imu_z, imu_qw, imu_qx, imu_qy, imu_qz,
            imu_vx, imu_vy, imu_vz, imu_ax, imu_ay, imu_az, imu_gx, imu_gy, imu_gz,
            imu_mx, imu_my, imu_mz,
            imu_timestamp):
        """
        Publish one IMU sample.

        Args:
            imu_x/imu_y/imu_z:           position
            imu_qw/imu_qx/imu_qy/imu_qz: orientation quaternion
            imu_vx/imu_vy/imu_vz:        velocity
            imu_ax/imu_ay/imu_az:        acceleration
            imu_gx/imu_gy/imu_gz:        angular velocity
            imu_mx/imu_my/imu_mz:        magnetic flux density
            imu_timestamp:               acquisition time
        Returns:
            None
        """
        # Fix: serialize once and reuse. The original called to_message()
        # again inside the debug branch, JSON-encoding the same sample up to
        # three times per call.
        message = self.to_message(
            imu_x, imu_y, imu_z, imu_qw, imu_qx, imu_qy, imu_qz,
            imu_vx, imu_vy, imu_vz, imu_ax, imu_ay, imu_az,
            imu_gx, imu_gy, imu_gz, imu_mx, imu_my, imu_mz,
            imu_timestamp)
        ret = self.client.publish(self.topic, message, self.qos)
        if self.debug:
            print('[IMUPublisher] publish topic={} ret={}'.format(self.topic, str(ret)))
            print('[IMUPublisher] msg={}'.format(message))

    def to_message(self, imu_x, imu_y, imu_z,
                   imu_qw, imu_qx, imu_qy, imu_qz,
                   imu_vx, imu_vy, imu_vz,
                   imu_ax, imu_ay, imu_az,
                   imu_gx, imu_gy, imu_gz,
                   imu_mx, imu_my, imu_mz,
                   imu_timestamp):
        """
        Serialize one IMU sample into a JSON message string.

        Args:
            imu_x/imu_y/imu_z:           position
            imu_qw/imu_qx/imu_qy/imu_qz: orientation quaternion
            imu_vx/imu_vy/imu_vz:        velocity
            imu_ax/imu_ay/imu_az:        acceleration
            imu_gx/imu_gy/imu_gz:        angular velocity
            imu_mx/imu_my/imu_mz:        magnetic flux density
            imu_timestamp:               acquisition time
        Returns:
            JSON message string
        """
        message = {
            'imu/x': to_float(imu_x),
            'imu/y': to_float(imu_y),
            'imu/z': to_float(imu_z),
            'imu/qw': to_float(imu_qw),
            'imu/qx': to_float(imu_qx),
            'imu/qy': to_float(imu_qy),
            'imu/qz': to_float(imu_qz),
            'imu/vx': to_float(imu_vx),
            'imu/vy': to_float(imu_vy),
            'imu/vz': to_float(imu_vz),
            'imu/ax': to_float(imu_ax),
            'imu/ay': to_float(imu_ay),
            'imu/az': to_float(imu_az),
            'imu/gx': to_float(imu_gx),
            'imu/gy': to_float(imu_gy),
            'imu/gz': to_float(imu_gz),
            'imu/mx': to_float(imu_mx),
            'imu/my': to_float(imu_my),
            'imu/mz': to_float(imu_mz),
            'imu/timestamp': to_float(imu_timestamp),
        }
        return json.dumps(message)
e348e8fc45a6e77e7787283a95d82cd9fd9945ea | 183 | py | Python | python/moBu/menu/clean/__init__.py | CountZer0/PipelineConstructionSet | 0aa73a8a63c72989b2d1c677efd78dad4388d335 | [
"BSD-3-Clause"
] | 21 | 2015-04-27T05:01:36.000Z | 2021-11-22T13:45:14.000Z | python/moBu/menu/clean/__init__.py | 0xb1dd1e/PipelineConstructionSet | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | [
"BSD-3-Clause"
] | null | null | null | python/moBu/menu/clean/__init__.py | 0xb1dd1e/PipelineConstructionSet | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | [
"BSD-3-Clause"
] | 7 | 2015-04-11T11:37:19.000Z | 2020-05-22T09:49:04.000Z | '''
Author: Jason.Parks
Created: April 25, 2012
Module: menu.clean.__init__
Purpose: to import menu clean
'''
if not __name__ == '__main__':
print "menu.clean.__init__ imported" | 20.333333 | 38 | 0.721311 | '''
Author: Jason.Parks
Created: April 25, 2012
Module: menu.clean.__init__
Purpose: to import menu clean
'''
if not __name__ == '__main__':
print "menu.clean.__init__ imported" | 0 | 0 | 0 |
21d6858d5d6148960d84325725569e7f3ca79771 | 23,676 | py | Python | lib/membase/helper/cluster_helper.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | [
"Apache-2.0"
] | null | null | null | lib/membase/helper/cluster_helper.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | [
"Apache-2.0"
] | null | null | null | lib/membase/helper/cluster_helper.py | pavithra-mahamani/testrunner | d204491caa23f1fbe90505646534ed7810d96289 | [
"Apache-2.0"
] | 1 | 2020-07-24T07:15:59.000Z | 2020-07-24T07:15:59.000Z | from membase.api.rest_client import RestConnection, RestHelper
from memcached.helper.data_helper import MemcachedClientHelper
from remote.remote_util import RemoteMachineShellConnection
from mc_bin_client import MemcachedClient, MemcachedError
from membase.api.exception import ServerAlreadyJoinedException
from membase.helper.rebalance_helper import RebalanceHelper
from TestInput import TestInputSingleton
import memcacheConstants
import logger
import testconstants
import time
import queue
from threading import Thread
import traceback
| 44.420263 | 127 | 0.564285 | from membase.api.rest_client import RestConnection, RestHelper
from memcached.helper.data_helper import MemcachedClientHelper
from remote.remote_util import RemoteMachineShellConnection
from mc_bin_client import MemcachedClient, MemcachedError
from membase.api.exception import ServerAlreadyJoinedException
from membase.helper.rebalance_helper import RebalanceHelper
from TestInput import TestInputSingleton
import memcacheConstants
import logger
import testconstants
import time
import queue
from threading import Thread
import traceback
class ClusterOperationHelper(object):
# the first ip is taken as the master ip
# Returns True if cluster successfully finished then rebalance
@staticmethod
def add_and_rebalance(servers, wait_for_rebalance=True):
log = logger.Logger.get_logger()
master = servers[0]
all_nodes_added = True
rebalanced = True
rest = RestConnection(master)
if len(servers) > 1:
for serverInfo in servers[1:]:
log.info('adding node : {0}:{1} to the cluster'.format(
serverInfo.ip, serverInfo.port))
otpNode = rest.add_node(master.rest_username, master.rest_password, serverInfo.ip, port=serverInfo.port)
if otpNode:
log.info('added node : {0} to the cluster'.format(otpNode.id))
else:
all_nodes_added = False
break
if all_nodes_added:
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
if wait_for_rebalance:
rebalanced &= rest.monitorRebalance()
else:
rebalanced = False
return all_nodes_added and rebalanced
@staticmethod
def add_all_nodes_or_assert(master, all_servers, rest_settings, test_case):
log = logger.Logger.get_logger()
otpNodes = []
all_nodes_added = True
rest = RestConnection(master)
for serverInfo in all_servers:
if serverInfo.ip != master.ip:
log.info('adding node : {0}:{1} to the cluster'.format(
serverInfo.ip, serverInfo.port))
otpNode = rest.add_node(rest_settings.rest_username,
rest_settings.rest_password,
serverInfo.ip)
if otpNode:
log.info('added node : {0} to the cluster'.format(otpNode.id))
otpNodes.append(otpNode)
else:
all_nodes_added = False
if not all_nodes_added:
if test_case:
test_case.assertTrue(all_nodes_added,
msg="unable to add all the nodes to the cluster")
else:
log.error("unable to add all the nodes to the cluster")
return otpNodes
#wait_if_warmup=True is useful in tearDown method for (auto)failover tests
@staticmethod
def wait_for_ns_servers_or_assert(servers, testcase, wait_time=360,
wait_if_warmup=False, debug=True):
for server in servers:
rest = RestConnection(server)
log = logger.Logger.get_logger()
if debug:
log.info("waiting for ns_server @ {0}:{1}"
.format(server.ip, server.port))
if RestHelper(rest).is_ns_server_running(wait_time):
if debug:
log.info("ns_server @ {0}:{1} is running"
.format(server.ip, server.port))
elif wait_if_warmup:
# wait when warmup completed
buckets = rest.get_buckets()
for bucket in buckets:
testcase.assertTrue(ClusterOperationHelper._wait_warmup_completed(testcase,\
[server], bucket.name, wait_time), "warmup was not completed!")
else:
testcase.fail("ns_server {0} is not running in {1} sec"
.format(server.ip, wait_time))
# returns true if warmup is completed in wait_time sec
# otherwise return false
@staticmethod
def _wait_warmup_completed(self, servers, bucket_name, wait_time=300):
warmed_up = False
log = logger.Logger.get_logger()
for server in servers:
mc = None
start = time.time()
# Try to get the stats for 5 minutes, else hit out.
while time.time() - start < wait_time:
# Get the wamrup time for each server
try:
mc = MemcachedClientHelper.direct_client(server, bucket_name)
stats = mc.stats()
if stats is not None and 'ep_warmup_thread' in stats and stats['ep_warmup_thread'] == 'complete':
break
else:
log.info(" Did not get the stats from the server yet, trying again.....")
time.sleep(2)
except Exception as e:
log.error(
"Could not get ep_warmup_time stats from server %s:%s, exception %s" %
(server.ip, server.port, e))
else:
self.fail(
"Fail! Unable to get the warmup-stats from server %s:%s after trying for %s seconds." % (
server.ip, server.port, wait_time))
# Waiting for warm-up
start = time.time()
warmed_up = False
while time.time() - start < wait_time and not warmed_up:
if mc.stats()["ep_warmup_thread"] == "complete":
log.info("warmup completed, awesome!!! Warmed up. %s items " % (mc.stats()["curr_items_tot"]))
warmed_up = True
continue
elif mc.stats()["ep_warmup_thread"] == "running":
log.info(
"still warming up .... curr_items_tot : %s" % (mc.stats()["curr_items_tot"]))
else:
self.fail("Value of ep warmup thread does not exist, exiting from this server")
time.sleep(5)
mc.close()
return warmed_up
@staticmethod
def verify_persistence(servers, test, keys_count=400000, timeout_in_seconds=300):
log = logger.Logger.get_logger()
master = servers[0]
rest = RestConnection(master)
log.info("Verifying Persistence")
buckets = rest.get_buckets()
for bucket in buckets:
# Load some data
l_threads = MemcachedClientHelper.create_threads([master], bucket.name,
- 1, keys_count, {1024: 0.50, 512: 0.50}, 2, -1,
True, True)
[t.start() for t in l_threads]
# Do persistence verification
ready = ClusterOperationHelper.persistence_verification(servers, bucket.name, timeout_in_seconds)
log.info("Persistence Verification returned ? {0}".format(ready))
log.info("waiting for persistence threads to finish...")
for t in l_threads:
t.aborted = True
for t in l_threads:
t.join()
log.info("persistence thread has finished...")
test.assertTrue(ready, msg="Cannot verify persistence")
@staticmethod
def persistence_verification(servers, bucket, timeout_in_seconds=1260):
log = logger.Logger.get_logger()
verification_threads = []
queue = queue.Queue()
rest = RestConnection(servers[0])
nodes = rest.get_nodes()
nodes_ip = []
for node in nodes:
nodes_ip.append(node.ip)
for i in range(len(servers)):
if servers[i].ip in nodes_ip:
log.info("Server {0}:{1} part of cluster".format(
servers[i].ip, servers[i].port))
rest = RestConnection(servers[i])
t = Thread(target=ClusterOperationHelper.persistence_verification_per_node,
name="verification-thread-{0}".format(servers[i]),
args=(rest, bucket, queue, timeout_in_seconds))
verification_threads.append(t)
for t in verification_threads:
t.start()
for t in verification_threads:
t.join()
log.info("thread {0} finished".format(t.name))
while not queue.empty():
item = queue.get()
if item is False:
return False
return True
@staticmethod
def persistence_verification_per_node(rest, bucket, queue=None, timeout=1260):
log = logger.Logger.get_logger()
stat_key = 'ep_flusher_todo'
start = time.time()
stats = []
# Collect stats data points
while time.time() - start <= timeout:
_new_stats = rest.get_bucket_stats(bucket)
if _new_stats and 'ep_flusher_todo' in _new_stats:
stats.append(_new_stats[stat_key])
time.sleep(0.5)
else:
log.error("unable to obtain stats for bucket : {0}".format(bucket))
value_90th = ClusterOperationHelper.percentile(stats, 90)
average = float(sum(stats)) / len(stats)
log.info("90th percentile value is {0} and average {1}".format(value_90th, average))
if value_90th == 0 and average == 0:
queue.put(False)
return
queue.put(True)
@staticmethod
def percentile(samples, percentile):
element_idx = int(len(samples) * (percentile / 100.0))
samples.sort()
value = samples[element_idx]
return value
@staticmethod
def start_cluster(servers):
for server in servers:
shell = RemoteMachineShellConnection(server)
if shell.is_couchbase_installed():
shell.start_couchbase()
else:
shell.start_membase()
@staticmethod
def stop_cluster(servers):
for server in servers:
shell = RemoteMachineShellConnection(server)
if shell.is_couchbase_installed():
shell.stop_couchbase()
else:
shell.stop_membase()
@staticmethod
def cleanup_cluster(servers, wait_for_rebalance=True, master = None):
log = logger.Logger.get_logger()
if master is None:
master = servers[0]
rest = RestConnection(master)
helper = RestHelper(rest)
helper.is_ns_server_running(timeout_in_seconds=testconstants.NS_SERVER_TIMEOUT)
nodes = rest.node_statuses()
master_id = rest.get_nodes_self().id
for node in nodes:
if int(node.port) in range(9091, 9991):
rest.eject_node(node)
nodes.remove(node)
if len(nodes) > 1:
log.info("rebalancing all nodes in order to remove nodes")
rest.log_client_error("Starting rebalance from test, ejected nodes %s" % \
[node.id for node in nodes if node.id != master_id])
removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
ejectedNodes=[node.id for node in nodes if node.id != master_id],
wait_for_rebalance=wait_for_rebalance)
success_cleaned = []
alt_addr = TestInputSingleton.input.param("alt_addr", False)
for removed in [node for node in nodes if (node.id != master_id)]:
removed.rest_password = servers[0].rest_password
removed.rest_username = servers[0].rest_username
try:
if alt_addr:
for server in servers:
shell = RemoteMachineShellConnection(server)
internal_IP = shell.get_ip_address()
internal_IP = [x for x in internal_IP if x != "127.0.0.1"]
shell.disconnect()
if internal_IP == removed.ip:
rest = RestConnection(server)
break
else:
rest = RestConnection(removed)
except Exception as ex:
log.error("can't create rest connection after rebalance out for ejected nodes,\
will retry after 10 seconds according to MB-8430: {0} ".format(ex))
time.sleep(10)
rest = RestConnection(removed)
start = time.time()
while time.time() - start < 30:
if len(rest.get_pools_info()["pools"]) == 0:
success_cleaned.append(removed)
break
else:
time.sleep(0.1)
if time.time() - start > 10:
log.error("'pools' on node {0}:{1} - {2}".format(
removed.ip, removed.port, rest.get_pools_info()["pools"]))
for node in {node for node in nodes if (node.id != master_id)} - set(success_cleaned):
log.error("node {0}:{1} was not cleaned after removing from cluster".format(
removed.ip, removed.port))
try:
if alt_addr:
for server in servers:
shell = RemoteMachineShellConnection(server)
internal_IP = shell.get_ip_address()
internal_IP = [x for x in internal_IP if x != "127.0.0.1"]
shell.disconnect()
if internal_IP == removed.ip:
rest = RestConnection(server)
break
else:
rest = RestConnection(node)
if not alt_addr:
rest.force_eject_node()
except Exception as ex:
log.error("force_eject_node {0}:{1} failed: {2}".format(removed.ip, removed.port, ex))
if len({node for node in nodes if (node.id != master_id)}\
- set(success_cleaned)) != 0:
if not alt_addr:
raise Exception("not all ejected nodes were cleaned successfully")
log.info("removed all the nodes from cluster associated with {0} ? {1}".format(servers[0], \
[(node.id, node.port) for node in nodes if (node.id != master_id)]))
@staticmethod
def flushctl_start(servers, username=None, password=None):
for server in servers:
c = MemcachedClient(server.ip, 11210)
if username:
c.sasl_auth_plain(username, password)
c.start_persistence()
@staticmethod
def flushctl_stop(servers, username=None, password=None):
for server in servers:
c = MemcachedClient(server.ip, 11210)
if username:
c.sasl_auth_plain(username, password)
c.stop_persistence()
@staticmethod
def flush_os_caches(servers):
log = logger.Logger.get_logger()
for server in servers:
try:
shell = RemoteMachineShellConnection(server)
shell.flush_os_caches()
log.info("Clearing os caches on {0}".format(server))
except:
pass
@staticmethod
def flushctl_set(master, key, val, bucket='default'):
rest = RestConnection(master)
servers = rest.get_nodes()
for server in servers:
if "kv" in server.services:
_server = {"ip": server.ip, "port": server.port,
"username": master.rest_username,
"password": master.rest_password}
ClusterOperationHelper.flushctl_set_per_node(_server, key, val, bucket)
@staticmethod
def flushctl_set_per_node(server, key, val, bucket='default'):
log = logger.Logger.get_logger()
rest = RestConnection(server)
node = rest.get_nodes_self()
mc = MemcachedClientHelper.direct_client(server, bucket)
log.info("Setting flush param on server {0}, {1} to {2} on {3}".format(server, key, val, bucket))
# Workaround for CBQE-249, ideally this should be node.version
index_path = node.storage[0].get_index_path()
if index_path is '':
# Indicates non 2.0 build
rv = mc.set_flush_param(key, str(val))
else:
type = ClusterOperationHelper._get_engine_param_type(key)
if val == 'true' or val == 'false':
rv = mc.set_param(key, val, type)
else:
rv = mc.set_param(key, str(val), type)
log.info("Setting flush param on server {0}, {1} to {2}, result: {3}".format(server, key, val, rv))
mc.close()
@staticmethod
def _get_engine_param_type(key):
tap_params = ['tap_keepalive', 'tap_throttle_queue_cap', 'tap_throttle_threshold']
checkpoint_params = ['chk_max_items', 'chk_period', 'inconsistent_slave_chk', 'keep_closed_chks',
'max_checkpoints', 'item_num_based_new_chk']
flush_params = ['bg_fetch_delay', 'couch_response_timeout', 'exp_pager_stime', 'flushall_enabled',
'klog_compactor_queue_cap', 'klog_max_log_size', 'klog_max_entry_ratio',
'queue_age_cap', 'max_size', 'max_txn_size', 'mem_high_wat', 'mem_low_wat',
'min_data_age', 'timing_log', 'alog_sleep_time', 'bfilter_enabled' ]
if key in tap_params:
return memcacheConstants.ENGINE_PARAM_TAP
if key in checkpoint_params:
return memcacheConstants.ENGINE_PARAM_CHECKPOINT
if key in flush_params:
return memcacheConstants.ENGINE_PARAM_FLUSH
@staticmethod
def set_expiry_pager_sleep_time(master, bucket, value=30):
log = logger.Logger.get_logger()
rest = RestConnection(master)
servers = rest.get_nodes()
for server in servers:
# this is not bucket specific so no need to pass in the bucketname
log.info("connecting to memcached {0}:{1}".format(server.ip, server.memcached))
mc = MemcachedClientHelper.direct_client(server, bucket)
log.info("Set exp_pager_stime flush param on server {0}:{1}".format(server.ip, server.port))
try:
mc.set_flush_param("exp_pager_stime", str(value))
log.info("Set exp_pager_stime flush param on server {0}:{1}".format(server.ip, server.port))
except Exception as ex:
traceback.print_exc()
log.error("Unable to set exp_pager_stime flush param on memcached {0}:{1}".format(server.ip, server.memcached))
@staticmethod
def get_mb_stats(servers, key):
log = logger.Logger.get_logger()
for server in servers:
c = MemcachedClient(server.ip, 11210)
log.info("Get flush param on server {0}, {1}".format(server, key))
value = c.stats().get(key, None)
log.info("Get flush param on server {0}, {1}".format(server, value))
c.close()
@staticmethod
def change_erlang_threads_values(servers, sync_threads=True, num_threads='16:16'):
"""Change the the type of sync erlang threads and its value
sync_threads=True means sync threads +S with default threads number equal 16:16
sync_threads=False means async threads: +A 16, for instance
Default: +S 16:16
"""
log = logger.Logger.get_logger()
for server in servers:
sh = RemoteMachineShellConnection(server)
product = "membase"
if sh.is_couchbase_installed():
product = "couchbase"
sync_type = sync_threads and "S" or "A"
command = "sed -i 's/+[A,S] .*/+%s %s \\\/g' /opt/%s/bin/%s-server" % \
(sync_type, num_threads, product, product)
o, r = sh.execute_command(command)
sh.log_command_output(o, r)
msg = "modified erlang +%s to %s for server %s"
log.info(msg % (sync_type, num_threads, server.ip))
@staticmethod
def set_erlang_schedulers(servers, value="16:16"):
"""
Set num of erlang schedulers.
Also erase async option (+A)
"""
ClusterOperationHelper.stop_cluster(servers)
log = logger.Logger.get_logger()
for server in servers:
sh = RemoteMachineShellConnection(server)
product = "membase"
if sh.is_couchbase_installed():
product = "couchbase"
command = "sed -i 's/S\+ 128:128/S %s/' /opt/%s/bin/%s-server"\
% (value, product, product)
o, r = sh.execute_command(command)
sh.log_command_output(o, r)
log.info("modified erlang +A to %s for server %s"
% (value, server.ip))
ClusterOperationHelper.start_cluster(servers)
@staticmethod
def change_erlang_gc(servers, value=None):
"""Change the frequency of erlang_gc process
export ERL_FULLSWEEP_AFTER=0 (most aggressive)
Default: None
"""
log = logger.Logger.get_logger()
if value is None:
return
for server in servers:
sh = RemoteMachineShellConnection(server)
product = "membase"
if sh.is_couchbase_installed():
product = "couchbase"
command = "sed -i '/exec erl/i export ERL_FULLSWEEP_AFTER=%s' /opt/%s/bin/%s-server" % \
(value, product, product)
o, r = sh.execute_command(command)
sh.log_command_output(o, r)
msg = "modified erlang gc to full_sweep_after %s on %s " % (value, server.ip)
log.info(msg)
@staticmethod
def begin_rebalance_in(master, servers, timeout=5):
RebalanceHelper.begin_rebalance_in(master, servers, timeout)
@staticmethod
def begin_rebalance_out(master, servers, timeout=5):
RebalanceHelper.begin_rebalance_out(master, servers, timeout)
@staticmethod
def end_rebalance(master):
RebalanceHelper.end_rebalance(master)
@staticmethod
# Returns the otpNode for Orchestrator
def find_orchestrator(master):
rest = RestConnection(master)
status, content = ClusterOperationHelper.find_orchestrator_with_rest(rest)
# Get rid of single quotes 'ns_1@10.1.3.74'
content = content.replace("'", '')
return status, content
@staticmethod
def find_orchestrator_with_rest(rest):
command = "mb_master:master_node()."
status, content = rest.diag_eval(command)
return status, content
@staticmethod
def set_vbuckets(master, vbuckets):
rest = RestConnection(master)
command = "rpc:eval_everywhere(ns_config, set, [couchbase_num_vbuckets_default, {0}]).".format(vbuckets)
status, content = rest.diag_eval(command)
return status, content
| 18,936 | 4,176 | 23 |
f55fc1bf15829cde39cd7479fd5ae4fd2aa6ba00 | 1,516 | py | Python | tests/test_models/test_backbone/test_resnet.py | automaton123456/nanodet | 0e5ca77ca936c2cb4bf040cc4ed9caf226a4a5b1 | [
"Apache-2.0"
] | 4,388 | 2020-11-22T12:10:28.000Z | 2022-03-31T13:51:51.000Z | tests/test_models/test_backbone/test_resnet.py | tkupek/nanodet | b04fafe2e91b3818b95cc3cd299bfe1ed014cb18 | [
"Apache-2.0"
] | 381 | 2020-11-23T08:02:26.000Z | 2022-03-31T09:35:13.000Z | tests/test_models/test_backbone/test_resnet.py | tkupek/nanodet | b04fafe2e91b3818b95cc3cd299bfe1ed014cb18 | [
"Apache-2.0"
] | 838 | 2020-11-22T13:01:43.000Z | 2022-03-30T07:24:38.000Z | import pytest
import torch
from nanodet.model.backbone import ResNet, build_backbone
| 32.255319 | 88 | 0.593668 | import pytest
import torch
from nanodet.model.backbone import ResNet, build_backbone
def test_resnet():
    """Exercise ResNet construction errors and forward feature shapes."""
    # An unknown depth must be rejected by the backbone registry.
    with pytest.raises(KeyError):
        cfg = dict(name="ResNet", depth=15)
        build_backbone(cfg)

    # out_stages outside 1..4 must be rejected.
    with pytest.raises(AssertionError):
        ResNet(depth=18, out_stages=(4, 5, 6))

    dummy = torch.rand(1, 3, 64, 64)

    def check_shapes(model, channels):
        # Feature maps halve in spatial size per stage: 16, 8, 4, 2.
        feats = model(dummy)
        for level, width in enumerate(channels):
            side = 16 >> level
            assert feats[level].shape == (1, width, side, side)

    basic_channels = (64, 128, 256, 512)
    bottleneck_channels = (256, 512, 1024, 2048)
    check_shapes(
        ResNet(depth=18, out_stages=(1, 2, 3, 4), activation="PReLU", pretrain=True),
        basic_channels)
    check_shapes(
        ResNet(depth=34, out_stages=(1, 2, 3, 4), activation="LeakyReLU", pretrain=False),
        basic_channels)
    check_shapes(
        ResNet(depth=50, out_stages=(1, 2, 3, 4), pretrain=False),
        bottleneck_channels)
    check_shapes(
        ResNet(depth=101, out_stages=(1, 2, 3, 4), pretrain=False),
        bottleneck_channels)
| 1,406 | 0 | 23 |
76f60614b7a4484297d586fa523b1b6aa918d7e8 | 1,145 | py | Python | stupid_not_work/pwm_socket_server_gui.py | mikeroslikov/BLIMP | 84718a98bb4b31a8a73155547ed1e8537ef80eec | [
"Unlicense"
] | null | null | null | stupid_not_work/pwm_socket_server_gui.py | mikeroslikov/BLIMP | 84718a98bb4b31a8a73155547ed1e8537ef80eec | [
"Unlicense"
] | null | null | null | stupid_not_work/pwm_socket_server_gui.py | mikeroslikov/BLIMP | 84718a98bb4b31a8a73155547ed1e8537ef80eec | [
"Unlicense"
] | null | null | null | import socket
# Throttle-over-TCP test rig: a Tk slider whose value is streamed to a
# single TCP client as a 2-byte big-endian integer, ten times a second.
# NOTE(review): `socket` is imported above this block; `sys` is imported
# here but never used.
import sys
import time
# Slider range; `throttle_position` appears unused in this script.
throttle_position=1000
throttle_min =0
throttle_max = 0xffff
from tkinter import *
# Minimal Tk window holding one throttle slider.
master = Tk()
master.geometry("500x500")
master.columnconfigure(0, weight=1)
master.rowconfigure(0, weight=1)
w1 = Scale(master, from_=throttle_min, to=throttle_max, tickinterval=10)
w1.set(throttle_min)
w1.pack(fill=BOTH)
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
# NOTE(review): hard-coded LAN address — presumably this host's own IP;
# confirm before deploying on a different machine.
server_address = ("192.168.0.94",10000)
print ( "starting up "+ str(server_address))
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
# Serve one client at a time, forever.
while True:
    # Wait for a connection
    print ("waiting for a connection")
    connection, client_address = sock.accept()
    try:
        print("connection from"+ str(client_address))
        # Stream the current slider value (unsigned 16-bit, big-endian)
        # every 100 ms until the client disconnects or sendall raises.
        while True:
            connection.sendall(w1.get().to_bytes(2, byteorder='big'))
            time.sleep(0.1)
            # Pump the Tk event loop manually since mainloop() never runs.
            master.update_idletasks()
            master.update()
    finally:
        # Clean up the connection
        connection.close()
| 23.854167 | 72 | 0.687336 | import socket
import sys
import time
throttle_position=1000
throttle_min =0
throttle_max = 0xffff
from tkinter import *
master = Tk()
master.geometry("500x500")
master.columnconfigure(0, weight=1)
master.rowconfigure(0, weight=1)
w1 = Scale(master, from_=throttle_min, to=throttle_max, tickinterval=10)
w1.set(throttle_min)
w1.pack(fill=BOTH)
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ("192.168.0.94",10000)
print ( "starting up "+ str(server_address))
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
while True:
# Wait for a connection
print ("waiting for a connection")
connection, client_address = sock.accept()
try:
print("connection from"+ str(client_address))
# Receive the data in small chunks and retransmit it
while True:
connection.sendall(w1.get().to_bytes(2, byteorder='big'))
time.sleep(0.1)
master.update_idletasks()
master.update()
finally:
# Clean up the connection
connection.close()
| 0 | 0 | 0 |
4d656673d216ce0be4fe64d21204d4348b38598e | 60 | py | Python | pyroombaadapter/__init__.py | ymollard/PyRoombaAdapter | a4b63e9b97ac2e27a8b472f596a1111eb3c254b9 | [
"MIT"
] | null | null | null | pyroombaadapter/__init__.py | ymollard/PyRoombaAdapter | a4b63e9b97ac2e27a8b472f596a1111eb3c254b9 | [
"MIT"
] | null | null | null | pyroombaadapter/__init__.py | ymollard/PyRoombaAdapter | a4b63e9b97ac2e27a8b472f596a1111eb3c254b9 | [
"MIT"
] | null | null | null | from pyroombaadapter.pyroombaadapter import PyRoombaAdapter
| 30 | 59 | 0.916667 | from pyroombaadapter.pyroombaadapter import PyRoombaAdapter
| 0 | 0 | 0 |
a4dcc5f588d7e513d03afc395d5dd04c07c6cb9d | 654 | py | Python | chapter1/code/params_global_argparse.py | gabrielmahia/ushuhudAI | ee40c9822852f66c6111d1d485dc676b6da70677 | [
"MIT"
] | 74 | 2020-05-19T01:08:03.000Z | 2022-03-31T14:00:41.000Z | chapter1/code/params_global_argparse.py | gabrielmahia/ushuhudAI | ee40c9822852f66c6111d1d485dc676b6da70677 | [
"MIT"
] | 1 | 2021-06-04T06:08:21.000Z | 2021-06-04T06:08:21.000Z | chapter1/code/params_global_argparse.py | gabrielmahia/ushuhudAI | ee40c9822852f66c6111d1d485dc676b6da70677 | [
"MIT"
] | 47 | 2020-05-05T12:06:31.000Z | 2022-03-10T04:45:01.000Z | import argparse
class Parameters:
"""Global parameters"""
parser = argparse.ArgumentParser(description='Testing parameters')
parser.add_argument("-p1", dest="param1", help="parameter1")
parser.add_argument("-p2", dest="param2", help="parameter2")
params = parser.parse_args()
input_parameters = Parameters(param1=params.param1,param2=params.param2)
view_parameters(input_parameters)
| 26.16 | 73 | 0.707951 | import argparse
class Parameters:
"""Global parameters"""
def __init__(self, **kwargs):
self.param1 = kwargs.get("param1")
self.param2 = kwargs.get("param2")
def view_parameters(input_parameters):
print(input_parameters.param1)
print(input_parameters.param2)
parser = argparse.ArgumentParser(description='Testing parameters')
parser.add_argument("-p1", dest="param1", help="parameter1")
parser.add_argument("-p2", dest="param2", help="parameter2")
params = parser.parse_args()
input_parameters = Parameters(param1=params.param1,param2=params.param2)
view_parameters(input_parameters)
| 185 | 0 | 55 |
ec31ee23234715462a66dee082636edc4e8caf82 | 979 | py | Python | scenarios/intersections/6lane/scenario.py | Semanti1/smarts_baseline | 77dbc350f37ae7735c74a2b8f1585c2818ac3421 | [
"MIT"
] | 5 | 2021-06-15T05:06:10.000Z | 2021-12-01T05:11:49.000Z | scenarios/intersections/6lane/scenario.py | Semanti1/smarts_baseline | 77dbc350f37ae7735c74a2b8f1585c2818ac3421 | [
"MIT"
] | null | null | null | scenarios/intersections/6lane/scenario.py | Semanti1/smarts_baseline | 77dbc350f37ae7735c74a2b8f1585c2818ac3421 | [
"MIT"
] | 1 | 2022-03-31T02:14:09.000Z | 2022-03-31T02:14:09.000Z | import os
from pathlib import Path
from smarts.sstudio import gen_scenario
from smarts.sstudio.types import (
Mission,
Route,
SocialAgentActor,
Scenario,
)
actors = [
SocialAgentActor(
name=f"non-interactive-agent-{speed}-v0",
agent_locator="zoo.policies:non-interactive-agent-v0",
policy_kwargs={"speed": speed},
)
for speed in [10, 30, 80]
]
gen_scenario(
Scenario(
social_agent_missions={
"group-1": (actors, [to_mission("edge-north-NS", "edge-south-NS")]),
"group-2": (actors, [to_mission("edge-west-WE", "edge-east-WE")]),
"group-3": (actors, [to_mission("edge-east-EW", "edge-west-EW")]),
"group-4": (actors, [to_mission("edge-south-SN", "edge-north-SN")]),
}
),
output_dir=Path(__file__).parent,
)
| 25.763158 | 80 | 0.614913 | import os
from pathlib import Path
from smarts.sstudio import gen_scenario
from smarts.sstudio.types import (
Mission,
Route,
SocialAgentActor,
Scenario,
)
# One social agent per fixed speed (10/30/80), all driven by the zoo's
# non-interactive agent policy.
actors = [
    SocialAgentActor(
        name=f"non-interactive-agent-{speed}-v0",
        agent_locator="zoo.policies:non-interactive-agent-v0",
        policy_kwargs={"speed": speed},
    )
    for speed in [10, 30, 80]
]
def to_mission(start_edge, end_edge):
    """Build a Mission whose route follows lane 1 from start_edge to end_edge."""
    return Mission(
        route=Route(begin=(start_edge, 1, 0), end=(end_edge, 1, "max"))
    )
# Emit the scenario into this script's directory: the same actor pool is
# reused for four mission groups, one per approach of the intersection.
gen_scenario(
    Scenario(
        social_agent_missions={
            "group-1": (actors, [to_mission("edge-north-NS", "edge-south-NS")]),
            "group-2": (actors, [to_mission("edge-west-WE", "edge-east-WE")]),
            "group-3": (actors, [to_mission("edge-east-EW", "edge-west-EW")]),
            "group-4": (actors, [to_mission("edge-south-SN", "edge-north-SN")]),
        }
    ),
    output_dir=Path(__file__).parent,
)
| 118 | 0 | 23 |
f1d5e52ea40f07596966b608a0b31cc62fe50969 | 4,976 | py | Python | Using Databases with Python/Geodata/geoload.py | Karoline0097/University-of-Michigan-Python-for-Everybody | 8b3999638c0c074ae3c1120de87cf8f31740ebb8 | [
"MIT"
] | null | null | null | Using Databases with Python/Geodata/geoload.py | Karoline0097/University-of-Michigan-Python-for-Everybody | 8b3999638c0c074ae3c1120de87cf8f31740ebb8 | [
"MIT"
] | null | null | null | Using Databases with Python/Geodata/geoload.py | Karoline0097/University-of-Michigan-Python-for-Everybody | 8b3999638c0c074ae3c1120de87cf8f31740ebb8 | [
"MIT"
] | null | null | null | ## STEP: GATHER DATA
# data source: where.data, Google Geodata API
# edit where.data to add an address nearby where you live
# use the Google geocoding API to clean up some user-entered geographic locations of university names
import urllib.request, urllib.parse, urllib.error
import http
import sqlite3
import json
import time
import ssl
import sys

api_key = False
# If you have a Google Places API key, enter it here
# api_key = 'AIzaSy___IDByT70'

if api_key is False:
    # Without a key, use the Py4E data subset (no rate limit); the fixed
    # key 42 is what that server expects.
    api_key = 42
    serviceurl = "http://py4e-data.dr-chuck.net/json?"
else:
    serviceurl = "https://maps.googleapis.com/maps/api/geocode/json?"

# Additional detail for urllib
# http.client.HTTPConnection.debuglevel = 1

# Cache database: one row per address; geodata holds the raw JSON reply.
# Delete geodata.sqlite to restart the retrieval from scratch.
conn = sqlite3.connect('geodata.sqlite')
cur = conn.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS Locations (address TEXT, geodata TEXT)''')

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Phase one: read where.data one address per line, geocode every address
# not yet cached and store the JSON reply.  At most 200 new lookups per
# run so a rate-limited API is not hammered; re-run to continue.
count = 0
with open("where.data") as file_handle:  # bug fix: the file was never closed
    for line in file_handle:
        if count > 200:
            print('Retrieved 200 locations, restart to retrieve more')
            break
        address = line.strip()
        print('')

        # Addresses are stored as BLOBs, hence the memoryview wrapper.
        cur.execute("SELECT geodata FROM Locations WHERE address= ?",
                    (memoryview(address.encode()),))
        # Bug fix: explicit None check instead of a bare try/except around
        # fetchone()[0], which silently swallowed unrelated errors too.
        if cur.fetchone() is not None:
            print("Found in database ", address)
            continue

        # Build the request URL with the address (and key) URL-encoded.
        parms = dict()
        parms["address"] = address
        if api_key is not False:
            parms['key'] = api_key
        url = serviceurl + urllib.parse.urlencode(parms)

        print('Retrieving', url)
        url_handle = urllib.request.urlopen(url, context=ctx)
        data = url_handle.read().decode()
        print('Retrieved', len(data), 'characters', data[:20].replace('\n', ' '))
        count = count + 1

        try:
            json_data = json.loads(data)
        except ValueError:
            # Bug fix: catch only JSON parse errors, not everything.
            print(data)  # show the payload that failed to parse
            continue

        if 'status' not in json_data or (json_data['status'] != 'OK' and json_data['status'] != 'ZERO_RESULTS'):
            print('==== Failure To Retrieve ====')
            print(data)
            break

        cur.execute('''INSERT INTO Locations (address, geodata)
                VALUES ( ?, ? )''',
                    (memoryview(address.encode()), memoryview(data.encode())))
        conn.commit()
        # Pause every 10th lookup to respect the API rate limit.
        if count % 10 == 0:
            print('Pausing for a bit...')
            time.sleep(5)

print("Run geodump.py to read the data from the database so you can vizualize it on a map.")
| 37.984733 | 109 | 0.697548 | ## STEP: GATHER DATA
# data source: where.data, Google Geodata API
# edit where.data to add an address nearby where you live
# use the Google geocoding API to clean up some user-entered geographic locations of university names
import urllib.request, urllib.parse, urllib.error
import http
import sqlite3
import json
import time
import ssl
import sys

api_key = False
# If you have a Google Places API key, enter it here
# api_key = 'AIzaSy___IDByT70'

if api_key is False:
    # Without a key, use the Py4E data subset (no rate limit); the fixed
    # key 42 is what that server expects.
    api_key = 42
    serviceurl = "http://py4e-data.dr-chuck.net/json?"
else:
    serviceurl = "https://maps.googleapis.com/maps/api/geocode/json?"

# Additional detail for urllib
# http.client.HTTPConnection.debuglevel = 1

# Cache database: one row per address; geodata holds the raw JSON reply.
# Delete geodata.sqlite to restart the retrieval from scratch.
conn = sqlite3.connect('geodata.sqlite')
cur = conn.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS Locations (address TEXT, geodata TEXT)''')

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Phase one: read where.data one address per line, geocode every address
# not yet cached and store the JSON reply.  At most 200 new lookups per
# run so a rate-limited API is not hammered; re-run to continue.
count = 0
with open("where.data") as file_handle:  # bug fix: the file was never closed
    for line in file_handle:
        if count > 200:
            print('Retrieved 200 locations, restart to retrieve more')
            break
        address = line.strip()
        print('')

        # Addresses are stored as BLOBs, hence the memoryview wrapper.
        cur.execute("SELECT geodata FROM Locations WHERE address= ?",
                    (memoryview(address.encode()),))
        # Bug fix: explicit None check instead of a bare try/except around
        # fetchone()[0], which silently swallowed unrelated errors too.
        if cur.fetchone() is not None:
            print("Found in database ", address)
            continue

        # Build the request URL with the address (and key) URL-encoded.
        parms = dict()
        parms["address"] = address
        if api_key is not False:
            parms['key'] = api_key
        url = serviceurl + urllib.parse.urlencode(parms)

        print('Retrieving', url)
        url_handle = urllib.request.urlopen(url, context=ctx)
        data = url_handle.read().decode()
        print('Retrieved', len(data), 'characters', data[:20].replace('\n', ' '))
        count = count + 1

        try:
            json_data = json.loads(data)
        except ValueError:
            # Bug fix: catch only JSON parse errors, not everything.
            print(data)  # show the payload that failed to parse
            continue

        if 'status' not in json_data or (json_data['status'] != 'OK' and json_data['status'] != 'ZERO_RESULTS'):
            print('==== Failure To Retrieve ====')
            print(data)
            break

        cur.execute('''INSERT INTO Locations (address, geodata)
                VALUES ( ?, ? )''',
                    (memoryview(address.encode()), memoryview(data.encode())))
        conn.commit()
        # Pause every 10th lookup to respect the API rate limit.
        if count % 10 == 0:
            print('Pausing for a bit...')
            time.sleep(5)

print("Run geodump.py to read the data from the database so you can vizualize it on a map.")
| 0 | 0 | 0 |
139922438afd400fe3016bba57e000f385c74743 | 392 | py | Python | setup.py | roblabs/gdal2tilesp | c92cc808c2ee614b2088d17634b76da9590b7273 | [
"Apache-2.0"
] | 18 | 2016-08-19T06:24:19.000Z | 2022-01-07T06:00:40.000Z | setup.py | roblabs/gdal2tilesp | c92cc808c2ee614b2088d17634b76da9590b7273 | [
"Apache-2.0"
] | 11 | 2016-08-18T15:00:13.000Z | 2020-02-03T19:43:50.000Z | setup.py | roblabs/gdal2tilesp | c92cc808c2ee614b2088d17634b76da9590b7273 | [
"Apache-2.0"
] | 11 | 2016-08-25T08:40:04.000Z | 2019-12-11T18:08:57.000Z | from setuptools import setup
# Package metadata for gdal2tilesp -- a parallelized gdal2tiles tile cutter.
setup(
    name='gdal2tilesp.py',
    version='3.14.15926',
    author='',
    author_email='',
    packages=['.'],
    scripts=['gdal2tilesp.py'],
    url='https://github.com/roblabs/gdal2tilesp',
    license='LICENSE.txt',
    description='Enhancements to tile cutter for parallelism and image format support',
    # NOTE(review): open() here leaks a handle and fails if README.md is
    # missing at install time -- conventional in setup.py, but confirm.
    long_description=open('README.md').read(),
)
| 26.133333 | 87 | 0.668367 | from setuptools import setup
# Package metadata for gdal2tilesp -- a parallelized gdal2tiles tile cutter.
setup(
    name='gdal2tilesp.py',
    version='3.14.15926',
    author='',
    author_email='',
    packages=['.'],
    scripts=['gdal2tilesp.py'],
    url='https://github.com/roblabs/gdal2tilesp',
    license='LICENSE.txt',
    description='Enhancements to tile cutter for parallelism and image format support',
    # NOTE(review): open() here leaks a handle and fails if README.md is
    # missing at install time -- conventional in setup.py, but confirm.
    long_description=open('README.md').read(),
)
| 0 | 0 | 0 |
8712734418a8763b66fe9e2ce2e53f044fb08ff4 | 1,121 | py | Python | 155-MinStack/MinStack.py | DarknessFall/MyLeetCode | 4c7dcacfc208a0188542b73ae62bb4f7e57bcf51 | [
"MIT"
] | null | null | null | 155-MinStack/MinStack.py | DarknessFall/MyLeetCode | 4c7dcacfc208a0188542b73ae62bb4f7e57bcf51 | [
"MIT"
] | null | null | null | 155-MinStack/MinStack.py | DarknessFall/MyLeetCode | 4c7dcacfc208a0188542b73ae62bb4f7e57bcf51 | [
"MIT"
] | null | null | null |
# Smoke test of MinStackNew (defined further down in this dump): after
# pushing 0, -1, 0 the minimum is -1; after two pops it is 0 again.
stack = MinStackNew()
stack.push(0)
stack.push(-1)
stack.push(0)
x = stack.getMin()
print(x)
stack.pop()
stack.pop()
x = stack.getMin()
print(x) | 15.569444 | 49 | 0.600357 | class MinStack:
    def __init__(self):
        """Initialize an empty value stack and its companion min-stack."""
        self.stack1 = []  # all pushed values, in push order
        self.stack2 = []  # candidate minima; top is always the current min
def push(self, x: int) -> None:
if len(self.stack1) == 0 or x <= self.getMin():
self.stack2.append(x)
self.stack1.append(x)
def pop(self) -> None:
value = self.stack1.pop()
if value == self.getMin():
self.stack2.pop()
    def top(self) -> int:
        # Peek at the most recently pushed value; stack must be non-empty.
        return self.stack1[-1]
    def getMin(self) -> int:
        # The auxiliary stack's top always holds the current minimum.
        return self.stack2[-1]
class MinStackNew:
    """Min-stack backed by a single list: whenever a new minimum is
    pushed, the outgoing minimum is stored underneath it so pop() can
    restore it in O(1)."""

    def __init__(self):
        self.stack = []
        self.min = 0

    def push(self, x: int) -> None:
        if not self.stack:
            self.min = x
        if x <= self.min:
            # Preserve the previous minimum beneath the new one.
            self.stack.append(self.min)
            self.min = x
        self.stack.append(x)

    def pop(self) -> None:
        # When the minimum leaves, the value below it is the previous min.
        if self.stack.pop() == self.min:
            self.min = self.stack.pop()

    def top(self) -> int:
        return self.stack[-1]

    def getMin(self) -> int:
        return self.min
# Smoke test: prints -1 (min after pushing 0, -1, 0) and then 0 (min
# after popping twice).
stack = MinStackNew()
stack.push(0)
stack.push(-1)
stack.push(0)
x = stack.getMin()
print(x)
stack.pop()
stack.pop()
x = stack.getMin()
print(x) | 507 | 416 | 47 |
da2ce721703a51a4f0e81c417b3dd0f0f7cf326e | 11,728 | py | Python | Examples/site/server/myclass.py | cyh-ustc/BlokusHacker | d08bd34df30b1b7ce6a09640b83c82babda9d593 | [
"MIT"
] | 20 | 2016-04-02T10:38:22.000Z | 2021-09-20T07:47:26.000Z | Examples/site/server/myclass.py | cyh-ustc/BlokusHacker | d08bd34df30b1b7ce6a09640b83c82babda9d593 | [
"MIT"
] | 3 | 2016-04-02T08:44:45.000Z | 2016-04-11T01:20:48.000Z | Examples/site/server/myclass.py | cyh-ustc/BlokusHacker | d08bd34df30b1b7ce6a09640b83c82babda9d593 | [
"MIT"
] | 14 | 2016-03-31T14:11:28.000Z | 2017-08-13T01:36:59.000Z |
#from simplejson import JSONDecoder
#from simplejson import JSONEncoder
# --------------------------------------------------------------------------------------
# This block define the input of the app
# the name of style is not difined
# json_example:
# '{"State": "Normal", "Style": "10", "Moves": [[1, 2], [1, 0], [0, 0], [1, 1], [1, 3]]}'
#
# dict_example:
# {'State': 'Normal', 'Style': '10', 'Moves': [(1, 2), (1, 0), (0, 0), (1, 1), (1, 3)]}
# Moves could be a set or a list
# --------------------------------------------------------------------------------------
# rule one: using tuple to define a point
# rule two: the massage flow in the program is dict:last_move
# rule three: the procession in this file only work well if you make sure that "move" is not empty
# --------------------------------------------------------------------------------------
class Information(object):
    """Message wrapper used in server/user communication (stub in this copy)."""
class ChessBoard(object):
    """ChessBoard is the board of the game"""
# NOTE(review): main() is not defined in this chunk -- presumably filtered
# out or expected from the star-import below; confirm before running.
from generator import *
if __name__ == '__main__':
    main()
| 36.880503 | 120 | 0.517224 |
#from simplejson import JSONDecoder
#from simplejson import JSONEncoder
# --------------------------------------------------------------------------------------
# This block define the input of the app
# the name of style is not difined
# json_example:
# '{"State": "Normal", "Style": "10", "Moves": [[1, 2], [1, 0], [0, 0], [1, 1], [1, 3]]}'
#
# dict_example:
# {'State': 'Normal', 'Style': '10', 'Moves': [(1, 2), (1, 0), (0, 0), (1, 1), (1, 3)]}
# Moves could be a set or a list
# --------------------------------------------------------------------------------------
# rule one: using tuple to define a point
# rule two: the massage flow in the program is dict:last_move
# rule three: the procession in this file only work well if you make sure that "move" is not empty
# --------------------------------------------------------------------------------------
class Information(object):
    """Message wrapper for the server/user communication.

    Accepts either a JSON string or a dict describing a move and keeps
    two synchronized representations:
      movesD -- dict form, with 'State', 'Moves' and 'Style' keys filled in
      movesJ -- JSON string form
    """

    # Default values for the keys every message must carry.
    _DEFAULTS = {'State': 'Normal', 'Moves': '', 'Style': ''}

    def __init__(self, moves=None):  # moves contains the position in the chess board
        self.moves = moves
        self.movesD = {}
        self.movesJ = ''
        if isinstance(self.moves, str):
            self.movesJ = self.moves
            self.Decode()
            self._fill_defaults()
            # Bug fix: re-Encode here.  The original called Decode() a second
            # time, re-parsing movesJ and discarding the defaults just filled.
            self.Encode()
        elif isinstance(self.moves, dict):
            self.movesD = self.moves
            self._fill_defaults()
            self.Encode()

    def _fill_defaults(self):
        # Bug fix: fill EVERY missing key -- the original elif-chain stopped
        # after the first one -- and use 'Normal' consistently for State
        # (the dict branch used lowercase 'normal').
        for key, value in self._DEFAULTS.items():
            self.movesD.setdefault(key, value)

    def Encode(self):
        """Serialize movesD into the JSON string movesJ."""
        # Bug fix: the simplejson imports were commented out at module top,
        # so JSONEncoder was an unresolved name; use the stdlib json module.
        import json
        self.movesJ = json.dumps(self.movesD)

    def Decode(self):
        """Parse the JSON string movesJ into the dict movesD."""
        import json
        self.movesD = json.loads(self.movesJ)
class ChessBoard(object):
    """ChessBoard is the board of the game.

    The board is a size x size grid; cell value 1 marks an "essential"
    point (a cell a new piece must touch), 2 marks a forbidden cell.
    `array` and `Style` are presumably provided by the later
    `from generator import *` -- confirm.
    """
    def __init__(self, size, moves=None, value=0):
        self.board = array.create2D(size, size, value)
        self.size = size
        self.essentialPoints = self.initEsentialPoints()
        self.forbiddenPoints = self.initForbiddenPoints()
        self.styles = Style(moves)
    def showboard(self):
        """Print the raw board grid."""
        array.write2D(self.board)
    def checkMoves(self, last_move):
        """Return True when the move touches at least one essential point.

        NOTE(review): a forbidden point found AFTER an essential one only
        breaks the scan and leaves flag True, so the move is still
        accepted -- confirm whether it should be rejected instead.
        """
        flag = False
        moves = last_move["Moves"]
        for point in moves:
            # print self.essentialPoints
            if point in self.essentialPoints:
                flag = True
            elif point in self.forbiddenPoints:
                print('Forbidden')
                break
        return flag  # moves is a list of points
    def initForbiddenPoints(self):
        """Initially no cell is forbidden."""
        return []
    def initEsentialPoints(self):
        """Initial essential points of an empty board."""
        return [(4, 4), (9, 9)]
    def setForbiddenPoint(self, points):
        """Mark each point forbidden (cell value 2), skipping duplicates."""
        for point in points:
            if point not in self.forbiddenPoints:
                self.board[point[0]][point[1]] = 2
                self.forbiddenPoints.append(point)
    def setEssentialPoint(self, points):
        """Mark each point essential (cell value 1), skipping duplicates."""
        for point in points:
            if point not in self.essentialPoints:
                self.board[point[0]][point[1]] = 1
                self.essentialPoints.append(point)
    def addEssentialPoint(self, point):
        """Make the in-bounds diagonal neighbours of `point` essential."""
        points = []
        if point[0] < self.size - 1 and point[1] < self.size - 1:
            points.append((point[0] + 1, point[1] + 1))
        if point[0] < self.size - 1 and point[1] > 0:
            points.append((point[0] + 1, point[1] - 1))
        if point[0] > 0 and point[1] < self.size - 1:
            points.append((point[0] - 1, point[1] + 1))
        if point[0] > 0 and point[1] > 0:
            points.append((point[0] - 1, point[1] - 1))
        self.setEssentialPoint(points)
    def addForbiddenPoint(self, point):
        """Make `point` and its in-bounds orthogonal neighbours forbidden."""
        points = [point]
        if point[0] < self.size - 1:
            points.append((point[0] + 1, point[1]))
        if point[0] > 0:
            points.append((point[0] - 1, point[1]))
        if point[1] < self.size - 1:
            points.append((point[0], point[1] + 1))
        if point[1] > 0:
            points.append((point[0], point[1] - 1))
        self.setForbiddenPoint(points)
    def rmEssentialPoint(self):
        """Demote essential points that have since become forbidden."""
        for point in self.forbiddenPoints:
            if point in self.essentialPoints:
                self.board[point[0]][point[1]] = 2
                self.essentialPoints.remove(point)
    def update(self, last_move):
        """Validate a move and, if legal, apply it: essential points on the
        move's diagonals, forbidden points on the move and its sides, then
        drop essential points that became forbidden.  Returns validity."""
        # print 'db update'
        is_valid = self.checkMoves(last_move)
        if is_valid:
            move = last_move['Moves']
            for point in move:
                self.addEssentialPoint(point)
                self.addForbiddenPoint(point)
            self.rmEssentialPoint()
        return is_valid
    def judgeStyle(self, Moves):  # Moves is the information of the last move
        """Return the piece (Style) ID of the given move."""
        style = Style(Moves)
        return style.ID
    def Show(self):
        # for point in self.forbiddenPoints:
        #     self.board[point[0]][point[1]] = -1
        # for point in self.essentialPoints:
        #     self.board[point[0]][point[1]] = 1
        array.write2D(self.board)
class Player(object):
    """A player: an id, a private board view and the list of piece
    (Style) IDs still in hand (1..21)."""

    def __init__(self, id=None):
        self.id = id
        self.size = 14
        self.board = ChessBoard(self.size)
        # Piece IDs 1..21 still available to this player.
        self.chess_ID_list = [i for i in range(1, 22)]

    def checkmove_player(self, last_move):
        """True when last_move uses a piece still in hand AND is a legal
        placement on this player's board."""
        flag = True
        move_style_ID = Style(last_move).ID
        if move_style_ID not in self.chess_ID_list:
            flag = False
        if not self.board.checkMoves(last_move):
            flag = False
        return flag

    def popChess(self, move_style_ID):
        """Remove a played piece from the player's hand."""
        self.chess_ID_list.remove(move_style_ID)

    def Judge_nomoves(self):
        """
        Decide by enumeration whether this player has NO legal move left.

        1. With no pieces remaining, no move is possible.
        2. Otherwise try every remaining piece, both flips, all four
           rotations, every point of the piece, shifted onto every
           essential board point (roughly 21*2*4*5*20 = 16800 checks).
        Returns True when no legal move exists, False as soon as one is
        found.
        """
        # Bug fix: the original tested `self.chess_ID_list is []`, which is
        # always False (identity against a fresh list); test emptiness.
        if not self.chess_ID_list:
            return True
        for chess_ID in self.chess_ID_list:  # every remaining piece
            chess = Style(chess_ID)
            for i in range(2):
                chess.Invert()  # both flips
                for j in range(4):
                    chess.Rotate()  # all four rotations
                    for point in chess.pointsSet:  # every point of the piece
                        for essentialPoint in self.board.essentialPoints:
                            # Shift so this point lands on the essential point.
                            line_shift = essentialPoint[0] - point[0]
                            row_shift = essentialPoint[1] - point[1]
                            possible_move = self.shift_chess(line_shift, row_shift, chess.pointsSet, chess_ID)
                            if possible_move is not None:
                                if self.board.checkMoves(possible_move):
                                    return False
        return True

    def shift_chess(self, line_shift, row_shift, pointsSet, chess_ID):
        """Translate a piece by (line_shift, row_shift); return the move
        dict, or None when any point would leave the board."""
        move = {'State': 'Normal', 'Style': chess_ID, 'Moves': []}
        for point in pointsSet:
            # Bug fix: the row bound used `> self.size` while the line bound
            # used `> self.size - 1`; both must reject indices beyond size-1.
            if point[0] + line_shift > self.size - 1 or point[1] + row_shift > self.size - 1 \
                    or point[0] + line_shift < 0 or point[1] + row_shift < 0:
                return None
            move['Moves'].append((point[0] + line_shift, point[1] + row_shift))
        return move

    def my_move(self, last_move):
        """Validate and apply this player's move; returns validity."""
        move_style = Style(last_move)
        is_valid = self.checkmove_player(last_move)
        if is_valid:
            self.popChess(move_style.ID)
            self.board.update(last_move)
        return is_valid

    def other_move(self, other_move):
        """Record an opponent's move: its cells become forbidden for us."""
        self.board.setForbiddenPoint(other_move['Moves'])

    def countScores(self):
        """Score = total squares of the pieces still in hand, via the
        ID -> piece-size table below."""
        score = 0
        for ID in self.chess_ID_list:
            if ID == 1:
                score += 1
            elif ID == 2:
                score += 2
            elif ID <= 4:
                score += 3
            elif ID <= 9:
                score += 4
            else:
                score += 5
        return score
class Game(object):
    """A two-player game session: tracks both players and the last move."""

    def __init__(self, p1, p2, last_moves=None):
        assert isinstance(p1, Player) and isinstance(p2, Player)
        self.p1 = p1
        self.p2 = p2
        self.last_moves = last_moves

    def update(self, player_id, last_moves):
        """Apply last_moves on behalf of player_id; returns move validity."""
        is_valid = True
        self.last_moves = last_moves
        assert 'State' in last_moves and 'Moves' in last_moves
        # Bug fix: the original compared strings and ids with `is`/`is not`,
        # which only works by accident of CPython interning; use ==/!=.
        if last_moves['State'] != 'Normal':
            if last_moves['State'] == 'Unmove':
                pass
            elif player_id == self.p1.id:
                # NOTE(review): my_move/other_move receive last_moves['Moves']
                # (a list) although Player.my_move indexes its argument with
                # 'Moves' again -- looks inconsistent; confirm against Player.
                is_valid = self.p1.my_move(last_moves['Moves'])
                self.p2.other_move(last_moves['Moves'])
                self.last_moves = last_moves
            else:
                is_valid = self.p2.my_move(last_moves['Moves'])
                self.p1.other_move(last_moves['Moves'])
                self.last_moves = last_moves
        return is_valid

    def check_end(self):
        """The game ends when neither player has a legal move left."""
        return self.p1.Judge_nomoves() and self.p2.Judge_nomoves()

    def get_winner(self):
        """Return the id of the player with the higher remaining score.

        NOTE(review): with remaining-piece scoring, a LOWER score usually
        wins; confirm whether this comparison is intended.
        """
        p1_score = self.p1.countScores()
        p2_score = self.p2.countScores()
        if p1_score > p2_score:
            return self.p1.id
        else:
            return self.p2.id
from generator import *
def main():
    """Interactive driver: whenever the player is judged move-less, a move
    is generated, applied and the board is shown; typing 'show' at the
    prompt prints the remaining piece IDs.

    NOTE(review): the move dict uses the key 'Stste' (sic) where the rest
    of the module reads 'State' -- looks like a typo; confirm.
    """
    player = Player()
    while True:
        for i in range(1000):
            if player.Judge_nomoves():
                last_move_l = Generator().generate_chess()
                # Generator().showChess(last_move_l)
                last_move = {'Stste': 'Normal', 'Style': '', 'Moves': last_move_l}
                player.my_move(last_move)
                player.board.Show()
                A = input()
                if A == 'show':
                    print(player.chess_ID_list)
                    input()
            # if A is not '\n':
            # ID = int(raw_input('ID: '))
            # line_shift = int(raw_input('line_shift: '))
            # row_shift = int(raw_input('row_shift: '))
            # sty = Style(ID)
            # point_list = []
            # for point in sty.pointsSet:
            # point_list.append((point[0]+line_shift, point[1]+row_shift))
            # input_move = {'Stste': 'Normal', 'Style' : '', 'Moves' : point_list}
            # player.move(input_move)
            # player.board.Show()
            # raw_input()
    # player1 = Player()
    # player2 = Player()
    # lastmoves = {'Stste': 'Normal', 'Style': '10', 'Moves': [(1, 2), (1, 0), (0, 0), (1, 1), (1, 3)]}
    # cb = ChessBoard(14)
    # cb.essentialPoints = [(0, 0)]
    # cb.update(lastmoves)
    # cb.Show()
if __name__ == '__main__':
    main()
| 8,315 | 1,954 | 650 |
1d16b85f47778da2655aaada8a1dc36f8747f3db | 3,525 | py | Python | autumn/model_runner.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | autumn/model_runner.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | autumn/model_runner.py | malanchak/AuTuMN | 0cbd006d1f15da414d02eed44e48bb5c06f0802e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """
Build and run any AuTuMN model, storing the outputs
"""
import os
import logging
import yaml
from datetime import datetime
from autumn import constants
from autumn.tool_kit.timer import Timer
from autumn.tool_kit.serializer import serialize_model
from autumn.tool_kit.scenarios import Scenario
from autumn.tool_kit.utils import (
get_git_branch,
get_git_hash,
)
from autumn.db.models import store_run_models
logger = logging.getLogger(__name__)
def build_model_runner(
    model_name: str, param_set_name: str, build_model, params: dict,
):
    """
    Factory function that returns a 'run_model' function.

    Args:
        model_name: model identifier; part of the output folder path.
        param_set_name: parameter-set label; defaults to "main-model".
        build_model: callable handed to Scenario to construct each model.
        params: parameter dict; must contain a "scenarios" mapping.

    Returns:
        run_model(run_scenarios=True): runs the baseline scenario (and,
        unless disabled, every other scenario) and stores all outputs.
    """
    assert model_name, "Value 'model_name' must be set."
    assert build_model, "Value 'build_model' must be set."
    assert params, "Value 'params' must be set."
    if not param_set_name:
        param_set_name = "main-model"

    def run_model(run_scenarios=True):
        """
        Run the model, save the outputs.
        """
        logger.info(f"Running {model_name} {param_set_name}...")
        # Ensure project folder exists; each run gets its own timestamped dir.
        project_dir = os.path.join(constants.OUTPUT_DATA_PATH, "run", model_name, param_set_name)
        timestamp = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
        output_dir = os.path.join(project_dir, timestamp)
        os.makedirs(output_dir, exist_ok=True)
        # Determine where to save model outputs
        output_db_path = os.path.join(output_dir, "outputs.db")
        # Save model parameters to output dir.
        param_path = os.path.join(output_dir, "params.yml")
        with open(param_path, "w") as f:
            yaml.dump(params, f)
        # Save model run metadata (including git provenance) to output dir.
        meta_path = os.path.join(output_dir, "meta.yml")
        metadata = {
            "model_name": model_name,
            "param_set_name": param_set_name,
            "start_time": timestamp,
            "git_branch": get_git_branch(),
            "git_commit": get_git_hash(),
        }
        with open(meta_path, "w") as f:
            yaml.dump(metadata, f)
        with Timer("Running model scenarios"):
            # Scenario 0 is the baseline; the rest come from params["scenarios"].
            num_scenarios = 1 + len(params["scenarios"].keys())
            scenarios = []
            for scenario_idx in range(num_scenarios):
                scenario = Scenario(build_model, scenario_idx, params)
                scenarios.append(scenario)
            # Run the baseline scenario.
            baseline_scenario = scenarios[0]
            baseline_scenario.run()
            baseline_model = baseline_scenario.model
            save_serialized_model(baseline_model, output_dir, "baseline")
            if not run_scenarios:
                # Do not run non-baseline models
                scenarios = scenarios[:1]
            # Run all the other scenarios, seeded from the baseline model.
            for scenario in scenarios[1:]:
                scenario.run(base_model=baseline_model)
                name = f"scenario-{scenario.idx}"
                save_serialized_model(scenario.model, output_dir, name)
        with Timer("Saving model outputs to the database"):
            models = [s.model for s in scenarios]
            store_run_models(models, output_db_path)

    return run_model
| 33.571429 | 97 | 0.639716 | """
Build and run any AuTuMN model, storing the outputs
"""
import os
import logging
import yaml
from datetime import datetime
from autumn import constants
from autumn.tool_kit.timer import Timer
from autumn.tool_kit.serializer import serialize_model
from autumn.tool_kit.scenarios import Scenario
from autumn.tool_kit.utils import (
get_git_branch,
get_git_hash,
)
from autumn.db.models import store_run_models
logger = logging.getLogger(__name__)
def build_model_runner(
    model_name: str, param_set_name: str, build_model, params: dict,
):
    """
    Factory function that returns a 'run_model' function.

    Args:
        model_name: model identifier; part of the output folder path.
        param_set_name: parameter-set label; defaults to "main-model".
        build_model: callable handed to Scenario to construct each model.
        params: parameter dict; must contain a "scenarios" mapping.

    Returns:
        run_model(run_scenarios=True): runs the baseline scenario (and,
        unless disabled, every other scenario) and stores all outputs.
    """
    assert model_name, "Value 'model_name' must be set."
    assert build_model, "Value 'build_model' must be set."
    assert params, "Value 'params' must be set."
    if not param_set_name:
        param_set_name = "main-model"

    def run_model(run_scenarios=True):
        """
        Run the model, save the outputs.
        """
        logger.info(f"Running {model_name} {param_set_name}...")
        # Ensure project folder exists; each run gets its own timestamped dir.
        project_dir = os.path.join(constants.OUTPUT_DATA_PATH, "run", model_name, param_set_name)
        timestamp = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
        output_dir = os.path.join(project_dir, timestamp)
        os.makedirs(output_dir, exist_ok=True)
        # Determine where to save model outputs
        output_db_path = os.path.join(output_dir, "outputs.db")
        # Save model parameters to output dir.
        param_path = os.path.join(output_dir, "params.yml")
        with open(param_path, "w") as f:
            yaml.dump(params, f)
        # Save model run metadata (including git provenance) to output dir.
        meta_path = os.path.join(output_dir, "meta.yml")
        metadata = {
            "model_name": model_name,
            "param_set_name": param_set_name,
            "start_time": timestamp,
            "git_branch": get_git_branch(),
            "git_commit": get_git_hash(),
        }
        with open(meta_path, "w") as f:
            yaml.dump(metadata, f)
        with Timer("Running model scenarios"):
            # Scenario 0 is the baseline; the rest come from params["scenarios"].
            num_scenarios = 1 + len(params["scenarios"].keys())
            scenarios = []
            for scenario_idx in range(num_scenarios):
                scenario = Scenario(build_model, scenario_idx, params)
                scenarios.append(scenario)
            # Run the baseline scenario.
            baseline_scenario = scenarios[0]
            baseline_scenario.run()
            baseline_model = baseline_scenario.model
            save_serialized_model(baseline_model, output_dir, "baseline")
            if not run_scenarios:
                # Do not run non-baseline models
                scenarios = scenarios[:1]
            # Run all the other scenarios, seeded from the baseline model.
            for scenario in scenarios[1:]:
                scenario.run(base_model=baseline_model)
                name = f"scenario-{scenario.idx}"
                save_serialized_model(scenario.model, output_dir, name)
        with Timer("Saving model outputs to the database"):
            models = [s.model for s in scenarios]
            store_run_models(models, output_db_path)

    return run_model
def save_serialized_model(model, output_dir: str, name: str):
    """Serialize `model` to YAML at <output_dir>/models/<name>.yml."""
    models_dir = os.path.join(output_dir, "models")
    os.makedirs(models_dir, exist_ok=True)
    serialized = serialize_model(model)
    with open(os.path.join(models_dir, f"{name}.yml"), "w") as f:
        yaml.dump(serialized, f)
| 310 | 0 | 23 |
728cb69af3c08ef7582f9c410b1fc5f27c722c62 | 2,349 | py | Python | api/image_similarity.py | reneraab/librephotos | a3972ab520586e721c67f283b1a50ccb7abe2b01 | [
"MIT"
] | null | null | null | api/image_similarity.py | reneraab/librephotos | a3972ab520586e721c67f283b1a50ccb7abe2b01 | [
"MIT"
] | null | null | null | api/image_similarity.py | reneraab/librephotos | a3972ab520586e721c67f283b1a50ccb7abe2b01 | [
"MIT"
] | null | null | null | import numpy as np
import requests
from django.db.models import Q
from api.models import Photo, User
from api.util import logger
from ownphotos.settings import IMAGE_SIMILARITY_SERVER
| 29 | 87 | 0.648361 | import numpy as np
import requests
from django.db.models import Q
from api.models import Photo, User
from api.util import logger
from ownphotos.settings import IMAGE_SIMILARITY_SERVER
def search_similar_embedding(user, emb, result_count=100, threshold=27):
    """Query the similarity server for embeddings close to `emb`.

    `user` may be a User instance or a raw user id.  Returns the server's
    result list, or [] (with an error logged) on a non-200 response.
    """
    # Idiom fix: isinstance instead of `type(user) == int`.
    user_id = user if isinstance(user, int) else user.id

    image_embedding = np.array(emb, dtype=np.float32)
    post_data = {
        "user_id": user_id,
        "image_embedding": image_embedding.tolist(),
        "n": result_count,
        "threshold": threshold,
    }
    res = requests.post(IMAGE_SIMILARITY_SERVER + "/search/", json=post_data)
    if res.status_code == 200:
        return res.json()["result"]
    logger.error("error retrieving similar embeddings for user {}".format(user_id))
    return []
def search_similar_image(user, photo):
    """Return similar-photo results for `photo`, generating its CLIP
    embeddings on demand; [] when embeddings cannot be produced or the
    server replies with a non-200 status."""
    # Idiom fix: isinstance instead of `type(user) == int`.
    user_id = user if isinstance(user, int) else user.id

    # Idiom fix: `is None` instead of `== None`.
    if photo.clip_embeddings is None:
        photo._generate_clip_embeddings()
        if photo.clip_embeddings is None:
            return []

    image_embedding = np.array(photo.clip_embeddings, dtype=np.float32)
    post_data = {"user_id": user_id, "image_embedding": image_embedding.tolist()}
    res = requests.post(IMAGE_SIMILARITY_SERVER + "/search/", json=post_data)
    if res.status_code == 200:
        return res.json()
    # Bug fix: `user` may be a bare id here, so user.username would raise
    # AttributeError in the error path; log the resolved user_id instead.
    logger.error(
        "error retrieving similar photos to {} belonging to user {}".format(
            photo.image_hash, user_id
        )
    )
    return []
def build_image_similarity_index(user):
    """(Re)build the similarity-server index for all of *user*'s photos.

    Collects the CLIP embeddings of every visible (non-hidden) photo owned
    by the user and posts them to the similarity server's /build/ endpoint.

    :param user: ``User`` instance whose index should be rebuilt
    :return: decoded JSON response from the similarity server
    """
    # Original message had a typo ("builing").
    logger.info("building similarity index for user {}".format(user.username))
    photos = (
        Photo.objects.filter(Q(hidden=False) & Q(owner=user))
        .exclude(clip_embeddings=None)
        .only("clip_embeddings")
    )
    image_hashes = []
    image_embeddings = []
    for photo in photos:
        image_hashes.append(photo.image_hash)
        # Normalise to float32 before serialising for the server.
        image_embeddings.append(
            np.array(photo.clip_embeddings, dtype=np.float32).tolist()
        )
    post_data = {
        "user_id": user.id,
        "image_hashes": image_hashes,
        "image_embeddings": image_embeddings,
    }
    res = requests.post(IMAGE_SIMILARITY_SERVER + "/build/", json=post_data)
    return res.json()
| 2,092 | 0 | 69 |
37bce0df6bd8a43af644e580e4fb57dc494b883f | 768 | py | Python | days/day101/Bite 18. Find the most common word/harry.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | 2 | 2018-10-28T17:12:37.000Z | 2018-10-28T17:12:39.000Z | days/day101/Bite 18. Find the most common word/harry.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | 3 | 2018-10-28T17:11:04.000Z | 2018-10-29T22:36:36.000Z | days/day101/Bite 18. Find the most common word/harry.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | null | null | null | import os
import urllib.request
from collections import Counter
import re
# data provided
stopwords_file = os.path.join('/tmp', 'stopwords')
harry_text = os.path.join('/tmp', 'harry')
urllib.request.urlretrieve('http://bit.ly/2EuvyHB', stopwords_file)
urllib.request.urlretrieve('http://bit.ly/2C6RzuR', harry_text)
#stopwords_file = 'stopwords.txt'
#harry_text = 'harry.txt'
| 33.391304 | 68 | 0.64974 | import os
import urllib.request
from collections import Counter
import re
# data provided
stopwords_file = os.path.join('/tmp', 'stopwords')
harry_text = os.path.join('/tmp', 'harry')
urllib.request.urlretrieve('http://bit.ly/2EuvyHB', stopwords_file)
urllib.request.urlretrieve('http://bit.ly/2C6RzuR', harry_text)
#stopwords_file = 'stopwords.txt'
#harry_text = 'harry.txt'
def get_harry_most_common_word():
    """Return ``(word, count)`` for the most frequent non-stopword.

    Reads the downloaded stopword list and Harry Potter text (module-level
    paths), tokenises the text case-insensitively and counts every word
    that is not a stopword.
    """
    with open(stopwords_file) as sw_f, open(harry_text) as ht_f:
        # A set gives O(1) membership tests instead of O(n) list scans.
        stopwords = set(sw_f.read().strip().split('\n'))
        # NOTE: the original pattern [\w|']+ also swallowed literal '|'
        # characters into tokens -- '|' inside a character class is a
        # literal, not an alternation operator.
        words = re.findall(r"[\w']+", ht_f.read().lower())
    counts = Counter(word for word in words if word not in stopwords)
    return counts.most_common(1)[0]
6e2916af0581365bb3494ca2d3c85afdbe9c4fb8 | 2,062 | py | Python | libs/preprocessors/silhouette.py | ajkdrag/Who-Is-That-Pokemon-Bot | 6176148ee674c051458c744c6e05ad0934a7dcf7 | [
"MIT"
] | null | null | null | libs/preprocessors/silhouette.py | ajkdrag/Who-Is-That-Pokemon-Bot | 6176148ee674c051458c744c6e05ad0934a7dcf7 | [
"MIT"
] | null | null | null | libs/preprocessors/silhouette.py | ajkdrag/Who-Is-That-Pokemon-Bot | 6176148ee674c051458c744c6e05ad0934a7dcf7 | [
"MIT"
] | null | null | null | import logging
import cv2 as cv
from utils.functions import iter_dir, join, make_dirs
LOG = logging.getLogger(__name__)
| 36.175439 | 95 | 0.632881 | import logging
import cv2 as cv
from utils.functions import iter_dir, join, make_dirs
LOG = logging.getLogger(__name__)
class Silhouette:
    """Extract black-on-white silhouettes from images via thresholding.

    Reads every image under ``config['in_dir']``, computes a background
    mask and writes the inverted silhouette to ``config['out_dir']``,
    mirroring the input directory structure.
    """

    def __init__(self, config):
        # config is dict-like; keys used: 'in_dir', 'out_dir'.
        self.config = config

    def get_holes(self, image, thresh, base):
        """Binarise *image* and fill interior holes of dark regions.

        Returns a single-channel uint8 mask where enclosed dark areas have
        been flood-filled with *base* via contour filling.
        """
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        # threshold() returns (retval, image); keep only the binary image.
        im_bw = cv.threshold(gray, thresh, 255, cv.THRESH_BINARY)[1]
        im_bw_inv = cv.bitwise_not(im_bw)
        contour, _ = cv.findContours(im_bw_inv, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)
        for cnt in contour:
            # Fill each contour solidly (thickness -1) with the base value.
            cv.drawContours(im_bw_inv, [cnt], 0, base, -1)
        nt = cv.bitwise_not(im_bw)
        # Merge filled contours back with the inverted threshold mask.
        im_bw_inv = cv.bitwise_or(im_bw_inv, nt)
        return im_bw_inv

    def remove_background(
        self, image, thresh, base=255, scale_factor=0.45, kernel_range=range(1, 2), border=None
    ):
        """Build a smoothed background mask the same size as *image*.

        Downscales the hole-filled mask, pads it, applies morphological
        closing with growing elliptical kernels, then crops and rescales
        back to the original resolution.
        """
        # NOTE(review): border falls back to the largest kernel index;
        # a border of 0 would also be replaced here because of `or`.
        border = border or kernel_range[-1]
        holes = self.get_holes(image, thresh, base)
        small = cv.resize(holes, None, fx=scale_factor, fy=scale_factor)
        bordered = cv.copyMakeBorder(
            small, border, border, border, border, cv.BORDER_CONSTANT
        )
        for i in kernel_range:
            # Odd-sized elliptical kernel: 2*i+1 per side.
            kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (2 * i + 1, 2 * i + 1))
            bordered = cv.morphologyEx(bordered, cv.MORPH_CLOSE, kernel)
        unbordered = bordered[border:-border, border:-border]
        # Resize back to the source image's (width, height).
        mask = cv.resize(unbordered, (image.shape[1], image.shape[0]))
        return mask

    def preprocess(self):
        """Process every file under in_dir and write silhouettes to out_dir."""
        for file in iter_dir(self.config.get("in_dir")):
            # Preserve the relative directory layout in the output tree.
            relative_path = file.parent.relative_to(self.config.get("in_dir"))
            out_dir = join(self.config.get("out_dir"), relative_path)
            LOG.info("Reading :%s", str(file))
            img = cv.imread(str(file))
            # Invert the mask so the subject is dark on a light background.
            silhouette = 255 - self.remove_background(image=img, thresh=230)
            make_dirs(out_dir)
            out_path = join(out_dir, file.name)
            cv.imwrite(out_path, silhouette)
            LOG.info("Successfully extracted silhouette to: %s", out_path)
| 1,813 | -4 | 131 |
62732ead68f40bf265f23cb83b3530c32c6e7b6e | 2,084 | py | Python | giraffez/fmt.py | istvan-fodor/giraffez | 6b4d27eb1a1eaf188c6885c7364ef27e92b1b957 | [
"Apache-2.0"
] | 122 | 2016-08-18T21:12:58.000Z | 2021-11-24T14:45:19.000Z | giraffez/fmt.py | istvan-fodor/giraffez | 6b4d27eb1a1eaf188c6885c7364ef27e92b1b957 | [
"Apache-2.0"
] | 68 | 2016-08-31T18:19:16.000Z | 2021-11-01T19:21:22.000Z | giraffez/fmt.py | istvan-fodor/giraffez | 6b4d27eb1a1eaf188c6885c7364ef27e92b1b957 | [
"Apache-2.0"
] | 44 | 2016-08-19T01:22:21.000Z | 2022-03-23T17:39:40.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
from .constants import *
from .errors import *
from .logging import log
from ._compat import *
| 29.771429 | 90 | 0.644914 | # -*- coding: utf-8 -*-
#
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
from .constants import *
from .errors import *
from .logging import log
from ._compat import *
def escape_quotes(s):
    """Double every single quote so *s* is safe inside a SQL string literal."""
    return "''".join(s.split("'"))
def format_indent(body, indent=" ", initial=""):
    """Wrap *body*, prefixing the first line with *initial* and every
    subsequent line with *indent*."""
    wrapper = textwrap.TextWrapper(
        initial_indent=initial,
        subsequent_indent=indent,
    )
    return wrapper.fill(body)
def format_table(lines, header=True):
    """Render rows as an aligned, pipe-separated text table.

    Column widths are sized to the longest cell seen in each column; when
    *header* is true a dashed rule is inserted after the first row.
    """
    widths = [0] * len(lines[0])
    for line in lines:
        widths = [max(width, len(str(cell))) for width, cell in zip(widths, line)]
    rows = []
    for index, line in enumerate(lines):
        rows.append(" | ".join(str(cell).ljust(width) for width, cell in zip(widths, line)))
        if index == 0 and header:
            # Rule matches the rendered width of the header row.
            rows.append("-" * len(rows[0]))
    return "\n".join(rows)
def safe_name(s):
    """Lowercase *s* and replace every space with an underscore."""
    return "_".join(s.split(" ")).lower()
def quote_string(s, quote_character="'"):
    """Wrap *s* in *quote_character* on both sides (no escaping is done)."""
    return "{q}{v}{q}".format(q=quote_character, v=s)
def replace_cr(s):
    """Strip *s* and normalise carriage returns to newlines.

    Non-string values are passed through untouched.
    """
    if isinstance(s, str):
        return s.strip().replace('\r', '\n')
    return s
def truncate(s, n=7, c=70, placeholder="..."):
    """Shorten *s* to at most *n* lines and ``n * c`` characters.

    Returns *s* unchanged when it is already short enough; otherwise the
    clipped text with *placeholder* appended.  Raises GiraffeError when
    *s* is not a string.
    """
    if not isinstance(s, basestring):
        raise GiraffeError("Cannot truncate non-string value")
    limit = n * c
    # Short enough on both axes: nothing to do.
    if len(s) < limit and s.count('\n') + 1 < n:
        return s
    if len(s) > limit:
        s = s[:limit]
    clipped = "\n".join(s.split('\n')[:n])
    return "{} {}".format(clipped, placeholder)
| 1,192 | 0 | 161 |
d3521872ad60a69a63f6579c90ba8e0a110ff706 | 3,731 | py | Python | unet/unet_model.py | visinf/pixelpyramids | cd59fe8a8f744f556f44c1faeed958822b39fe7c | [
"Apache-2.0"
] | 8 | 2021-11-10T17:45:32.000Z | 2022-02-22T16:40:54.000Z | unet/unet_model.py | visinf/pixelpyramids | cd59fe8a8f744f556f44c1faeed958822b39fe7c | [
"Apache-2.0"
] | 1 | 2021-12-23T07:09:23.000Z | 2021-12-24T06:24:39.000Z | unet/unet_model.py | visinf/pixelpyramids | cd59fe8a8f744f556f44c1faeed958822b39fe7c | [
"Apache-2.0"
] | 1 | 2021-11-15T06:50:39.000Z | 2021-11-15T06:50:39.000Z | """ Full assembly of the parts to form the complete network """
import sys
import torch.nn.functional as F
from .unet_parts import *
'''class UNet(nn.Module):
def __init__(self, n_channels, n_classes, isfine=True, bilinear=True):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.is_fine = isfine
self.bilinear = bilinear
self.inc = DoubleConv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
factor = 2 if bilinear else 1
self.down4 = Down(512, 1024 // factor)
self.up1 = Up(1024, 512 // factor, bilinear)
self.up2 = Up(512, 256 // factor, bilinear)
self.up3 = Up(256, 128 // factor, bilinear)
self.up4 = Up(128, 64, bilinear)
self.condup = CondUp()
self.outc = OutConv(64, n_classes)
def forward(self, x=None):
if self.is_fine:
x = self.condup(x)
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits'''
| 35.198113 | 99 | 0.537925 | """ Full assembly of the parts to form the complete network """
import sys
import torch.nn.functional as F
from .unet_parts import *
'''class UNet(nn.Module):
def __init__(self, n_channels, n_classes, isfine=True, bilinear=True):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.is_fine = isfine
self.bilinear = bilinear
self.inc = DoubleConv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
factor = 2 if bilinear else 1
self.down4 = Down(512, 1024 // factor)
self.up1 = Up(1024, 512 // factor, bilinear)
self.up2 = Up(512, 256 // factor, bilinear)
self.up3 = Up(256, 128 // factor, bilinear)
self.up4 = Up(128, 64, bilinear)
self.condup = CondUp()
self.outc = OutConv(64, n_classes)
def forward(self, x=None):
if self.is_fine:
x = self.condup(x)
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits'''
class UNet(nn.Module):
    """Configurable-depth U-Net built from unet_parts blocks.

    Encoder: ``inc`` (DoubleConv) followed by ``n_layers`` Down blocks
    doubling channels up to the bottleneck.  Decoder: ``n_layers`` Up
    blocks with skip connections back to the encoder outputs, then a
    1-channel-mapping OutConv head producing ``n_classes`` logits.
    When ``isfine`` is true the input is first upsampled by ``CondUp``.

    NOTE(review): the print() calls below are debug logging of the
    channel plan at construction time; they write to stdout on every
    instantiation.
    """
    def __init__(self, n_channels, n_classes, n_layers, isfine=True, bilinear=True):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.is_fine = isfine
        self.bilinear = bilinear
        self.n_layers = n_layers
        print(" ------------- " + str(n_layers) + "-----------------")
        print('downs ----')
        # Base channel width; doubled at every encoder level.
        out_channels = 64
        self.inc = DoubleConv(n_channels, out_channels)
        self.downs = []
        # First n_layers-1 Down blocks double the channels ...
        for l_idx in range(0,n_layers-1):
            self.downs.append(Down(out_channels*(2**l_idx), out_channels*(2**(l_idx+1))))
            print(out_channels*(2**l_idx), out_channels*(2**(l_idx+1)))
        # ... the bottleneck Down keeps the channel count constant.
        self.downs.append(Down(out_channels*(2**(n_layers-1)), out_channels*(2**(n_layers-1))))
        print(out_channels*(2**(n_layers-1)), out_channels*(2**(n_layers-1)))
        print('ups ----')
        self.ups = []
        # Decoder mirrors the encoder: each Up halves the channel count
        # (input channels include the concatenated skip connection).
        for l_idx in reversed(range(1,n_layers)):
            #print('l_idx ',l_idx)
            self.ups.append(Up(out_channels*(2**(l_idx+1)), out_channels*(2**(l_idx-1)), bilinear))
            print(out_channels*(2**(l_idx+1)), out_channels*(2**(l_idx-1)))
            #, self_attn = (l_idx < 2 and n_layers >= 4)
        self.ups.append(Up(out_channels*(2**1), out_channels*(2**0), bilinear))
        print(out_channels*(2**1), out_channels*(2**0))
        print("-------------------------------------------------------")
        # Wrap in ModuleList so parameters are registered with the module.
        self.downs = nn.ModuleList(self.downs)
        self.ups = nn.ModuleList(self.ups)
        self.condup = CondUp()
        self.outc = OutConv(out_channels, n_classes)
    def forward(self, x=None,y=None):
        # ``y`` is accepted but unused in this implementation.
        if self.is_fine:
            # Conditionally upsample the (coarse) input first.
            x = self.condup(x)
        x1 = self.inc(x)
        if self.n_layers > 1:
            # Keep every encoder output for the decoder's skip connections.
            down_outs = [x1]
            #print('x1 before ',x1.size())
            for layer in self.downs:
                x1 = layer(x1)
                #print('x1 ',x1.size())
                down_outs.append(x1)
            #print('All down_outs ',[x.size() for x in down_outs])
            # First Up merges the bottleneck with the deepest skip.
            x_n = self.ups[0](down_outs[-1], down_outs[-2])
            #print('x_n ',x_n.size())
            for i, layer in enumerate(self.ups[1:]):
                # Walk the stored encoder outputs from deep to shallow.
                x_n = layer(x_n, down_outs[-(i+3)])
                #print('x_n ',x_n.size())
        else:
            x_n = x1
        logits = self.outc(x_n)
        return logits
| 2,342 | 1 | 76 |
edc112350f0641b049e49966a7cbc254a44c2bb1 | 6,467 | py | Python | pyuvdata/tests/test_fhd_cal.py | r-xue/pyuvdata | 667abc1a8a8a4fefd91f68a1cb15d4f62cd9fb60 | [
"BSD-2-Clause"
] | null | null | null | pyuvdata/tests/test_fhd_cal.py | r-xue/pyuvdata | 667abc1a8a8a4fefd91f68a1cb15d4f62cd9fb60 | [
"BSD-2-Clause"
] | null | null | null | pyuvdata/tests/test_fhd_cal.py | r-xue/pyuvdata | 667abc1a8a8a4fefd91f68a1cb15d4f62cd9fb60 | [
"BSD-2-Clause"
] | null | null | null | # -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for FHD_cal object.
"""
from __future__ import absolute_import, division, print_function
import nose.tools as nt
import os
import numpy as np
from pyuvdata import UVCal
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
# set up FHD file list
testdir = os.path.join(DATA_PATH, 'fhd_cal_data/')
testfile_prefix = '1061316296_'
obs_testfile = os.path.join(testdir, testfile_prefix + 'obs.sav')
cal_testfile = os.path.join(testdir, testfile_prefix + 'cal.sav')
settings_testfile = os.path.join(testdir, testfile_prefix + 'settings.txt')
def test_ReadFHDcalWriteReadcalfits():
"""
FHD cal to calfits loopback test.
Read in FHD cal files, write out as calfits, read back in and check for object
equality.
"""
fhd_cal = UVCal()
calfits_cal = UVCal()
fhd_cal.read_fhd_cal(cal_testfile, obs_testfile, settings_file=settings_testfile)
outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
nt.assert_equal(fhd_cal, calfits_cal)
# do it again with fit gains (rather than raw)
fhd_cal.read_fhd_cal(cal_testfile, obs_testfile,
settings_file=settings_testfile, raw=False)
outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
nt.assert_equal(fhd_cal, calfits_cal)
def test_extra_history():
"""
test that setting the extra_history keyword works
"""
fhd_cal = UVCal()
calfits_cal = UVCal()
extra_history = 'Some extra history for testing\n'
fhd_cal.read_fhd_cal(cal_testfile, obs_testfile,
settings_file=settings_testfile,
extra_history=extra_history)
outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
nt.assert_equal(fhd_cal, calfits_cal)
nt.assert_true(extra_history in fhd_cal.history)
# try again with a list of history strings
extra_history = ['Some extra history for testing',
'And some more history as well']
fhd_cal.read_fhd_cal(cal_testfile, obs_testfile,
settings_file=settings_testfile,
extra_history=extra_history)
outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
nt.assert_equal(fhd_cal, calfits_cal)
for line in extra_history:
nt.assert_true(line in fhd_cal.history)
def test_flags_galaxy():
"""
test that files with time, freq and tile flags and galaxy models behave as expected
"""
testdir = os.path.join(DATA_PATH, 'fhd_cal_data/flag_set')
obs_testfile_flag = os.path.join(testdir, testfile_prefix + 'obs.sav')
cal_testfile_flag = os.path.join(testdir, testfile_prefix + 'cal.sav')
settings_testfile_flag = os.path.join(testdir, testfile_prefix + 'settings.txt')
fhd_cal = UVCal()
calfits_cal = UVCal()
fhd_cal.read_fhd_cal(cal_testfile_flag, obs_testfile_flag,
settings_file=settings_testfile_flag)
outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
nt.assert_equal(fhd_cal, calfits_cal)
def test_breakReadFHDcal():
"""Try various cases of missing files."""
fhd_cal = UVCal()
nt.assert_raises(Exception, fhd_cal.read_fhd_cal, cal_testfile) # Missing obs
uvtest.checkWarnings(fhd_cal.read_fhd_cal, [cal_testfile, obs_testfile],
message=['No settings file'])
# Check only pyuvdata version history with no settings file
nt.assert_equal(fhd_cal.history, '\n' + fhd_cal.pyuvdata_version_str)
def test_read_multi():
"""Test reading in multiple files."""
testdir2 = os.path.join(DATA_PATH, 'fhd_cal_data/set2')
obs_testfile_list = [obs_testfile, os.path.join(testdir2, testfile_prefix + 'obs.sav')]
cal_testfile_list = [cal_testfile, os.path.join(testdir2, testfile_prefix + 'cal.sav')]
settings_testfile_list = [settings_testfile, os.path.join(testdir2, testfile_prefix + 'settings.txt')]
fhd_cal = UVCal()
calfits_cal = UVCal()
uvtest.checkWarnings(fhd_cal.read_fhd_cal, [cal_testfile_list, obs_testfile_list],
{'settings_file': settings_testfile_list},
message='UVParameter diffuse_model does not match')
outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
fhd_cal.write_calfits(outfile, clobber=True)
calfits_cal.read_calfits(outfile)
nt.assert_equal(fhd_cal, calfits_cal)
def test_break_read_multi():
"""Test errors for different numbers of files."""
testdir2 = os.path.join(DATA_PATH, 'fhd_cal_data/set2')
obs_testfile_list = [obs_testfile, os.path.join(testdir2, testfile_prefix + 'obs.sav')]
cal_testfile_list = [cal_testfile, os.path.join(testdir2, testfile_prefix + 'cal.sav')]
settings_testfile_list = [settings_testfile, os.path.join(testdir2, testfile_prefix + 'settings.txt')]
fhd_cal = UVCal()
nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list,
obs_testfile_list[0], settings_file=settings_testfile_list)
nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list,
obs_testfile_list, settings_file=settings_testfile_list[0])
nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list,
obs_testfile_list + obs_testfile_list, settings_file=settings_testfile_list)
nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list,
obs_testfile_list, settings_file=settings_testfile_list + settings_testfile_list)
nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list[0],
obs_testfile_list, settings_file=settings_testfile_list[0])
nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list[0],
obs_testfile_list[0], settings_file=settings_testfile_list)
| 41.722581 | 106 | 0.720427 | # -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for FHD_cal object.
"""
from __future__ import absolute_import, division, print_function
import nose.tools as nt
import os
import numpy as np
from pyuvdata import UVCal
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
# set up FHD file list
testdir = os.path.join(DATA_PATH, 'fhd_cal_data/')
testfile_prefix = '1061316296_'
obs_testfile = os.path.join(testdir, testfile_prefix + 'obs.sav')
cal_testfile = os.path.join(testdir, testfile_prefix + 'cal.sav')
settings_testfile = os.path.join(testdir, testfile_prefix + 'settings.txt')
def test_ReadFHDcalWriteReadcalfits():
    """
    FHD cal to calfits loopback test.
    Read in FHD cal files, write out as calfits, read back in and check for object
    equality.
    """
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    # Round trip 1: raw gains (the default).
    fhd_cal.read_fhd_cal(cal_testfile, obs_testfile, settings_file=settings_testfile)
    outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    # The calfits round trip must be lossless.
    nt.assert_equal(fhd_cal, calfits_cal)
    # do it again with fit gains (rather than raw)
    fhd_cal.read_fhd_cal(cal_testfile, obs_testfile,
                         settings_file=settings_testfile, raw=False)
    outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    nt.assert_equal(fhd_cal, calfits_cal)
def test_extra_history():
    """
    test that setting the extra_history keyword works
    """
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    # Case 1: extra_history given as a single string.
    extra_history = 'Some extra history for testing\n'
    fhd_cal.read_fhd_cal(cal_testfile, obs_testfile,
                         settings_file=settings_testfile,
                         extra_history=extra_history)
    outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    nt.assert_equal(fhd_cal, calfits_cal)
    # The extra history must be recorded on the object.
    nt.assert_true(extra_history in fhd_cal.history)
    # try again with a list of history strings
    extra_history = ['Some extra history for testing',
                     'And some more history as well']
    fhd_cal.read_fhd_cal(cal_testfile, obs_testfile,
                         settings_file=settings_testfile,
                         extra_history=extra_history)
    outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    nt.assert_equal(fhd_cal, calfits_cal)
    # Every list entry must appear in the recorded history.
    for line in extra_history:
        nt.assert_true(line in fhd_cal.history)
def test_flags_galaxy():
    """
    test that files with time, freq and tile flags and galaxy models behave as expected
    """
    # This data set carries flags and a galaxy diffuse model.
    testdir = os.path.join(DATA_PATH, 'fhd_cal_data/flag_set')
    obs_testfile_flag = os.path.join(testdir, testfile_prefix + 'obs.sav')
    cal_testfile_flag = os.path.join(testdir, testfile_prefix + 'cal.sav')
    settings_testfile_flag = os.path.join(testdir, testfile_prefix + 'settings.txt')
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    fhd_cal.read_fhd_cal(cal_testfile_flag, obs_testfile_flag,
                         settings_file=settings_testfile_flag)
    # Flags and model info must survive the calfits round trip.
    outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    nt.assert_equal(fhd_cal, calfits_cal)
def test_breakReadFHDcal():
    """Try various cases of missing files."""
    fhd_cal = UVCal()
    # Reading without an obs file must raise.
    nt.assert_raises(Exception, fhd_cal.read_fhd_cal, cal_testfile)  # Missing obs
    # Reading without a settings file only warns.
    uvtest.checkWarnings(fhd_cal.read_fhd_cal, [cal_testfile, obs_testfile],
                         message=['No settings file'])
    # Check only pyuvdata version history with no settings file
    nt.assert_equal(fhd_cal.history, '\n' + fhd_cal.pyuvdata_version_str)
def test_read_multi():
    """Test reading in multiple files."""
    # Second epoch of the same observation, read together with the first.
    testdir2 = os.path.join(DATA_PATH, 'fhd_cal_data/set2')
    obs_testfile_list = [obs_testfile, os.path.join(testdir2, testfile_prefix + 'obs.sav')]
    cal_testfile_list = [cal_testfile, os.path.join(testdir2, testfile_prefix + 'cal.sav')]
    settings_testfile_list = [settings_testfile, os.path.join(testdir2, testfile_prefix + 'settings.txt')]
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    # The two sets differ in diffuse_model, so a warning is expected.
    uvtest.checkWarnings(fhd_cal.read_fhd_cal, [cal_testfile_list, obs_testfile_list],
                         {'settings_file': settings_testfile_list},
                         message='UVParameter diffuse_model does not match')
    outfile = os.path.join(DATA_PATH, 'test/outtest_FHDcal_1061311664.calfits')
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    nt.assert_equal(fhd_cal, calfits_cal)
def test_break_read_multi():
    """Test errors for different numbers of files."""
    testdir2 = os.path.join(DATA_PATH, 'fhd_cal_data/set2')
    obs_testfile_list = [obs_testfile, os.path.join(testdir2, testfile_prefix + 'obs.sav')]
    cal_testfile_list = [cal_testfile, os.path.join(testdir2, testfile_prefix + 'cal.sav')]
    settings_testfile_list = [settings_testfile, os.path.join(testdir2, testfile_prefix + 'settings.txt')]
    fhd_cal = UVCal()
    # Every mismatched combination of list/scalar or unequal list lengths
    # across cal/obs/settings arguments must raise a ValueError.
    nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list,
                     obs_testfile_list[0], settings_file=settings_testfile_list)
    nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list,
                     obs_testfile_list, settings_file=settings_testfile_list[0])
    nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list,
                     obs_testfile_list + obs_testfile_list, settings_file=settings_testfile_list)
    nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list,
                     obs_testfile_list, settings_file=settings_testfile_list + settings_testfile_list)
    nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list[0],
                     obs_testfile_list, settings_file=settings_testfile_list[0])
    nt.assert_raises(ValueError, fhd_cal.read_fhd_cal, cal_testfile_list[0],
                     obs_testfile_list[0], settings_file=settings_testfile_list)
| 0 | 0 | 0 |
1a2717656fd27b5bbc8cd7307b564457181591a0 | 2,605 | py | Python | qsdsan/sanunits/_component_splitter.py | stetsonrowles/QSDsan | a74949fcf9e6ff91e9160a75bedaf6fab2191efb | [
"NCSA",
"CNRI-Python",
"FTL"
] | null | null | null | qsdsan/sanunits/_component_splitter.py | stetsonrowles/QSDsan | a74949fcf9e6ff91e9160a75bedaf6fab2191efb | [
"NCSA",
"CNRI-Python",
"FTL"
] | null | null | null | qsdsan/sanunits/_component_splitter.py | stetsonrowles/QSDsan | a74949fcf9e6ff91e9160a75bedaf6fab2191efb | [
"NCSA",
"CNRI-Python",
"FTL"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
Yalin Li <zoe.yalin.li@gmail.com>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/master/LICENSE.txt
for license details.
'''
# %%
from biosteam._graphics import splitter_graphics
from .. import SanUnit
__all__ = ('ComponentSplitter',)
class ComponentSplitter(SanUnit):
'''
Split the influent into individual components,
the last effluent contains all remaining components.
'''
_ins_size_is_fixed = False
_outs_size_is_fixed = False
_graphics = splitter_graphics
@property
def split_keys(self):
'''
[iterable] An iterable containing IDs of components to be splitted to
different effluents. Element of the item in the iterable can be str of
another iterable containing component IDs. If the item is also iterable,
all components whose ID are in the iterable will be splitted to the same
effluent. Note that the split is 1 (i.e., all of the remaining components
will be diverted to the effluent).
'''
return self._split_keys
@split_keys.setter
| 29.269663 | 105 | 0.59501 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
Yalin Li <zoe.yalin.li@gmail.com>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/master/LICENSE.txt
for license details.
'''
# %%
from biosteam._graphics import splitter_graphics
from .. import SanUnit
__all__ = ('ComponentSplitter',)
class ComponentSplitter(SanUnit):
    '''
    Split the influent into individual components,
    the last effluent contains all remaining components.
    '''
    def __init__(self, ID='', ins=None, outs=(), split_keys=()):
        SanUnit.__init__(self, ID, ins, outs)
        self.split_keys = split_keys
    _ins_size_is_fixed = False
    _outs_size_is_fixed = False
    _graphics = splitter_graphics
    def _run(self):
        # The last effluent starts as the mixture of all influents; each
        # splitted component is then moved out of it entirely (split = 1).
        last = self.outs[-1]
        last.mix_from(self.ins)
        splitted = []
        num = 0
        for cmps in self.split_keys:
            if isinstance(cmps, str):
                # Check for duplicates before moving any mass.
                if cmps in splitted:
                    raise ValueError(f'The component {cmps} appears more than once in `split_dict`.')
                self.outs[num].imass[cmps] = last.imass[cmps]
                last.imass[cmps] = 0
                splitted.append(cmps)
            else:
                try:
                    iter(cmps)
                except TypeError:
                    # Only catch non-iterables (the original bare `except:`
                    # also swallowed e.g. KeyboardInterrupt).
                    raise ValueError('Elements of the split must be str or iterable, '
                                     f'not {type(cmps).__name__}.')
                for cmp in cmps:
                    # Report the offending component itself; the original
                    # message interpolated the whole group (`cmps`).
                    if cmp in splitted:
                        raise ValueError(f'The component {cmp} appears more than once in `split_dict`.')
                    self.outs[num].imass[cmp] = last.imass[cmp]
                    last.imass[cmp] = 0
                    splitted.append(cmp)
            num += 1
    @property
    def split_keys(self):
        '''
        [iterable] An iterable containing IDs of components to be splitted to
        different effluents. Element of the item in the iterable can be str of
        another iterable containing component IDs. If the item is also iterable,
        all components whose ID are in the iterable will be splitted to the same
        effluent. Note that the split is 1 (i.e., all of the remaining components
        will be diverted to the effluent).
        '''
        return self._split_keys
    @split_keys.setter
    def split_keys(self, i):
        self._split_keys = i
| 1,190 | 0 | 84 |
8ef77eaada8f6cb34ed14bebcaae741bfa4909f6 | 926 | py | Python | antaytheist/quote/quotefallacy.py | antaytheist/antaytheist | 52f64f5e09c4bd52d694eb44af62bb897ab492da | [
"MIT"
] | null | null | null | antaytheist/quote/quotefallacy.py | antaytheist/antaytheist | 52f64f5e09c4bd52d694eb44af62bb897ab492da | [
"MIT"
] | null | null | null | antaytheist/quote/quotefallacy.py | antaytheist/antaytheist | 52f64f5e09c4bd52d694eb44af62bb897ab492da | [
"MIT"
] | null | null | null | # fallacy quoter for antaytheist
import json
import re
import requests
fallaciesjson = requests.get("https://yourlogicalfallacyis.com/js/data/fallacies.json", timeout=15)
fallaciesjson.raise_for_status()
fallacies = json.loads(fallaciesjson.text)
fallacynames = []
for fallacyitem in fallacies:
fallacynames.append(fallacyitem["title"])
| 28.9375 | 103 | 0.61987 | # fallacy quoter for antaytheist
import json
import re
import requests
fallaciesjson = requests.get("https://yourlogicalfallacyis.com/js/data/fallacies.json", timeout=15)
fallaciesjson.raise_for_status()
fallacies = json.loads(fallaciesjson.text)
fallacynames = []
for fallacyitem in fallacies:
fallacynames.append(fallacyitem["title"])
def quote(args):
    """Look up a logical fallacy by name and format its quote payload.

    args[0] is the fallacy title (case-insensitive); returns a dict with
    either an error or the formatted description/example text.
    """
    # Reject titles not present in the module-level fallacynames list.
    if args[0].lower() not in fallacynames:
        return {"success": False, "error": 'Invalid fallacy "' + args[0] + '"'}
    # fallacynames guarantees a match exists, so myfallacy is always bound.
    for fallacyitem in fallacies:
        if fallacyitem["title"] == args[0].lower():
            myfallacy = fallacyitem
            break
    return {
        "success": True,
        "output": {
            # NOTE(review): this re.sub appears to normalise space
            # characters in the description (possibly non-breaking or
            # doubled spaces in the upstream JSON) -- verify the intended
            # pattern against the source data.
            "text": re.sub(" ", " ", myfallacy["description"]) + "\n\n" + myfallacy["exampleText"]
        },
        "source": {
            "name": "your logical fallacy is", "url": "https://yourlogicalfallacyis.com/"
        }
    }
| 560 | 0 | 23 |
8180e77980290adfbae6f1839d3968062a315434 | 171 | py | Python | programmers/lv2_review/test.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | programmers/lv2_review/test.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | programmers/lv2_review/test.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | idx = 0
a, b, c = map(int, input().split())
for i in range(a):
for j in range(b):
for k in range(c):
idx += 1
print(i, j, k)
print(idx) | 21.375 | 35 | 0.461988 | idx = 0
# Read three loop bounds from one line of stdin.
a, b, c = map(int, input().split())
# Enumerate every (i, j, k) triple in lexicographic order, counting the
# total number of iterations in the module-level `idx` accumulator.
for i in range(a):
    for j in range(b):
        for k in range(c):
            idx += 1
            print(i, j, k)
print(idx) | 0 | 0 | 0 |
6b00713419d5784bad69590ca1e8452a747dae46 | 809 | py | Python | ImageProcessing/LiveStreamVideo/Video/old/webcamread.py | gudduarnav/ImageProcessingPython | e0d5d3f64d8a209ff986e10d81763f9614e2403d | [
"MIT"
] | null | null | null | ImageProcessing/LiveStreamVideo/Video/old/webcamread.py | gudduarnav/ImageProcessingPython | e0d5d3f64d8a209ff986e10d81763f9614e2403d | [
"MIT"
] | null | null | null | ImageProcessing/LiveStreamVideo/Video/old/webcamread.py | gudduarnav/ImageProcessingPython | e0d5d3f64d8a209ff986e10d81763f9614e2403d | [
"MIT"
] | null | null | null | # Live stream a Colored video from Webcam or default Video Capture device
import cv2
# Open the default Video Capture device (or WebCam)
v = cv2.VideoCapture(0)
# Check if the file can be opened successfully
if(v.isOpened() == "False"):
print("ERROR: Cannot open the Video File")
quit()
print("SUCCESS: Video File opened")
# Read one frame from the file
while v.isOpened() == True:
ret, frame = v.read()
# Check if the frame is successfully read
if(ret == False):
print("ERROR: Cannot read more video frame")
break
# Display the frame data
cv2.imshow("Video Frame", frame)
# Wait for 25 ms for a user key press to exit
if(cv2.waitKey(25) != -1):
break
# Close the video file
v.release()
# Close all open window
cv2.destroyAllWindows()
| 22.472222 | 73 | 0.663782 | # Live stream a Colored video from Webcam or default Video Capture device
import cv2
# Open the default Video Capture device (or WebCam)
v = cv2.VideoCapture(0)
# Check if the file can be opened successfully
if(v.isOpened() == "False"):
print("ERROR: Cannot open the Video File")
quit()
print("SUCCESS: Video File opened")
# Read one frame from the file
while v.isOpened() == True:
ret, frame = v.read()
# Check if the frame is successfully read
if(ret == False):
print("ERROR: Cannot read more video frame")
break
# Display the frame data
cv2.imshow("Video Frame", frame)
# Wait for 25 ms for a user key press to exit
if(cv2.waitKey(25) != -1):
break
# Close the video file
v.release()
# Close all open window
cv2.destroyAllWindows()
| 0 | 0 | 0 |
b85d122594b4531661a033b8ba3925c5dc50ff2c | 724 | py | Python | 06.py | sqldan/advent-of-code-2020 | 17c5240da16dec81d84859061fdb5d3a3174c860 | [
"MIT"
] | null | null | null | 06.py | sqldan/advent-of-code-2020 | 17c5240da16dec81d84859061fdb5d3a3174c860 | [
"MIT"
] | null | null | null | 06.py | sqldan/advent-of-code-2020 | 17c5240da16dec81d84859061fdb5d3a3174c860 | [
"MIT"
] | null | null | null | answers = []
with open("input06.txt") as f:
ans = set('')
for line in f:
line = line.strip()
if not line:
answers.append(ans)
ans = set('')
else:
ans = ans.union(set(line))
ans = ans.union(set(line))
answers.append(ans)
print(sum([len(a) for a in answers ]))
answers = []
with open("input06.txt") as f:
ans = set('abcdefghijklmnopqrstuvwxyz')
for line in f:
line = line.strip()
if not line:
answers.append(ans)
ans = set('abcdefghijklmnopqrstuvwxyz')
else:
ans = ans.intersection(set(line))
ans = ans.intersection(set(line))
answers.append(ans)
print(sum([len(a) for a in answers ])) | 23.354839 | 51 | 0.558011 | answers = []
with open("input06.txt") as f:
ans = set('')
for line in f:
line = line.strip()
if not line:
answers.append(ans)
ans = set('')
else:
ans = ans.union(set(line))
ans = ans.union(set(line))
answers.append(ans)
print(sum([len(a) for a in answers ]))
answers = []
with open("input06.txt") as f:
ans = set('abcdefghijklmnopqrstuvwxyz')
for line in f:
line = line.strip()
if not line:
answers.append(ans)
ans = set('abcdefghijklmnopqrstuvwxyz')
else:
ans = ans.intersection(set(line))
ans = ans.intersection(set(line))
answers.append(ans)
print(sum([len(a) for a in answers ])) | 0 | 0 | 0 |
8f1f5c58abd02cdb6caf693636dad3802db579f4 | 145 | py | Python | code/AST/IfAST.py | antuniooh/Dattebayo-compiler | fd6767aee1c131dcbf76fc061bce43224df03f8b | [
"MIT"
] | null | null | null | code/AST/IfAST.py | antuniooh/Dattebayo-compiler | fd6767aee1c131dcbf76fc061bce43224df03f8b | [
"MIT"
] | null | null | null | code/AST/IfAST.py | antuniooh/Dattebayo-compiler | fd6767aee1c131dcbf76fc061bce43224df03f8b | [
"MIT"
] | 1 | 2021-12-05T14:00:39.000Z | 2021-12-05T14:00:39.000Z | from .AST import AST
| 18.125 | 41 | 0.648276 | from .AST import AST
class IfAST(AST):
def __init__(self, condition, scope):
self.condition = condition
self.scope = scope
| 78 | -4 | 49 |
97dddffe5b61ea8e1e48b4113345224990d3eccd | 4,648 | py | Python | bedevere/util.py | hugovk/bedevere | 1163568e89fe5b4bbad66885fef6d3a2fc85c736 | [
"Apache-2.0"
] | null | null | null | bedevere/util.py | hugovk/bedevere | 1163568e89fe5b4bbad66885fef6d3a2fc85c736 | [
"Apache-2.0"
] | null | null | null | bedevere/util.py | hugovk/bedevere | 1163568e89fe5b4bbad66885fef6d3a2fc85c736 | [
"Apache-2.0"
] | null | null | null | import enum
import sys
import gidgethub
DEFAULT_BODY = ""
TAG_NAME = "gh-issue-number"
NEWS_NEXT_DIR = "Misc/NEWS.d/next/"
CLOSING_TAG = f"<!-- /{TAG_NAME} -->"
BODY = f"""\
{{body}}
<!-- {TAG_NAME}: gh-{{issue_number}} -->
* gh-{{issue_number}}
{CLOSING_TAG}
"""
@enum.unique
def create_status(context, state, *, description=None, target_url=None):
"""Create the data for a status.
The argument order is such that you can use functools.partial() to set the
context to avoid repeatedly specifying it throughout a module.
"""
status = {
"context": context,
"state": state.value,
}
if description is not None:
status["description"] = description
if target_url is not None:
status["target_url"] = target_url
return status
async def post_status(gh, event, status):
"""Post a status in reaction to an event."""
await gh.post(event.data["pull_request"]["statuses_url"], data=status)
def skip_label(what):
"""Generate a "skip" label name."""
return f"skip {what}"
def skip(what, issue):
"""See if an issue has a "skip {what}" label."""
return skip_label(what) in labels(issue)
def label_name(event_data):
"""Get the label name from a label-related webhook event."""
return event_data["label"]["name"]
async def files_for_PR(gh, pull_request):
"""Get files for a pull request."""
# For some unknown reason there isn't any files URL in a pull request
# payload.
files_url = f'{pull_request["url"]}/files'
data = []
async for filedata in gh.getiter(files_url): # pragma: no branch
data.append(
{
"file_name": filedata["filename"],
"patch": filedata.get("patch", ""),
}
)
return data
async def issue_for_PR(gh, pull_request):
"""Get the issue data for a pull request."""
return await gh.getitem(pull_request["issue_url"])
async def patch_body(gh, pull_request, issue_number):
"""Updates the description of a PR with the gh issue number if it exists.
returns if body exists with issue_number
"""
if "body" not in pull_request or pull_request["body"] is None:
return await gh.patch(
pull_request["url"],
data=BODY.format(body=DEFAULT_BODY, issue_number=issue_number),
)
if f"GH-{issue_number}\n" not in pull_request["body"]:
return await gh.patch(
pull_request["url"],
data=BODY.format(body=pull_request["body"], issue_number=issue_number),
)
return
async def is_core_dev(gh, username):
"""Check if the user is a CPython core developer."""
org_teams = "/orgs/python/teams"
team_name = "python core"
async for team in gh.getiter(org_teams):
if team["name"].lower() == team_name: # pragma: no branch
break
else:
raise ValueError(f"{team_name!r} not found at {org_teams!r}")
# The 'teams' object only provides a URL to a deprecated endpoint,
# so manually construct the URL to the non-deprecated team membership
# endpoint.
membership_url = f"/teams/{team['id']}/memberships/{username}"
try:
await gh.getitem(membership_url)
except gidgethub.BadRequest as exc:
if exc.status_code == 404:
return False
raise
else:
return True
def is_news_dir(filename):
"Return True if file is in the News directory."
return filename.startswith(NEWS_NEXT_DIR)
def normalize_title(title, body):
"""Normalize the title if it spills over into the PR's body."""
if not (title.endswith("…") and body.startswith("…")):
return title
else:
# Being paranoid in case \r\n is used.
return title[:-1] + body[1:].partition("\r\n")[0]
async def get_pr_for_commit(gh, sha):
"""Find the PR containing the specific commit hash."""
prs_for_commit = await gh.getitem(
f"/search/issues?q=type:pr+repo:python/cpython+sha:{sha}"
)
if prs_for_commit["total_count"] > 0: # there should only be one
return prs_for_commit["items"][0]
return None
| 27.502959 | 83 | 0.6321 | import enum
import sys
import gidgethub
DEFAULT_BODY = ""
TAG_NAME = "gh-issue-number"
NEWS_NEXT_DIR = "Misc/NEWS.d/next/"
CLOSING_TAG = f"<!-- /{TAG_NAME} -->"
BODY = f"""\
{{body}}
<!-- {TAG_NAME}: gh-{{issue_number}} -->
* gh-{{issue_number}}
{CLOSING_TAG}
"""
@enum.unique
class StatusState(enum.Enum):
SUCCESS = "success"
ERROR = "error"
FAILURE = "failure"
def create_status(context, state, *, description=None, target_url=None):
"""Create the data for a status.
The argument order is such that you can use functools.partial() to set the
context to avoid repeatedly specifying it throughout a module.
"""
status = {
"context": context,
"state": state.value,
}
if description is not None:
status["description"] = description
if target_url is not None:
status["target_url"] = target_url
return status
async def post_status(gh, event, status):
"""Post a status in reaction to an event."""
await gh.post(event.data["pull_request"]["statuses_url"], data=status)
def skip_label(what):
"""Generate a "skip" label name."""
return f"skip {what}"
def labels(issue):
return {label_data["name"] for label_data in issue["labels"]}
def skip(what, issue):
"""See if an issue has a "skip {what}" label."""
return skip_label(what) in labels(issue)
def label_name(event_data):
"""Get the label name from a label-related webhook event."""
return event_data["label"]["name"]
def user_login(item):
return item["user"]["login"]
async def files_for_PR(gh, pull_request):
"""Get files for a pull request."""
# For some unknown reason there isn't any files URL in a pull request
# payload.
files_url = f'{pull_request["url"]}/files'
data = []
async for filedata in gh.getiter(files_url): # pragma: no branch
data.append(
{
"file_name": filedata["filename"],
"patch": filedata.get("patch", ""),
}
)
return data
async def issue_for_PR(gh, pull_request):
"""Get the issue data for a pull request."""
return await gh.getitem(pull_request["issue_url"])
async def patch_body(gh, pull_request, issue_number):
"""Updates the description of a PR with the gh issue number if it exists.
returns if body exists with issue_number
"""
if "body" not in pull_request or pull_request["body"] is None:
return await gh.patch(
pull_request["url"],
data=BODY.format(body=DEFAULT_BODY, issue_number=issue_number),
)
if f"GH-{issue_number}\n" not in pull_request["body"]:
return await gh.patch(
pull_request["url"],
data=BODY.format(body=pull_request["body"], issue_number=issue_number),
)
return
async def is_core_dev(gh, username):
"""Check if the user is a CPython core developer."""
org_teams = "/orgs/python/teams"
team_name = "python core"
async for team in gh.getiter(org_teams):
if team["name"].lower() == team_name: # pragma: no branch
break
else:
raise ValueError(f"{team_name!r} not found at {org_teams!r}")
# The 'teams' object only provides a URL to a deprecated endpoint,
# so manually construct the URL to the non-deprecated team membership
# endpoint.
membership_url = f"/teams/{team['id']}/memberships/{username}"
try:
await gh.getitem(membership_url)
except gidgethub.BadRequest as exc:
if exc.status_code == 404:
return False
raise
else:
return True
def is_news_dir(filename):
"Return True if file is in the News directory."
return filename.startswith(NEWS_NEXT_DIR)
def normalize_title(title, body):
"""Normalize the title if it spills over into the PR's body."""
if not (title.endswith("…") and body.startswith("…")):
return title
else:
# Being paranoid in case \r\n is used.
return title[:-1] + body[1:].partition("\r\n")[0]
def no_labels(event_data):
if "label" not in event_data:
print(
"no 'label' key in payload; "
"'unlabeled' event triggered by label deletion?",
file=sys.stderr,
)
return True
else:
return False
async def get_pr_for_commit(gh, sha):
"""Find the PR containing the specific commit hash."""
prs_for_commit = await gh.getitem(
f"/search/issues?q=type:pr+repo:python/cpython+sha:{sha}"
)
if prs_for_commit["total_count"] > 0: # there should only be one
return prs_for_commit["items"][0]
return None
| 344 | 76 | 91 |
88d146fb72f4520dbbb2e8d70ecdedfaf44b1bb6 | 4,218 | py | Python | dev_ws/src/everyday_studio/capture.py | MichaelContrerasR/Face-every-day-maker | 788b36e3b4a3830c6a1f45c3d92b9b35a0ff46e0 | [
"Apache-2.0"
] | 7 | 2021-06-13T10:33:24.000Z | 2021-08-22T12:46:33.000Z | dev_ws/src/everyday_studio/capture.py | MichaelContrerasR/Face-every-day-maker | 788b36e3b4a3830c6a1f45c3d92b9b35a0ff46e0 | [
"Apache-2.0"
] | null | null | null | dev_ws/src/everyday_studio/capture.py | MichaelContrerasR/Face-every-day-maker | 788b36e3b4a3830c6a1f45c3d92b9b35a0ff46e0 | [
"Apache-2.0"
] | 5 | 2021-06-13T04:31:03.000Z | 2021-07-19T22:03:23.000Z | """
Author: John Betacourt Gonzalez
Aka: @JohnBetaCode
"""
# =============================================================================
# LIBRARIES AND DEPENDENCIES - LIBRARIES AND DEPENDENCIES - LIBRARIES AND DEPEN
# =============================================================================
from utils import printlog, try_catch_log
import cv2
import numpy as np
import os
# =============================================================================
# CLASSES - CLASSES - CLASSES - CLASSES - CLASSES - CLASSES - CLASSES - CLASSE
# =============================================================================
# =============================================================================
# FUNCTIONS - FUNCTIONS - FUNCTIONS - FUNCTIONS - FUNCTIONS - FUNCTIONS - FUNC
# =============================================================================
# =============================================================================
# MAIN FUNCTION - MAIN FUNCTION - MAIN FUNCTION - MA[-IN FUNCTION - MAIN FUNCTION
# IMPLEMENTATION EXAMPLE - IMPLEMENTATION EXAMPLE - IMPLEMENTATION EXAMPLE - IM
# =============================================================================
| 34.292683 | 124 | 0.490991 | """
Author: John Betacourt Gonzalez
Aka: @JohnBetaCode
"""
# =============================================================================
# LIBRARIES AND DEPENDENCIES - LIBRARIES AND DEPENDENCIES - LIBRARIES AND DEPEN
# =============================================================================
from utils import printlog, try_catch_log
import cv2
import numpy as np
import os
# =============================================================================
# CLASSES - CLASSES - CLASSES - CLASSES - CLASSES - CLASSES - CLASSES - CLASSE
# =============================================================================
class VideoWriter:
def __init__(
self,
file_path: str,
file_name: str,
rate: int = 30,
) -> None:
"""!
Object class constructor for video captures
@param file_path 'str' File path to write video
@param file_name 'str' File name to save video
@param rate 'int' Rate to write video (default: 30)
@return None
"""
# ---------------------------------------------------------------------
# Video capture path variables
self.file_name = f"{file_name}.avi"
self.file_path = file_path
self.file_dir = os.path.join(self.file_path, self.file_name)
# Video capture object
self.video_fourcc = cv2.VideoWriter_fourcc("M", "J", "P", "G")
self.video_writer = None
# Video capture features
self.video_rate = rate
self.video_width = None
self.video_height = None
self.ready = True
def start(self, img: np.ndarray) -> None:
"""!
Initializes video capture object
@param img: 'cv2.math' image to be recorded in
video capture. Its features will define
the properties of the capture object.
@return None
"""
# Defines video writer properties
self.video_width = img.shape[1]
self.video_height = img.shape[0]
# Check that path exits
if not os.path.isdir(self.file_path):
printlog(
msg=f"path {self.file_path} created",
msg_type="WARN",
)
os.mkdir(self.file_path)
# Check that video file does not exits, if does, then remove it
elif os.path.isfile(os.path.join(self.file_path, self.file_name)):
os.remove(os.path.join(self.file_path, self.file_name))
printlog(
msg=f"previous video file {self.file_name} removed",
msg_type="WARN",
)
self.video_writer = cv2.VideoWriter(
self.file_dir,
self.video_fourcc,
self.video_rate,
(self.video_width, self.video_height),
)
printlog(
msg=f"{self.file_name} video file created, size:{self.video_width}X{self.video_height}, rate:{self.video_rate}",
msg_type="INFO",
)
@try_catch_log
def write(self, img: np.ndarray) -> None:
"""!
Writes the next image whenever the VideoWritter
is ready.
@param img 'cv2.math' image to record in
video capture
@return None
"""
# Starts the video writter object
if self.video_writer is None:
self.start(img=img)
# Writes the image into the video
self.video_writer.write(img)
@try_catch_log
def close(self) -> None:
"""!
Closes video capture object
@return None
"""
if self.video_writer is not None:
self.video_writer.release()
# =============================================================================
# FUNCTIONS - FUNCTIONS - FUNCTIONS - FUNCTIONS - FUNCTIONS - FUNCTIONS - FUNC
# =============================================================================
# =============================================================================
# MAIN FUNCTION - MAIN FUNCTION - MAIN FUNCTION - MA[-IN FUNCTION - MAIN FUNCTION
# IMPLEMENTATION EXAMPLE - IMPLEMENTATION EXAMPLE - IMPLEMENTATION EXAMPLE - IM
# =============================================================================
| 0 | 3,008 | 22 |
77bffa1098b8768211df90eae754fc4c4e7a4356 | 3,101 | py | Python | parsons/ngpvan/locations.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 3 | 2019-09-05T16:57:15.000Z | 2019-10-01T19:56:58.000Z | parsons/ngpvan/locations.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 22 | 2019-09-03T13:23:37.000Z | 2019-10-03T20:32:48.000Z | parsons/ngpvan/locations.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 2 | 2019-09-01T18:30:10.000Z | 2019-10-03T20:07:46.000Z | """NGPVAN Locations Endpoints"""
from parsons.etl.table import Table
import logging
logger = logging.getLogger(__name__)
| 28.190909 | 88 | 0.530474 | """NGPVAN Locations Endpoints"""
from parsons.etl.table import Table
import logging
logger = logging.getLogger(__name__)
class Locations(object):
def __init__(self, van_connection):
self.connection = van_connection
def get_locations(self, name=None):
"""
Get locations.
`Args:`
name: str
Filter locations by name.
`Returns:`
Parsons Table
See :ref:`parsons-table` for output options.
"""
tbl = Table(self.connection.get_request('locations', params={'name': name}))
logger.info(f'Found {tbl.num_rows} locations.')
return self._unpack_loc(tbl)
def get_location(self, location_id):
"""
Get a location.
`Args:`
location_id: int
The location id.
`Returns:`
dict
"""
r = self.connection.get_request(f'locations/{location_id}')
logger.info(f'Found location {location_id}.')
return r
def create_location(self, name, address_line1=None, address_line2=None, city=None,
state=None, zip_code=None):
"""
Find or create a location. If location already exists, will return location id.
`Args:`
name: str
A name for this location, no longer than 50 characters.
address_line1: str
First line of a street address.
address_line2: str
Second line of a street address.
city: str
City or town name.
state: str
Two or three character state or province code (e.g., MN, ON, NSW, etc.).
zip_code: str
ZIP, ZIP+4, Postal Code, Post code, etc.
`Returns:`
int
A location id.
"""
location = {'name': name,
'address': {
'addressLine1': address_line1,
'addressLine2': address_line2,
'city': city,
'stateOrProvince': state,
'zipOrPostalCode': zip_code
}}
r = self.connection.post_request('locations/findOrCreate', json=location)
logger.info(f'Location {r} created.')
return r
def delete_location(self, location_id):
"""
Delete a location.
`Args:`
location_id: int
The location id
`Returns:`
``None``
"""
r = self.connection.delete_request(f'locations/{location_id}')
logger.info(f'Location {location_id} deleted.')
return r
def _unpack_loc(self, table):
# Internal method to unpack location json
if isinstance(table, tuple):
return table
if 'address' in table.columns:
table.unpack_dict('address', prepend=False)
if 'geoLocation' in table.columns:
table.unpack_dict('geoLocation', prepend=False)
return table
| 399 | 2,555 | 23 |
07f3903bad8e2886e3957bfe37cc6140d43c9015 | 14,018 | py | Python | kindlestrip.py | cxumol/kindlestrip | 05646993262cf7cb998ead2ca60867d20e14cbe3 | [
"Unlicense"
] | 3 | 2018-10-19T09:51:29.000Z | 2020-07-22T19:06:13.000Z | kindlestrip.py | cxumol/kindlestrip | 05646993262cf7cb998ead2ca60867d20e14cbe3 | [
"Unlicense"
] | null | null | null | kindlestrip.py | cxumol/kindlestrip | 05646993262cf7cb998ead2ca60867d20e14cbe3 | [
"Unlicense"
] | 1 | 2019-11-17T04:06:26.000Z | 2019-11-17T04:06:26.000Z | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
#
# This is a python script. You need a Python interpreter to run it.
# For example, ActiveState Python, which exists for windows.
#
# This script strips the penultimate record from a Mobipocket file.
# This is useful because the current KindleGen add a compressed copy
# of the source files used in this record, making the ebook produced
# about twice as big as it needs to be.
#
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
#
# Written by Paul Durrant, 2010-2011, paul@durrant.co.uk
#
# Changelog
# 1.00 - Initial version
# 1.10 - Added an option to output the stripped data
# 1.20 - Added check for source files section (thanks Piquan)
# 1.30 - Added Support for K8 style mobis
# 1.31 - To get K8 style mobis to work properly, need to replace SRCS section with section of 0 length
# 1.35a- Backport of fixes from 1.32-1.35 to 1.31 to workaround latest Kindlegen changes
__version__ = '1.36.1'
import codecs
import getopt
import locale
import os
import struct
import sys
iswindows = sys.platform.startswith('win')
# Because Windows (and Mac OS X) allows full unicode filenames and paths
# any paths in pure bytestring python 2.X code must be utf-8 encoded as they will need to
# be converted on the fly to full unicode for Windows platforms. Any other 8-bit str
# encoding would lose characters that can not be represented in that encoding
# these are simple support routines to allow use of utf-8 encoded bytestrings as paths in main program
# to be converted on the fly to full unicode as temporary un-named values to prevent
# the potential mixing of unicode and bytestring string values in the main program
# force string to be utf-8 encoded whether unicode or bytestring
# get sys.argv arguments and encode them into utf-8
# Python 2.X is broken in that it does not recognize CP65001 as UTF-8
# Almost all sane operating systems now default to utf-8 (or full unicode) as the
# proper default encoding so that all files and path names
# in any language can be properly represented.
if __name__ == '__main__':
cli_main()
| 36.792651 | 112 | 0.643458 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
#
# This is a python script. You need a Python interpreter to run it.
# For example, ActiveState Python, which exists for windows.
#
# This script strips the penultimate record from a Mobipocket file.
# This is useful because the current KindleGen add a compressed copy
# of the source files used in this record, making the ebook produced
# about twice as big as it needs to be.
#
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
#
# Written by Paul Durrant, 2010-2011, paul@durrant.co.uk
#
# Changelog
# 1.00 - Initial version
# 1.10 - Added an option to output the stripped data
# 1.20 - Added check for source files section (thanks Piquan)
# 1.30 - Added Support for K8 style mobis
# 1.31 - To get K8 style mobis to work properly, need to replace SRCS section with section of 0 length
# 1.35a- Backport of fixes from 1.32-1.35 to 1.31 to workaround latest Kindlegen changes
__version__ = '1.36.1'
import codecs
import getopt
import locale
import os
import struct
import sys
iswindows = sys.platform.startswith('win')
# Because Windows (and Mac OS X) allows full unicode filenames and paths
# any paths in pure bytestring python 2.X code must be utf-8 encoded as they will need to
# be converted on the fly to full unicode for Windows platforms. Any other 8-bit str
# encoding would lose characters that can not be represented in that encoding
# these are simple support routines to allow use of utf-8 encoded bytestrings as paths in main program
# to be converted on the fly to full unicode as temporary un-named values to prevent
# the potential mixing of unicode and bytestring string values in the main program
def pathof(s):
if isinstance(s, unicode):
print "Warning: pathof expects utf-8 encoded byestring: ", s
if iswindows:
return s
return s.encode('utf-8')
if iswindows:
return s.decode('utf-8')
return s
# force string to be utf-8 encoded whether unicode or bytestring
def utf8_str(p, enc='utf-8'):
if isinstance(p, unicode):
return p.encode('utf-8')
if enc != 'utf-8':
return p.decode(enc).encode('utf-8')
return p
# get sys.argv arguments and encode them into utf-8
def utf8_argv():
global iswindows
if iswindows:
# Versions 2.x of Python don't support Unicode in sys.argv on
# Windows, with the underlying Windows API instead replacing multi-byte
# characters with '?'. So use shell32.GetCommandLineArgvW to get sys.argv
# as a list of Unicode strings and encode them as utf-8
from ctypes import POINTER, byref, cdll, c_int, windll
from ctypes.wintypes import LPCWSTR, LPWSTR
GetCommandLineW = cdll.kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = LPCWSTR
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPWSTR)
cmd = GetCommandLineW()
argc = c_int(0)
argv = CommandLineToArgvW(cmd, byref(argc))
if argc.value > 0:
# Remove Python executable and commands if present
start = argc.value - len(sys.argv)
return [argv[i].encode('utf-8') for i in
xrange(start, argc.value)]
# this should never happen
return None
else:
argv = []
argvencoding = sys.stdin.encoding
if argvencoding == None:
argvencoding = sys.getfilesystemencoding()
if argvencoding == None:
argvencoding = 'utf-8'
for arg in sys.argv:
if type(arg) == unicode:
argv.append(arg.encode('utf-8'))
else:
argv.append(arg.decode(argvencoding).encode('utf-8'))
return argv
# Python 2.X is broken in that it does not recognize CP65001 as UTF-8
def add_cp65001_codec():
try:
codecs.lookup('cp65001')
except LookupError:
codecs.register(
lambda name: name == 'cp65001' and codecs.lookup('utf-8') or None)
return
# Almost all sane operating systems now default to utf-8 (or full unicode) as the
# proper default encoding so that all files and path names
# in any language can be properly represented.
def set_utf8_default_encoding():
if sys.getdefaultencoding() in ['utf-8', 'UTF-8','cp65001','CP65001']:
return
# Regenerate setdefaultencoding.
reload(sys)
sys.setdefaultencoding('utf-8')
for attr in dir(locale):
if attr[0:3] != 'LC_':
continue
aref = getattr(locale, attr)
try:
locale.setlocale(aref, '')
except locale.Error:
continue
try:
lang = locale.getlocale(aref)[0]
except (TypeError, ValueError):
continue
if lang:
try:
locale.setlocale(aref, (lang, 'UTF-8'))
except locale.Error:
os.environ[attr] = lang + '.UTF-8'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
return
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
class StripException(Exception):
pass
def patchdata(datain, off, new):
dout=[]
dout.append(datain[:off])
dout.append(new)
dout.append(datain[off+len(new):])
return ''.join(dout)
def joindata(datain, new):
dout=[]
dout.append(datain)
dout.append(new)
return ''.join(dout)
class SRCSStripper:
def sec_info(self, secnum):
start_offset, flgval = struct.unpack_from('>2L', self.datain, 78+(secnum*8))
if secnum == self.num_sections:
next_offset = len(self.datain)
else:
next_offset, nflgval = struct.unpack_from('>2L', self.datain, 78+((secnum+1)*8))
return start_offset, flgval, next_offset
def loadSection(self, secnum):
start_offset, tval, next_offset = self.sec_info(secnum)
return self.datain[start_offset: next_offset]
def __init__(self, datain):
if datain[0x3C:0x3C+8] != 'BOOKMOBI':
raise StripException("invalid file format")
self.datain = datain
self.num_sections, = struct.unpack('>H', datain[76:78])
# get mobiheader
mobiheader = self.loadSection(0)
# get SRCS section number and count
self.srcs_secnum, self.srcs_cnt = struct.unpack_from('>2L', mobiheader, 0xe0)
if self.srcs_secnum == 0xffffffff or self.srcs_cnt == 0:
raise StripException("File doesn't contain the sources section.")
print "SRCS section number is: ", self.srcs_secnum
print "SRCS section count is: ", self.srcs_cnt
# store away srcs sections in case the user wants them later
self.srcs_headers = []
self.srcs_data = []
for i in xrange(self.srcs_secnum, self.srcs_secnum + self.srcs_cnt):
data = self.loadSection(i)
self.srcs_headers.append(data[0:16])
self.srcs_data.append(data[16:])
# find its SRCS region starting offset and total length
self.srcs_offset, fval, temp2 = self.sec_info(self.srcs_secnum)
next = self.srcs_secnum + self.srcs_cnt
next_offset, temp1, temp2 = self.sec_info(next)
self.srcs_length = next_offset - self.srcs_offset
print "SRCS length is: 0x%x" % self.srcs_length
if self.datain[self.srcs_offset:self.srcs_offset+4] != 'SRCS':
raise StripException("SRCS section num does not point to SRCS.")
# first write out the number of sections
self.data_file = self.datain[:76]
self.data_file = joindata(self.data_file, struct.pack('>H',self.num_sections))
# we are going to make the SRCS section lengths all be 0
# offsets up to and including the first srcs record must not be changed
last_offset = -1
for i in xrange(self.srcs_secnum+1):
offset, flgval, temp = self.sec_info(i)
last_offset = offset
self.data_file = joindata(self.data_file, struct.pack('>L',offset) + struct.pack('>L',flgval))
# print "section: %d, offset %0x, flgval %0x" % (i, offset, flgval)
# for every additional record in SRCS records set start to last_offset (they are all zero length)
for i in xrange(self.srcs_secnum + 1, self.srcs_secnum + self.srcs_cnt):
temp1, flgval, temp2 = self.sec_info(i)
self.data_file = joindata(self.data_file, struct.pack('>L',last_offset) + struct.pack('>L',flgval))
# print "section: %d, offset %0x, flgval %0x" % (i, last_offset, flgval)
# for every record after the SRCS records we must start it earlier by an amount
# equal to the total length of all of the SRCS section
delta = 0 - self.srcs_length
for i in xrange(self.srcs_secnum + self.srcs_cnt , self.num_sections):
offset, flgval, temp = self.sec_info(i)
offset += delta
self.data_file = joindata(self.data_file, struct.pack('>L',offset) + struct.pack('>L',flgval))
# print "section: %d, offset %0x, flgval %0x" % (i, offset, flgval)
# now pad it out to begin right at the first offset
# typically this is 2 bytes of nulls
first_offset, flgval = struct.unpack_from('>2L', self.data_file, 78)
self.data_file = joindata(self.data_file, '\0' * (first_offset - len(self.data_file)))
# now add on every thing up to the original src_offset and then everything after it
dout = []
dout.append(self.data_file)
dout.append(self.datain[first_offset: self.srcs_offset])
dout.append(self.datain[self.srcs_offset+self.srcs_length:])
self.data_file = "".join(dout)
# update the srcs_secnum and srcs_cnt in the new mobiheader
offset0, flgval0 = struct.unpack_from('>2L', self.data_file, 78)
offset1, flgval1 = struct.unpack_from('>2L', self.data_file, 86)
mobiheader = self.data_file[offset0:offset1]
mobiheader = mobiheader[:0xe0]+ struct.pack('>L', 0xffffffff) + struct.pack('>L', 0) + mobiheader[0xe8:]
self.data_file = patchdata(self.data_file, offset0, mobiheader)
print "done"
    def getResult(self):
        """Return the rebuilt Mobipocket file data with SRCS/CMET sections removed."""
        return self.data_file
    def getStrippedData(self):
        """Return the raw section payloads that were stripped from the file."""
        return self.srcs_data
    def getHeader(self):
        """Return the headers of the stripped sections (parallel to getStrippedData)."""
        return self.srcs_headers
def usage(progname):
print ('KindleStrip v%(__version__)s. '
'Written 2010-2012 by Paul Durrant and Kevin Hendricks.' % globals())
print "Strips the Sources record from Mobipocket ebooks"
print "For ebooks generated using KindleGen 1.1 and later that add the source"
print "Usage:"
print " %s [Options] <infile> <outfile>" % progname
print "Options: "
print " -h print this help message "
print " -d dump stripped SRCS records to the current working directory "
def main(argv=utf8_argv()):
    """Command-line driver: strip SRCS records from <infile> into <outfile>.

    Returns 0 on success, 1 on usage/strip error, 2 on bad options.
    NOTE(review): the default ``argv=utf8_argv()`` is evaluated once at import
    time, not per call — confirm that is intended.
    """
    progname = os.path.basename(argv[0])
    DUMPSRCS = False
    try:
        opts, args = getopt.getopt(argv[1:], "hd")
    except getopt.GetoptError, err:
        print str(err)
        usage(progname)
        return 2
    if len(args) != 2:
        usage(progname)
        return 1
    for o, a in opts:
        if o == "-h":
            usage(progname)
            sys.exit(0)
        if o == "-d":
            # -d: also dump each stripped record to the current directory
            DUMPSRCS = True
    infile = args[0]
    outfile = args[1]
    try:
        data_file = file(pathof(infile), 'rb').read()
        strippedFile = SRCSStripper(data_file)
        file(pathof(outfile), 'wb').write(strippedFile.getResult())
        if DUMPSRCS:
            headers = strippedFile.getHeader()
            secdatas = strippedFile.getStrippedData()
            for i in xrange(0, len(headers)):
                hdr = headers[i]
                secdata = secdatas[i]
                # choose an output name based on the 4-byte record type tag
                if hdr[0:4] == "SRCS":
                    fname = "kindlestrip_source_archive.zip"
                elif hdr[0:4] == "CMET":
                    fname = "kindlestrip_build_log.txt"
                else:
                    fname = "kindlestrip_unknown%05d.dat" % i
                print "Stripped Record Type: ", hdr[0:4], " file: ", fname
                fname = "./" + fname
                file(pathof(fname), 'wb').write(secdata)
    except StripException, e:
        print "Error: %s" % e
        return 1
    return 0
def cli_main():
    """Console entry point: set up console/encoding workarounds, then run main()."""
    add_cp65001_codec()
    set_utf8_default_encoding()
    sys.exit(main())

if __name__ == '__main__':
    cli_main()
| 10,127 | 14 | 536 |
246d46ae6c505df31b87fe4cb7e01b2fa96c9f97 | 1,484 | py | Python | tools/icon_map_generator.py | pjeanjean/dakara-player | 0251f42ab86a3ae8fdfc2bb61d156527807dedf2 | [
"MIT"
] | null | null | null | tools/icon_map_generator.py | pjeanjean/dakara-player | 0251f42ab86a3ae8fdfc2bb61d156527807dedf2 | [
"MIT"
] | null | null | null | tools/icon_map_generator.py | pjeanjean/dakara-player | 0251f42ab86a3ae8fdfc2bb61d156527807dedf2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import re
import os
import json
from argparse import ArgumentParser
CSS_ICON_NAME_PARSER = r"""\.fa-([^:]*?):(?=[^}]*?content:\s*['"](.*?)['"])"""
def generate(css_file, json_file):
    """Generate a JSON file mapping icon names to hexadecimal character codes.

    Parses ``css_file`` with ``CSS_ICON_NAME_PARSER`` (Font-Awesome-style
    ``.fa-<name>: ... content: '<char>'`` rules) and writes a
    ``{name: "0x.."}`` mapping to ``json_file``.

    Raises:
        FileNotFoundError: if ``css_file`` does not exist.
    """
    # check css_file exists
    if not os.path.isfile(css_file):
        raise FileNotFoundError("File '{}' not found".format(css_file))
    # load css file
    with open(css_file, "r") as file:
        css_content = file.read()
    # parse css file: each match is (icon name, content character)
    css_matcher = re.findall(CSS_ICON_NAME_PARSER, css_content, re.S)
    # convert icons to hex code strings
    icon_dict = {}
    for name, code in css_matcher:
        if code.startswith("\\"):
            # content was written as an escaped code point, e.g. '\f26e'
            code_hex = "0x" + code[1:]
        else:
            code_hex = hex(ord(code))
        icon_dict[name] = code_hex
    # write json file
    # bug fix: the json module has no `encode`; serialize with json.dump
    with open(json_file, "w") as file:
        json.dump(icon_dict, file)
def get_arg_parser():
    """Build the command-line argument parser for the icon map generator."""
    arg_parser = ArgumentParser("Icon map generator")
    positional_args = (
        ("css_file", "File with CSS rules mapping icons name and character."),
        ("json_file", "Output file with hexadecimal code for icons."),
    )
    for dest, help_text in positional_args:
        arg_parser.add_argument(dest, help=help_text)
    return arg_parser
if __name__ == "__main__":
    # script entry point: parse CLI arguments and generate the icon map JSON
    parser = get_arg_parser()
    args = parser.parse_args()
    generate(args.css_file, args.json_file)
    print("JSON file saved in '{}'".format(args.json_file))
| 22.830769 | 80 | 0.623989 | #!/usr/bin/env python3
import re
import os
import json
from argparse import ArgumentParser
CSS_ICON_NAME_PARSER = r"""\.fa-([^:]*?):(?=[^}]*?content:\s*['"](.*?)['"])"""
def generate(css_file, json_file):
    """Generate a JSON file mapping icon names to hexadecimal character codes.

    Parses ``css_file`` with ``CSS_ICON_NAME_PARSER`` (Font-Awesome-style
    ``.fa-<name>: ... content: '<char>'`` rules) and writes a
    ``{name: "0x.."}`` mapping to ``json_file``.

    Raises:
        FileNotFoundError: if ``css_file`` does not exist.
    """
    # check css_file exists
    if not os.path.isfile(css_file):
        raise FileNotFoundError("File '{}' not found".format(css_file))
    # load css file
    with open(css_file, "r") as file:
        css_content = file.read()
    # parse css file: each match is (icon name, content character)
    css_matcher = re.findall(CSS_ICON_NAME_PARSER, css_content, re.S)
    # convert icons to hex code strings
    icon_dict = {}
    for name, code in css_matcher:
        if code.startswith("\\"):
            # content was written as an escaped code point, e.g. '\f26e'
            code_hex = "0x" + code[1:]
        else:
            code_hex = hex(ord(code))
        icon_dict[name] = code_hex
    # write json file
    # bug fix: the json module has no `encode`; serialize with json.dump
    with open(json_file, "w") as file:
        json.dump(icon_dict, file)
def get_arg_parser():
    """Create the parser

    Returns an ArgumentParser with two positional arguments:
    css_file (input CSS) and json_file (output JSON path).
    """
    parser = ArgumentParser("Icon map generator")
    parser.add_argument(
        "css_file", help="File with CSS rules mapping icons name and character."
    )
    parser.add_argument(
        "json_file", help="Output file with hexadecimal code for icons."
    )
    return parser
if __name__ == "__main__":
    # script entry point: parse CLI arguments and generate the icon map JSON
    parser = get_arg_parser()
    args = parser.parse_args()
    generate(args.css_file, args.json_file)
    print("JSON file saved in '{}'".format(args.json_file))
| 0 | 0 | 0 |
fccb8d9930d2562fe22c0a8e111619c543d501f6 | 11,310 | py | Python | workunit.py | ringThePeople/LCA_web_service | 4addd167d63e5d60816939939c5fb7d16d2cfbd7 | [
"MIT"
] | null | null | null | workunit.py | ringThePeople/LCA_web_service | 4addd167d63e5d60816939939c5fb7d16d2cfbd7 | [
"MIT"
] | null | null | null | workunit.py | ringThePeople/LCA_web_service | 4addd167d63e5d60816939939c5fb7d16d2cfbd7 | [
"MIT"
] | null | null | null | import json
from numpy import *
import csv
import berexapi as bex
from pandas import Series, DataFrame
import pandas as pd
import tools.color as toolcolor
import tools.arrange as toolarrange
import numpy as np
import itertools
import tools.repack as toolrepack
import tools.genie3 as toolgenie3
ERROR_CODE = ["", 1]
if __name__ == '__main__':
    # placeholder entry point; this module is normally imported, not run directly
    print "SOMETHING HERE"
| 31.859155 | 204 | 0.660566 | import json
from numpy import *
import csv
import berexapi as bex
from pandas import Series, DataFrame
import pandas as pd
import tools.color as toolcolor
import tools.arrange as toolarrange
import numpy as np
import itertools
import tools.repack as toolrepack
import tools.genie3 as toolgenie3
ERROR_CODE = ["", 1]
def is_valid_form(_data):
    """Stub validator: always reports the input as valid (0 means no error)."""
    return 0
def timeseriesanalysis(_tsdata):
    """Placeholder analysis hook: currently returns the input unchanged."""
    # expand with analysis tool
    # import other .py file
    return _tsdata
def tsv2json_n2(_tsvdata, _bexdata, _validpairs, over_list, under_list, bex_all, all_vp, position_list, graph_count, totalwidth, totalheight):
    """Convert a two-column edge list into a Cytoscape-style JSON graph string.

    ``_tsvdata``: first line is a ';'-separated node list, remaining lines are
    tab-separated (source, target) edges. Edges found in the BERex data
    (``_bexdata``) are drawn solid grey with interaction metadata from
    ``_validpairs``; all other edges are dashed. Node positions come from
    ``position_list`` and are scaled into the given total width/height.
    Nodes in ``over_list``/``under_list`` are colored red/green, others yellow.
    NOTE(review): this mutates ``bex_all`` in place (removes matched edges).
    """
    _line = _tsvdata.split('\n')
    graphdict = {'nodes':[], 'edges':[]}
    for eachline in _line[1:]:
        if(len(eachline)==0):
            continue
        e1, e2 = eachline.strip().split('\t')
        if((e1,e2) in bex_all):
            bex_all.remove((e1,e2))
        # handling edge style if in berex
        if((e1,e2) in _bexdata):
            for v_ps in _validpairs:
                if v_ps['source'] == e1 and v_ps['target'] == e2:
                    graphdict['edges'].append({'data':{'source': e1, 'target': e2, 'interaction':v_ps['interaction'], 'dbsource':v_ps['dbsource']}, 'style':{'line-color': '#c0c0c0', 'width': 6, 'line-style': 'solid'}})
                    print "e1", e1, "e2", e2
                    break
            continue
        graphdict['edges'].append({'data':{'source': e1, 'target': e2}, 'style':{'line-style': 'dashed'} })
    # collect the coordinate extents so positions can be normalized
    x_list = []
    y_list = []
    for eachnode in _line[0].strip().split(';'):
        (x,y) = position_list[eachnode]
        x_list.append(x)
        y_list.append(y)
    x_length = max(x_list)
    y_length = max(y_list)
    # for eachnode in _line[0].strip().split(';'):
    #     (x,y) = position_list[eachnode]
    #     xp = x*100.0/x_length
    #     yp = y*100.0/y_length
    #     if(eachnode in over_list):
    #         graphdict['nodes'].append({'data':{'id':eachnode, 'col':'#c7030a', 'x':xp, 'y':yp}})
    #         continue
    #     if(eachnode in under_list):
    #         graphdict['nodes'].append({'data':{'id':eachnode, 'col':'#69bc39', 'x':xp, 'y':yp}})
    #         continue
    #     graphdict['nodes'].append({'data':{'id':eachnode, 'col':'#ffff00', 'x':xp, 'y':yp}})
    # scale factors: nodes occupy 22% of the width and 64% of the height
    wm = 0.22*totalwidth
    hm = 0.64 * totalheight
    for eachnode in _line[0].strip().split(';'):
        (x,y) = position_list[eachnode]
        print "y_length", y_length, "y", y
        # flip the y axis (layout grows downward) and scale into the canvas
        y = y_length - y + 2
        x = x*wm/x_length
        y = y*hm/y_length
        if graph_count == 1:
            # a single graph gets double the horizontal spread
            x = x * 2
        # print "position",eachnode,"x",x,"y",y
        # print "over_list", over_list
        # print "under_list", under_list
        # print "each_node", eachnode
        if(eachnode in over_list):
            graphdict['nodes'].append({'data':{'id':eachnode, 'col':'#c7030a', 'x':x, 'y':y}})
            continue
        if(eachnode in under_list):
            graphdict['nodes'].append({'data':{'id':eachnode, 'col':'#69bc39', 'x':x, 'y':y}})
            continue
        graphdict['nodes'].append({'data':{'id':eachnode, 'col':'#ffff00', 'x':x, 'y':y}})
    return json.dumps(graphdict)
def run(file_io_list, _options):
    """Main pipeline: infer gene networks with GENIE3 and render them as JSON graphs.

    ``file_io_list`` holds tab-separated expression tables (one per replicate);
    ``_options`` supplies the canvas ``size`` and a ``period`` dict
    (``type`` in {'at_once', 'available', 'selective'}, plus ``ntree`` and
    ``threshold`` for GENIE3). Returns ``[list_of_json_graph_strings, 0]``,
    or ``ERROR_CODE`` on invalid input.
    """
    if not file_io_list:
        return ERROR_CODE
    if is_valid_form(file_io_list):
        return ERROR_CODE
    uw,uh = _options['size']
    totalwidth = float(uw)
    totalheight = float(uh)
    # read every expression table into a 3-D array (replicate x time x gene)
    TS_arrays =[]
    for file_io in file_io_list:
        TS_df = pd.read_csv(file_io,sep="\t")
        gene_names = TS_df.columns.tolist()
        TS_array = TS_df.as_matrix()
        TS_arrays.append(TS_array)
    TS_data = np.asarray(TS_arrays)
    table_len = int(TS_data[0].shape[0])
    print table_len
    period_info = _options['period']
    ntree = int(period_info['ntree'])
    threshold = float(period_info['threshold'])
    graph_count = 1
    #temp
    # z-score normalize the first replicate to classify over/under-expression
    normalized_df = pd.DataFrame(np.copy(TS_data[0]),columns=gene_names)
    #normalized_df=df.copy(deep=True)
    for gene in gene_names:
        normalized_df[gene] = (normalized_df[gene]-normalized_df[gene].mean())/(normalized_df[gene].std())
    thr = 0.05
    over_exp_genes , under_exp_genes = toolcolor.get_over_under_exp_genes(normalized_df, thr=thr)
    print "over_ list",over_exp_genes
    print "under_ list", under_exp_genes
    sp = 1
    i = 0
    _jsons = []
    _2col_list = []
    v_p_list = []
    bex_to_edgelist_list = []
    over_exp_genes_list = []
    under_exp_genes_list = []
    #berex request
    # query BERex once for every ordered gene pair
    all_edge_pos_tuple_list = list(itertools.permutations(gene_names,2))
    all_edge_pos = [{'source':edge_pos[0],'target':edge_pos[1]} for edge_pos in all_edge_pos_tuple_list]
    all_vp = bex.get_berexedges(all_edge_pos)
    bex_all = bex.berexresult_to_edgelist(all_vp)
    if period_info['type'] == 'at_once':
        # single network over the whole time series
        graph_count = 1
        _adj = toolgenie3.run_GENIE3(TS_data, gene_names=gene_names, ntrees=ntree, threshold=threshold)
        tcol = toolrepack.adj2twocol(_adj)
        tcol_rows = [row for row in tcol.split('\n')]
        #Inferece edges
        list_nodes = []
        for ii in range(1, len(tcol_rows)):
            if(len(tcol_rows[ii])==0):
                continue
            source_target_dic = {}
            temp_row = tcol_rows[ii].split('\t')
            source_target_dic['source'] = temp_row[0]
            source_target_dic['target'] = temp_row[1]
            list_nodes.append(source_target_dic)
        v_p = bex.get_berexedges(list_nodes)
        bex_to_edgelist = bex.berexresult_to_edgelist(v_p)
        #calculate position value
        _line = tcol.split('\n')
        all_node = []
        for eachnode in _line[0].strip().split(';'):
            all_node.append(eachnode)
        adj = pd.DataFrame(0,index=all_node,columns=all_node)
        for eachline in _line[1:]:
            #e1 is source, e2 is target
            if(len(eachline)==0):
                continue
            e1, e2 = eachline.strip().split('\t')
            adj[e2][e1] = 1
        adj_list = []
        adj_list.append(adj)
        position_list = toolarrange.arrange_node_position(adj_list)
        _jsons.append(tsv2json_n2(tcol, bex_to_edgelist, v_p, over_exp_genes, under_exp_genes, bex_all, all_vp, position_list, graph_count, totalwidth, totalheight))
    elif period_info['type'] == 'available':
        # one network per consecutive window of `period_val` time points
        period_val = int(period_info['value'])
        adj_list = []
        # graphs are more than one.
        if (sp+(2 * period_val)-1 < table_len):
            graph_count = 2
        print "period_val :", period_val
        while (sp+((i+1) * period_val)-1 <= table_len) :
            print "interation : " , i+1
            start_t = sp+ (i *period_val)
            end_t = sp+((i+1) * period_val)-1
            # df_2=df.copy(deep=True)
            df = pd.DataFrame(np.copy(TS_data[0]),columns=gene_names)
            normalized_df=df.iloc[start_t:(end_t+1),:]
            for gene in gene_names:
                normalized_df[gene] = (normalized_df[gene]-normalized_df[gene].mean())/(normalized_df[gene].std())
            thr = 0.05
            over_exp, under_exp = toolcolor.get_over_under_exp_genes(normalized_df, thr=thr)
            over_exp_genes_list.append(over_exp)
            under_exp_genes_list.append(under_exp)
            _adj = toolgenie3.run_GENIE3(TS_data, gene_names=gene_names,start_point = start_t ,end_point = end_t, ntrees=ntree, threshold=threshold)
            _2col_list.append(toolrepack.adj2twocol(_adj))
            # skip windows where GENIE3 produced no network
            if _2col_list[i][0:4] == 'None':
                i = i+1
                continue
            try:
                tcol = [row for row in _2col_list[i].split('\n')]
                list_nodes = []
                for ii in range(1, len(tcol)):
                    if(len(tcol[ii])==0):
                        continue
                    source_target_dic = {}
                    temp_row = tcol[ii].split('\t')
                    source_target_dic['source'] = temp_row[0]
                    source_target_dic['target'] = temp_row[1]
                    list_nodes.append(source_target_dic)
                v_p_list.append(bex.get_berexedges(list_nodes))
                bex_to_edgelist_list.append(bex.berexresult_to_edgelist(v_p_list[i]))
            except:
                i = i + 1
                continue
            #calculate position value
            _line = _2col_list[i].split('\n')
            all_node = []
            for eachnode in _line[0].strip().split(';'):
                all_node.append(eachnode)
            adj = pd.DataFrame(0,index=all_node,columns=all_node)
            for eachline in _line[1:]:
                if(len(eachline)==0):
                    continue
                #e1 is source, e2 is target
                e1, e2 = eachline.strip().split('\t')
                adj[e2][e1] = 1
            adj_list.append(adj)
            i = i + 1 #
        position_list = toolarrange.arrange_node_position(adj_list)
        for i in range(0, len(_2col_list)):
            if _2col_list[i] is None:
                continue
            # NOTE(review): skipped windows can desynchronize _2col_list from
            # v_p_list / bex_to_edgelist_list indices — verify alignment.
            _jsons.append(tsv2json_n2(_2col_list[i], bex_to_edgelist_list[i], v_p_list[i], over_exp_genes_list[i], under_exp_genes_list[i], bex_all, all_vp, position_list, graph_count, totalwidth, totalheight))
    #end - type : 'available'
    elif period_info['type'] == 'selective':
        # user-specified windows: 'start1,start2,...,;end1,end2,...,'
        period_string_list = (period_info['selective_form']).split(';')
        start_point_list = (period_string_list[0].split(','))[0:-1]
        # each period -> each end point
        each_period_list = (period_string_list[1].split(','))[0:-1]
        if(len(start_point_list) == 0):
            print "Selective Period input is Empty"
        #graphs are more than one
        # NOTE(review): `start_point` is read here before it is assigned below —
        # this looks like a NameError; `start_point_list` was probably intended.
        if(len(start_point)>1):
            graph_count = 2
        adj_list = []
        for i in range(0, len(start_point_list)):
            if(len(start_point_list[i]) == 0):
                print i+1, "th period's Start Point is Empty"
                continue
            start_point = int(start_point_list[i])
            try:
                each_period = int(each_period_list[i])
            except:
                print i+1, "th wrong period"
                continue
            # this method : each period operation
            # end_point = start_point + each_period - 1
            # change selective system
            end_point = each_period
            if(end_point <= table_len):
                print "iteration : ", i+1
                df = pd.DataFrame(np.copy(TS_data[0]),columns=gene_names)
                normalized_df=df.iloc[start_point:(end_point+1),:]
                for gene in gene_names:
                    normalized_df[gene] = (normalized_df[gene]-normalized_df[gene].mean())/(normalized_df[gene].std())
                thr = 0.05
                over_exp , under_exp = toolcolor.get_over_under_exp_genes(normalized_df, thr=thr)
                over_exp_genes_list.append(over_exp)
                under_exp_genes_list.append(under_exp)
                _adj = toolgenie3.run_GENIE3(TS_data, gene_names=gene_names,start_point = start_point ,end_point = end_point, ntrees=ntree, threshold=threshold)
                _2col_list.append(toolrepack.adj2twocol(_adj))
                if _2col_list[i] is None:
                    continue
                try:
                    tcol = [row for row in _2col_list[i].split('\n')]
                    list_nodes = []
                    for ii in range(1, len(tcol)):
                        if(len(tcol[ii])==0):
                            continue
                        source_target_dic = {}
                        temp_row = tcol[ii].split('\t')
                        source_target_dic['source'] = temp_row[0]
                        source_target_dic['target'] = temp_row[1]
                        list_nodes.append(source_target_dic)
                    v_p_list.append(bex.get_berexedges(list_nodes))
                    bex_to_edgelist_list.append(bex.berexresult_to_edgelist(v_p_list[i]))
                except:
                    continue
                _line = _2col_list[i].split('\n')
                all_node = []
                for eachnode in _line[0].strip().split(';'):
                    all_node.append(eachnode)
                adj = pd.DataFrame(0,index=all_node,columns=all_node)
                for eachline in _line[1:]:
                    if(len(eachline)==0):
                        continue
                    #e1 is source, e2 is target
                    e1, e2 = eachline.strip().split('\t')
                    adj[e2][e1] = 1
                adj_list.append(adj)
        position_list = toolarrange.arrange_node_position(adj_list)
        for i in range(0, len(_2col_list)):
            if _2col_list[i] is None:
                continue
            try:
                _jsons.append(tsv2json_n2(_2col_list[i], bex_to_edgelist_list[i], v_p_list[i], over_exp_genes_list[i], under_exp_genes_list[i], bex_all, all_vp, position_list, graph_count, totalwidth, totalheight))
            except:
                print "error detection"
                continue
    return [_jsons, 0]
if __name__ == '__main__':
    # placeholder entry point; this module is normally imported, not run directly
    print "SOMETHING HERE"
| 10,820 | 0 | 100 |
6a568a60623456f59bbd5683df020e05177d6d7f | 26 | py | Python | dbipyt/__init__.py | paixlukee/dbi-python | 4a00b2bba7f5f875cd23ec07ce061c539e9655c0 | [
"MIT"
] | 2 | 2018-10-27T17:47:59.000Z | 2018-10-27T20:36:13.000Z | dbipyt/__init__.py | paixlukee/dbi-python | 4a00b2bba7f5f875cd23ec07ce061c539e9655c0 | [
"MIT"
] | null | null | null | dbipyt/__init__.py | paixlukee/dbi-python | 4a00b2bba7f5f875cd23ec07ce061c539e9655c0 | [
"MIT"
] | null | null | null | from dbipyt import dbipyt
| 13 | 25 | 0.846154 | from dbipyt import dbipyt
| 0 | 0 | 0 |
2496d5bf833996999c495500f73b3012f376f5fa | 13,697 | py | Python | asdfghjkl/matrices.py | rioyokotalab/asdfghjkl | f435c1e2527162fb07512b4ce5058460aab238b9 | [
"MIT"
] | null | null | null | asdfghjkl/matrices.py | rioyokotalab/asdfghjkl | f435c1e2527162fb07512b4ce5058460aab238b9 | [
"MIT"
] | null | null | null | asdfghjkl/matrices.py | rioyokotalab/asdfghjkl | f435c1e2527162fb07512b4ce5058460aab238b9 | [
"MIT"
] | null | null | null | import os
import copy
import torch
import torch.distributed as dist
from .symmatrix import SymMatrix
HESSIAN = 'hessian' # Hessian
FISHER_EXACT = 'fisher_exact' # exact Fisher
FISHER_MC = 'fisher_mc' # Fisher estimation by Monte-Carlo sampling
COV = 'cov' # no-centered covariance a.k.a. empirical Fisher
SHAPE_FULL = 'full' # full
SHAPE_BLOCK_DIAG = 'block_diag' # layer-wise block-diagonal
SHAPE_KRON = 'kron' # Kronecker-factored
SHAPE_DIAG = 'diag' # diagonal
__all__ = [
'MatrixManager',
'FISHER_EXACT',
'FISHER_MC',
'COV',
'HESSIAN',
'SHAPE_FULL',
'SHAPE_BLOCK_DIAG',
'SHAPE_KRON',
'SHAPE_DIAG'
]
_supported_types = [HESSIAN, FISHER_EXACT, FISHER_MC, COV]
_supported_shapes = [SHAPE_FULL, SHAPE_BLOCK_DIAG, SHAPE_KRON, SHAPE_DIAG]
_normalizations = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d)
| 36.23545 | 132 | 0.571658 | import os
import copy
import torch
import torch.distributed as dist
from .symmatrix import SymMatrix
HESSIAN = 'hessian' # Hessian
FISHER_EXACT = 'fisher_exact' # exact Fisher
FISHER_MC = 'fisher_mc' # Fisher estimation by Monte-Carlo sampling
COV = 'cov' # no-centered covariance a.k.a. empirical Fisher
SHAPE_FULL = 'full' # full
SHAPE_BLOCK_DIAG = 'block_diag' # layer-wise block-diagonal
SHAPE_KRON = 'kron' # Kronecker-factored
SHAPE_DIAG = 'diag' # diagonal
__all__ = [
'MatrixManager',
'FISHER_EXACT',
'FISHER_MC',
'COV',
'HESSIAN',
'SHAPE_FULL',
'SHAPE_BLOCK_DIAG',
'SHAPE_KRON',
'SHAPE_DIAG'
]
_supported_types = [HESSIAN, FISHER_EXACT, FISHER_MC, COV]
_supported_shapes = [SHAPE_FULL, SHAPE_BLOCK_DIAG, SHAPE_KRON, SHAPE_DIAG]
_normalizations = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d)
def _requires_matrix(module: torch.nn.Module):
if not hasattr(module, 'weight'):
return False
if module.weight.requires_grad:
return True
return hasattr(module, 'bias') and module.bias.requires_grad
class MatrixManager:
    """Manage curvature matrices (Hessian/Fisher/covariance) stored on a model.

    Each computed matrix lives as an attribute on its module
    (``module.{matrix_type}``); accumulated statistics live under
    ``module.{stats_name}_{matrix_type}``. This class handles accumulation,
    save/load to disk, (de)serialization to a flat vector, distributed
    reduction, and spectrum/trace metrics.
    """
    def __init__(self, model, matrix_types, scale=1., smoothing_weight=None):
        self._model = model
        self._device = next(model.parameters()).device
        if isinstance(matrix_types, str):
            matrix_types = [matrix_types]
        for mat_type in matrix_types:
            assert mat_type in _supported_types, f'Invalid matrix_type: {mat_type}. matrix_type must be in {_supported_types}.'
        # remove duplicates
        self._matrix_types = set(matrix_types)
        # for updating stats
        self._scale = scale
        self._smoothing_weight = smoothing_weight
        self._stats_names = set()

    @staticmethod
    def _get_save_field(matrix_type, stats_name=None):
        """Attribute name under which a (possibly named) matrix is stored."""
        if stats_name is None:
            return matrix_type
        return f'{stats_name}_{matrix_type}'

    def _clear_stats(self, stats_name):
        """Forget a registered stats name (no-op if unknown)."""
        if stats_name in self._stats_names:
            self._stats_names.remove(stats_name)

    def _check_stats_name(self, stats_name):
        """Assert that `stats_name` was registered (None is always allowed)."""
        if stats_name is None:
            return
        assert stats_name in self._stats_names, f'stats {stats_name} does not exist.'

    def accumulate_matrices(
        self, stats_name, scale=None, smoothing_weight=None
    ):
        """
        Accumulate the latest fisher values to acc_fisher.
        module.{fisher_type} = fisher
        module.{stats_name}_{fisher_type} = acc_fisher
        """
        self._stats_names.add(stats_name)
        if scale is None:
            scale = self._scale
        if smoothing_weight is None:
            smoothing_weight = self._smoothing_weight
        for module in self._model.modules():
            for mat_type in self._matrix_types:
                matrix = getattr(module, mat_type, None)
                if matrix is None:
                    continue
                matrix.scaling(scale)
                stats_attr = self._get_save_field(mat_type, stats_name)
                stats = getattr(module, stats_attr, None)
                if stats is None:
                    # first accumulation: store a deep copy of the scaled matrix
                    setattr(module, stats_attr, copy.deepcopy(matrix))
                    continue
                if smoothing_weight:
                    # exponential moving average with weight w on the new matrix
                    w = smoothing_weight
                    stats_ema = stats.scaling(1 - w) + matrix.scaling(w)
                    setattr(module, stats_attr, stats_ema)
                else:
                    stats = stats + matrix
                    setattr(module, stats_attr, stats)

    def save_matrices(self, root, relative_dir='', stats_name=None):
        """
        Save fisher for each fisher_type and for each module.
        module.{stats_name}_{fisher_type} = fisher

        Returns a nested dict of relative file paths keyed by matrix type
        and module name.
        """
        self._check_stats_name(stats_name)
        # save all fisher and collect relative_paths
        relative_paths = {}
        for mat_type in self._matrix_types:
            relative_paths[mat_type] = {}
            for mname, module in self._model.named_modules():
                stats_attr = self._get_save_field(mat_type, stats_name)
                stats = getattr(module, stats_attr, None)
                # if module does not have computed matrices, skip
                if stats is None:
                    continue
                _relative_dir = os.path.join(relative_dir, mat_type, mname)
                rst = stats.save(root, _relative_dir)
                if module is self._model:
                    # full (model-level) matrix paths are stored flat
                    relative_paths[mat_type].update(rst)
                else:
                    relative_paths[mat_type][mname] = rst
        return relative_paths

    def load_matrices(self, root, relative_paths, matrix_shapes):
        """Load matrices of the requested shapes from disk onto the modules.

        Raises ValueError when an expected file/entry is missing.
        """
        for mat_shape in matrix_shapes:
            assert mat_shape in _supported_shapes, f'Invalid matrix_shape: {mat_shape}. matrix_shape must be in {_supported_shapes}'

        def root_join(path_or_dict):
            # recursively prefix every leaf path with `root`
            if isinstance(path_or_dict, dict):
                rst = {}
                for k, v in path_or_dict.items():
                    rst[k] = root_join(v)
                return rst
            else:
                return os.path.join(root, path_or_dict)
        paths = root_join(relative_paths)
        for mat_type in self._matrix_types:
            mat_paths = paths.get(mat_type, None)
            if mat_paths is None:
                raise ValueError(f'matrix type {mat_type} does not exist.')

            def _load_path(mat_shape, load_key, path_key, module_name=None):
                # load one shape into the enclosing `matrix` (closure variable),
                # translating missing keys/files into a descriptive ValueError
                try:
                    if module_name:
                        kwargs = {load_key: mat_paths[module_name][path_key]}
                    else:
                        kwargs = {load_key: mat_paths[path_key]}
                    matrix.load(**kwargs)
                except (KeyError, FileNotFoundError):
                    if module_name:
                        raise ValueError(
                            f'{mat_type}.{mat_shape} for module {module_name} does not exist.'
                        )
                    else:
                        raise ValueError(
                            f'{mat_type}.{mat_shape} does not exist.'
                        )
            # load layer-wise matrices
            for mname, module in self._model.named_modules():
                if module is self._model:
                    continue
                if not _requires_matrix(module):
                    continue
                matrix = SymMatrix(device=self._device)
                if SHAPE_BLOCK_DIAG in matrix_shapes:
                    _load_path(SHAPE_BLOCK_DIAG, 'path', 'tril', mname)
                if SHAPE_KRON in matrix_shapes:
                    # normalization layers use unit-wise blocks instead of Kron factors
                    if isinstance(module, _normalizations):
                        _load_path(
                            'unit_wise', 'unit_path', 'unit_wise', mname
                        )
                    else:
                        _load_path(SHAPE_KRON, 'kron_path', 'kron', mname)
                if SHAPE_DIAG in matrix_shapes:
                    _load_path(SHAPE_DIAG, 'diag_path', 'diag', mname)
                setattr(module, mat_type, matrix)
            # full matrix
            if SHAPE_FULL in matrix_shapes:
                matrix = SymMatrix(device=self._device)
                _load_path(SHAPE_FULL, 'path', 'tril')
                setattr(self._model, mat_type, matrix)

    def matrices_exist(self, root, relative_paths, matrix_shapes):
        """Return True if all requested matrices can be loaded from disk."""
        try:
            self.load_matrices(root, relative_paths, matrix_shapes)
            return True
        except ValueError:
            return False

    def clear_matrices(self, stats_name):
        """
        Clear fisher for each fisher_type and for each module.
        module.{stats_name}_{fisher_type} = fisher
        """
        self._check_stats_name(stats_name)
        # save all fisher and collect relative_paths
        for mat_type in self._matrix_types:
            stats_attr = self._get_save_field(mat_type, stats_name)
            for module in self._model.modules():
                if hasattr(module, stats_attr):
                    delattr(module, stats_attr)
        self._clear_stats(stats_name)

    def matrices_to_vector(self, stats_name):
        """
        Flatten all fisher values.
        module.{stats_name}_{fisher_type} = fisher
        fisher = {
            'diag': {'weight': torch.Tensor, 'bias': torch.Tensor},
            'kron': {'A': torch.Tensor, 'B': torch.Tensor},
            'block_diag': {'F': torch.Tensor},
        }
        """
        self._check_stats_name(stats_name)
        vec = []
        for mat_type in self._matrix_types:
            stats_attr = self._get_save_field(mat_type, stats_name)
            for module in self._model.modules():
                stats = getattr(module, stats_attr, None)
                if stats is None:
                    continue
                vec.extend(stats.to_vector())
        vec = [v.flatten() for v in vec]
        return torch.cat(vec)

    def vector_to_matrices(self, vec, stats_name):
        """
        Unflatten vector like fisher.
        module.{stats_name}_{fisher_type} = fisher
        fisher = {
            'diag': {'weight': torch.Tensor, 'bias': torch.Tensor},
            'kron': {'A': torch.Tensor, 'B': torch.Tensor},
            'block_diag': {'F': torch.Tensor},
        }
        """
        self._check_stats_name(stats_name)
        pointer = 0
        for mat_type in self._matrix_types:
            stats_attr = self._get_save_field(mat_type, stats_name)
            for module in self._model.modules():
                stats = getattr(module, stats_attr, None)
                if stats is None:
                    continue
                pointer = stats.to_matrices(vec, pointer)
        # the whole vector must have been consumed
        assert pointer == torch.numel(vec)

    def reduce_matrices(
        self, stats_name=None, is_master=True, all_reduce=False
    ):
        """Average the matrices across distributed workers via pack/reduce/unpack."""
        # pack
        packed_tensor = self.matrices_to_vector(stats_name)
        # reduce
        if all_reduce:
            dist.all_reduce(packed_tensor)
        else:
            dist.reduce(packed_tensor, dst=0)
        # unpack
        if is_master or all_reduce:
            self.vector_to_matrices(
                packed_tensor.div_(dist.get_world_size()), stats_name
            )
        dist.barrier()

    def _collect_metrics(
        self,
        matrix_type,
        matrix_shape,
        stats_name,
        metrics_fn,
        reduce_fn,
        init
    ):
        """Apply `metrics_fn` on each stored matrix of the given shape and
        fold the per-module results with `reduce_fn`, starting from `init`."""
        stats_attr = self._get_save_field(matrix_type, stats_name)
        if matrix_shape == SHAPE_FULL:
            # full-shape metrics come from the single model-level matrix
            matrix = getattr(self._model, stats_attr, None)
            assert matrix is not None and matrix.has_data, f'{matrix_type}.{matrix_shape} does not exist.'
            return getattr(matrix, metrics_fn)()
        rst = init
        for mname, module in self._model.named_modules():
            if module is self._model:
                continue
            if not _requires_matrix(module):
                continue
            matrix = getattr(module, stats_attr, None)
            assert matrix is not None, f'{matrix_type} for {mname} does not exist.'
            if matrix_shape == SHAPE_BLOCK_DIAG:
                assert matrix.has_data, f'{matrix_type}.{matrix_shape} for {mname} does not exist.'
                rst = reduce_fn(rst, getattr(matrix, metrics_fn)())
            elif matrix_shape == SHAPE_KRON:
                if isinstance(module, _normalizations):
                    assert matrix.has_unit, f'{matrix_type}.unit_wise for {mname} does not exist.'
                    rst = reduce_fn(rst, getattr(matrix.unit, metrics_fn)())
                else:
                    assert matrix.has_kron, f'{matrix_type}.{matrix_shape} for {mname} does not exist.'
                    rst = reduce_fn(rst, getattr(matrix.kron, metrics_fn)())
            elif matrix_shape == SHAPE_DIAG:
                assert matrix.has_diag, f'{matrix_type}.{matrix_shape} for {mname} does not exist.'
                rst = reduce_fn(rst, getattr(matrix.diag, metrics_fn)())
            else:
                raise ValueError(f'Invalid matrix_shape: {matrix_shape}.')
        return rst

    def get_eigenvalues(self, matrix_type, matrix_shape, stats_name=None):
        """Return all eigenvalues, sorted in descending order."""
        def reduce(val1, val2):
            val1.append(val2)
            return val1
        rst = self._collect_metrics(
            matrix_type,
            matrix_shape,
            stats_name,
            metrics_fn='eigenvalues',
            reduce_fn=reduce,
            init=[]
        )
        if not isinstance(rst, torch.Tensor):
            # layer-wise case: concatenate per-module spectra and sort
            rst = torch.sort(torch.cat(rst), descending=True)[0]
        return rst

    def get_top_eigenvalue(self, matrix_type, matrix_shape, stats_name=None):
        """Return the largest eigenvalue across all stored matrices."""
        def reduce(val1, val2):
            val1 = max(val1, val2)
            return val1
        rst = self._collect_metrics(
            matrix_type,
            matrix_shape,
            stats_name,
            metrics_fn='top_eigenvalue',
            reduce_fn=reduce,
            init=-1
        )
        return rst

    def get_trace(self, matrix_type, matrix_shape, stats_name=None):
        """Return the total trace summed across all stored matrices."""
        def reduce(val1, val2):
            val1 += val2
            return val1
        rst = self._collect_metrics(
            matrix_type,
            matrix_shape,
            stats_name,
            metrics_fn='trace',
            reduce_fn=reduce,
            init=0
        )
        return rst

    def get_effective_dim(
        self, matrix_type, matrix_shape, reg, stats_name=None
    ):
        """Return the effective dimension: sum_i eig_i / (eig_i + reg)."""
        eigs = self.get_eigenvalues(
            matrix_type, matrix_shape, stats_name=stats_name
        )
        return torch.sum(eigs / (eigs + reg))
| 7,856 | 4,942 | 46 |
7c4a5c208693dc301133a460806a91704d8e72d2 | 148 | py | Python | chuck-pyth/code/copytildone.py | KaiserPhemi/python-101 | 703facf41f1b47c486e5044696ce87b6d28a9ab3 | [
"MIT"
] | 41 | 2015-02-27T22:13:41.000Z | 2021-11-14T15:37:29.000Z | chuck-pyth/code/copytildone.py | KaiserPhemi/python-101 | 703facf41f1b47c486e5044696ce87b6d28a9ab3 | [
"MIT"
] | 2 | 2015-12-15T04:03:15.000Z | 2017-01-13T15:29:47.000Z | chuck-pyth/code/copytildone.py | KaiserPhemi/python-101 | 703facf41f1b47c486e5044696ce87b6d28a9ab3 | [
"MIT"
] | 45 | 2015-01-03T17:26:02.000Z | 2022-01-09T16:06:04.000Z | while True:
line = raw_input('> ')
if line[0] == '#' :
continue
if line == 'done':
break
print line
print 'Done!'
| 13.454545 | 26 | 0.472973 | while True:
line = raw_input('> ')
if line[0] == '#' :
continue
if line == 'done':
break
print line
print 'Done!'
| 0 | 0 | 0 |
6a63ed94125041dfe1732611478aa856e87d8ccc | 1,029 | py | Python | test_package/conanfile.py | lucteo/conan-llvm | 48ac43d0338f425b8fdea66b29f531c695b7fe1c | [
"MIT"
] | 2 | 2017-02-17T11:47:03.000Z | 2020-03-20T23:47:08.000Z | test_package/conanfile.py | lucteo/conan-llvm | 48ac43d0338f425b8fdea66b29f531c695b7fe1c | [
"MIT"
] | 1 | 2018-02-20T22:48:01.000Z | 2018-02-21T18:13:37.000Z | test_package/conanfile.py | lucteo/conan-llvm | 48ac43d0338f425b8fdea66b29f531c695b7fe1c | [
"MIT"
] | 3 | 2018-01-14T20:34:27.000Z | 2018-04-01T08:53:34.000Z | from conans import ConanFile, CMake
import os
############### CONFIGURE THESE VALUES ##################
default_user = "lucteo"
default_channel = "testing"
#########################################################
channel = os.getenv("CONAN_CHANNEL", default_channel)
username = os.getenv("CONAN_USERNAME", default_user)
| 33.193548 | 82 | 0.579203 | from conans import ConanFile, CMake
import os
############### CONFIGURE THESE VALUES ##################
default_user = "lucteo"
default_channel = "testing"
#########################################################
channel = os.getenv("CONAN_CHANNEL", default_channel)
username = os.getenv("CONAN_USERNAME", default_user)
class TestLlvmConan(ConanFile):
    """Conan test package: builds a small CMake project against llvm/3.5.2
    to verify the package works. Run automatically by `conan test`."""
    settings = "os", "compiler", "build_type", "arch"
    requires = "llvm/3.5.2@%s/%s" % (username, channel)
    generators = "cmake"

    def build(self):
        """Configure and build the test project with CMake."""
        cmake = CMake(self.settings)
        self.output.info("Running CMake")
        self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line))
        self.output.info("Building the llvm test project")
        self.run("cmake --build . %s" % cmake.build_config)

    def imports(self):
        """Copy shared libraries/binaries from the dependency into ./bin."""
        self.copy(pattern="*.dll", dst="bin", src="bin")
        self.copy(pattern="*.dylib", dst="bin", src="lib")
        self.copy(pattern="*", dst="bin", src="bin")

    def test(self):
        # building successfully is the test; nothing extra to run
        pass
| 449 | 234 | 23 |
ede3be627374eb4e0bbf1cb79ed587d97de563ee | 19,123 | py | Python | code/network_helpers.py | lujonathanh/BETS | 7121e16281e0e7973c1ee6f631b59ac1f7e7a7d5 | [
"Apache-2.0"
] | 12 | 2019-04-02T05:22:03.000Z | 2021-09-18T14:46:29.000Z | code/network_helpers.py | lujonathanh/BETS | 7121e16281e0e7973c1ee6f631b59ac1f7e7a7d5 | [
"Apache-2.0"
] | 1 | 2019-06-05T20:12:18.000Z | 2019-06-06T16:12:03.000Z | code/network_helpers.py | lujonathanh/BETS | 7121e16281e0e7973c1ee6f631b59ac1f7e7a7d5 | [
"Apache-2.0"
] | 3 | 2020-07-15T21:34:46.000Z | 2021-02-21T17:47:01.000Z | __author__ = 'jlu96'
import csv
import numpy as np
import json
import pandas as pd
import collections
global data_dir
data_dir = "/Users/jlu96/v-causal-snps/data/GeneExpressionData/GGR_Network/"
global genecols
genecols = ["Official Symbol Interactor A", "Official Symbol Interactor B"]
def annotate_cols(df, cols, genes, name):
    """Return the sum across a row's cols of genes in the gene"""
    # one 0/1 indicator Series per column, then sum them element-wise
    indicator_columns = [df[col].isin(genes).astype(int) for col in cols]
    df[name] = np.sum(indicator_columns, axis=0)
    return df
def make_type_col(df, col_name):
    """
    :param df:
    :param name:
    :param col_name:
    :return: Add a new column with a column of 1s indicating the type of the column.
    """
    # marker column: every row gets 1 so later summaries can detect this type
    df[col_name] = 1
    return df
def annotate_cols_summary(df, cols, col_name = "Type", do_reverse=False):
    """Build a per-row summary column listing which of `cols` hold a
    non-null, non-zero value, as a comma-terminated concatenation
    (e.g. "a,b,"). Columns are visited in order, or reversed when
    `do_reverse` is set. Returns a copy of `df` with the new column."""
    assert len(set(cols).intersection(set(df.columns.values))) == len(cols)
    sum_df = df.copy()
    type_array = np.empty(len(df), dtype=object)
    type_array[:] = ""
    ordered_cols = list(cols)
    if do_reverse:
        ordered_cols = ordered_cols[::-1]
    for col in ordered_cols:
        # rows where this column is present (non-null) and non-zero
        hit_mask = np.logical_and(pd.notnull(sum_df[col]), sum_df[col] != 0)
        indices = np.flatnonzero(np.asarray(hit_mask))
        print("FOr col", col, ":", len(indices))
        type_array[indices] += col + ","
    sum_df[col_name] = type_array
    return sum_df
def get_annotated(df, names=[], cols=genecols,
                  suffixes = ["_A", "_B"]):
    """ Given annotation type
    :param df:
    :param name:
    :return: df with annotations, and with a col of all rows containing at least one
    """
    # Gene lists for each annotation are loaded from hard-coded files under
    # file_dir; *names* selects which annotations to apply (default set below).
    file_dir = "/Users/jlu96/v-causal-snps/data/GeneExpressionData/GGR_Network/raw_files/"
    name2filename = {}
    name2filename["Diabetes_highconf"] = file_dir + "GSEA-diabetes-high-conf-7_29_16-genes.txt"
    name2filename["Diabetes_lowconf"] = file_dir + "GSEA-diabetes-low-conf-7_29_16-genes.txt"
    name2filename["Metabolism_GSEA_highconf"] = file_dir + "GSEA-gluconeo-glyc-gluc-metab-8_2_16-genes.txt"
    name2filename["Metabolism_GSEA_lowconf"] = file_dir + "GSEA-gluconeo-am-glyc-lip-gluc-metab-7_29_16-genes.txt"
    name2filename["Immune"] = file_dir + "immune-GSEA-GOC-genes.txt"
    name2filename["Immune_GOC-Priority"] = file_dir + "Immune-GOC-Priority.txt"
    name2filename["Immune_GSEA"] = file_dir + "GSEA-immune-inflammatory-7_29_16-genes.txt"
    name2filename["GR"] = file_dir + "GR_GO_direct_candidates_union.txt"
    name2filename["GR_direct-candidate"] = file_dir + "GR_direct_candidates-HGNC.txt"
    name2filename["GR_GO"] = file_dir + "GR_Pathway-GO-HGNC.txt"
    name2filename["DEG_edgeR-0.05"] = file_dir + "sig_genes_reg_fdr-0.05-all.txt"
    default_loads = ["GR", "DEG_edgeR-0.05", "Diabetes_highconf", "Diabetes_lowconf", "Immune", "Metabolism_GSEA_highconf",
                     "Metabolism_GSEA_lowconf"]
    if names == []:
        names = default_loads
    else:
        for name in names:
            if name not in name2filename:
                raise ValueError("Name " + name + " not in list of annotations")
    df_genes = get_genes(df, genecols=cols)
    for name in names:
        filename = name2filename[name]
        annot_genes = load_genes(filename)
        # Get the df's genes. See # annotated
        both_genes = df_genes.intersection(annot_genes)
        print("# ", name, " genes in df: ", len(both_genes))
        newcols = []
        # Annotate each column if extra genes
        for col, suffix in zip(cols, suffixes):
            newcol = name + suffix
            newcols.append(newcol)
            df = annotate_cols(df, [col], annot_genes, newcol)
        # Annotate the total number in that row
        df[name] = np.sum([df[newcol] for newcol in newcols], axis=0)
        print("# ", name, " edges in df: ", len(np.where(df[name])[0]))
    return df
def get_genes_in(df, name, genecols=["Gene"], verbose=False):
    """
    :param df:
    :param name:
    :return: Genes where the colname is not empty
    """
    # Keep rows whose *name* column is non-null and non-zero, then collect the
    # gene identifiers appearing in *genecols* of those rows.
    in_df = df[np.logical_and(pd.notnull(df[name]), df[name] != 0)]
    genes = get_genes(in_df, genecols=genecols)
    if verbose:
        print("Genes in ", name, ":", len(genes))
    return genes
def filter_pairs(pairs, genes):
    """
    Keep only the pairs whose both members belong to *genes*.

    :param pairs: iterable of 2-element pairs.
    :param genes: collection supporting membership tests.
    :return: list of the original pair objects with both endpoints in *genes*.
    """
    kept = []
    for pair in pairs:
        if pair[0] in genes and pair[1] in genes:
            kept.append(pair)
    return kept
# def get_cause_plot_triples(cause2effects, sort_dict=None):
# """
# :param cause2effects: Dictionary of the causes and effects
# :param sort_dict: Dictionary returning a key to sort the effects by
# :return: a list of plot_triples, cause at beginning
# """
# plot_triples_list = []
# for cause in cause2effects:
# effects = sorted(cause2effects[cause], key = lambda entry: sort_dict[entry], reverse=True)
# effect_list = pj.partition_inputs(list(effects), int(round(len(effects)/2.0)))
#
#
# plot_triples_list.extend([[cause] + e for e in effect_list])
#
# print "Plot triples: "
# print plot_triples_list[0:20]
#
# return plot_triples_list
def limit_to_genes_all(df, genes, cols=genecols):
    """
    :param df:
    :param genes:
    :param cols:
    :return: df where all values in cols are in genes
    """
    # A row is kept only when every column in *cols* holds a member of *genes*
    # (per-column isin() indicators summed, then compared to len(cols)).
    num_cols = len(cols)
    indices = np.sum([df[col].isin(genes).values for col in cols], axis=0) >= num_cols
    new_df = df[indices].copy()
    new_df.index = list(range(len(new_df)))
    return new_df
# def df_to_graph_causal(df, key_col, cause_col, source_col=None, target_col=None, type_cols=[]):
# """
# :param df: Input dataframe
# :param key_col: Column containing source-target pairs
# :param cause_col: Column saying the type of causal relation. If None, assume just PPI
# :param source_col: The source column of the causal relation if it exists
# :param target_col: Target column
# :param type_cols: Other attributes to annotate the edge with
# :return: A Digraph where each edge has attributes: source, target (None if Causal Type is None)
# Causal Type, and other type_col annotations
# """
# G = nx.Graph()
#
# for i in range(len(df)):
# if pd.notnull(cause_col):
#
#
# type_dict = {}
# for type_col in type_cols:
# type_dict[type_col] = df[type_col][i]
#
# G.add_edge(source, target, attr_dict=type_dict)
#
# return G
def matr_to_net(matr_df, edge_name=None, abs_name=None, cause_effect_col = "Cause-Effect", colnames=None, make_pair=False,
                make_type=True, name=None, sort_by=None, extra_dict=None,
                no_abs=False, do_sort=True):
    """
    Convert a coefficient matrix to a network.
    :param matr_df: rows and columns are the genes
    :param edge_name: The name to give the column of edge values (from matrix)
    :param extra_dict: Dictionary to update the rest with.
    :param cause_effect_col:
    :param colnames: Customize cause effect colnames
    :param make_pair: Make a pair column?
    :param extra_dict: an extra dictionary of attributes you want to specify
    :return: net_df, the network from all matrix nonzero entries
    """
    if colnames == None:
        colnames = ["Cause", "Effect"]
    if edge_name == None:
        edge_name = "Weight"
    if abs_name == None:
        abs_name = "AbsWeight"
    if sort_by == None:
        sort_by = abs_name
    matr = matr_df.values
    genes = matr_df.columns.values
    # One network edge per nonzero matrix entry; row label = cause (indices[0]),
    # column label = effect (indices[1]).
    indices = np.where(matr != 0)
    betas = matr[indices]
    net_dict = collections.OrderedDict()
    net_dict[cause_effect_col] = ["-".join(x) for x in zip(genes[indices[0]],genes[indices[1]])]
    net_dict[colnames[0]] = genes[indices[0]]
    net_dict[colnames[1]] = genes[indices[1]]
    net_dict[edge_name] = betas
    if not no_abs:
        net_dict[abs_name] = np.absolute(betas)
    if extra_dict != None:
        net_dict.update(extra_dict)
    net_df = pd.DataFrame.from_dict(net_dict)
    if make_pair:
        net_df = make_pair_col(net_df, colnames)
    if make_type:
        # NOTE(review): when make_type is True and *name* was not given, the
        # "Type" column is filled with None — confirm callers expect that.
        net_df["Type"] = name
    if do_sort:
        net_df.sort_values(sort_by, ascending=False, inplace=True)
    print("New network (edges, attributes) = ", net_df.shape)
    return net_df
def matr_file_to_net_file(matr_file, name, net_file=None, conv_to_hg=True, add_pair=True):
    """Convert a matrix file to a network file"""
    if not net_file:
        # Default output name: replace the (assumed) 4-char extension.
        net_file = matr_file[:-4] + "-network.txt"
    print(name)
    cause_name = name + " Cause"
    effect_name = name + " Effect"
    matr_df = pd.read_csv(matr_file, sep="\t", header=0, index_col=0)
    print(matr_df.head())
    # *name* is passed positionally as matr_to_net's edge_name (weight column).
    net_df = matr_to_net(matr_df, name, colnames=[cause_name, effect_name])
    print(net_df.head())
    if conv_to_hg:
        # Convert Ensembl ids to HGNC symbols ("" when the id is unknown).
        net_df[cause_name] = [ensg2hg(gene) for gene in net_df[cause_name]]
        net_df[effect_name] = [ensg2hg(gene) for gene in net_df[effect_name]]
        print("Post conversion: ")
        print(net_df.head())
    if add_pair:
        net_df = make_pair_col(net_df, [cause_name, effect_name], "Pair")
    print()
    print("FINAL:")
    print(net_df.head())
    print("Writing to ", net_file)
    net_df.to_csv(net_file, sep="\t", index=False)
def overlay_dfs(old_df, over_df, key = "Pair", over_cols=[], fill_empty=False, fill_genecols=genecols, how='outer'):
    """
    Overlay dfs where the key is Pair and using the known genecols.
    :param old_df:
    :param over_df: Df to overlay
    :param key: Key to use to match up rows. Unwrap this into the genecols
    :param over_cols: The columns to merge over
    :param cols: Columns to overlay with
    :return:
    """
    # NOTE(review): this validates the module-level `genecols`, not the
    # `fill_genecols` argument actually used below — confirm intent.
    if len(genecols) != 2:
        raise ValueError("There must be 2 genecols to unwrap the pair.")
    if over_cols == []:
        over_cols = over_df.columns.values
    # columns to merge over
    add_df = over_df[over_cols]
    print("Over cols: ", over_cols)
    #
    df = old_df.merge(add_df, left_on=key, right_on=key, how=how)
    if fill_empty:
        # Rows introduced by an outer merge lack gene columns; reconstruct
        # them from the stringified *key* pair.
        df = fill_empty_genecols(df, fill_genecols, key)
    return df
def get_feedforward(G, Ps=None, Xs=None, Ts=None):
    """
    :param G: A Digraph
    :return: a list of tuples (P, X, C) where P -> X, X -> T, P -> T
    """
    # Candidate parent / intermediate / target sets default to all nodes.
    if Ps == None:
        Ps = set(G.nodes())
    if Xs == None:
        Xs = set(G.nodes())
    if Ts == None:
        Ts = set(G.nodes())
    feedforward_set = set()
    for X in Xs:
        for T in set(G.successors(X)).intersection(Ts):
            # Parents that regulate both the intermediate X and the target T.
            this_Ps = set(G.predecessors(X)).intersection(set(G.predecessors(T)).intersection(Ps))
            if len(this_Ps) > 0:
                for P in this_Ps:
                    feedforward_set.add((P, X, T))
    print("Num feedforward: ", len(feedforward_set))
    return feedforward_set
import csv
import numpy as np
import json
import pandas as pd
import collections
global data_dir
data_dir = "/Users/jlu96/v-causal-snps/data/GeneExpressionData/GGR_Network/"
global genecols
genecols = ["Official Symbol Interactor A", "Official Symbol Interactor B"]
def load_hg_ensg_old():
    """
    Load the HGNC <-> Ensembl mapping from the tab-separated HGNC export.

    :return: (hg2ensg, ensg2hg) dicts; only rows with a non-empty
        "Ensembl Gene ID" are kept.
    """
    hg_ensg_file = data_dir + "HGNC-to-Ensembl.txt"
    hg2ensg = {}
    # "rU" mode was removed in Python 3.11; "r" with newline="" is the
    # csv-module recommended replacement.
    with open(hg_ensg_file, 'r', newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter="\t")
        for row in reader:
            if row["Ensembl Gene ID"]:
                hg2ensg[row["Approved Symbol"]] = row["Ensembl Gene ID"]
    ensg2hg = dict([(item[1], item[0]) for item in list(hg2ensg.items()) if item[0] != ""])
    return hg2ensg, ensg2hg
def load_hg_ensg(ensg_hg_file = data_dir + "gencode.v22.gene_id_to_gene_name.json"
                 ):
    """
    Load the Ensembl-id -> HGNC-symbol mapping from a gencode JSON file.

    :param ensg_hg_file: path to a JSON object mapping Ensembl ids to symbols.
    :return: (hg2ensg, ensg2hg) dicts; the inverted hg2ensg drops entries
        whose Ensembl id is "".
    """
    # "rU" mode was removed in Python 3.11; plain "r" (universal newlines by
    # default in text mode) is the modern equivalent.
    with open(ensg_hg_file , 'r') as jsonfile:
        ensg2hg = json.load(jsonfile)
    hg2ensg = dict([(item[1], item[0]) for item in list(ensg2hg.items()) if item[0] != ""])
    return hg2ensg, ensg2hg
def hg2ensg(gene, verbose=False):
    """Convert an HGNC symbol to an Ensembl gene id; "" if unknown."""
    # The conversion table is cached on the function object so the file-backed
    # mapping is loaded only on the first call.
    try:
        conv = hg2ensg.conv
    except AttributeError:
        hg2ensg.conv, _ = load_hg_ensg()
        conv = hg2ensg.conv
    try:
        return conv[gene]
    except KeyError:
        if verbose:
            print("Gene ", gene, " missing from hg2ensg")
        return ""
def ensg2hg(gene, verbose=False):
    """Convert an Ensembl gene id to an HGNC symbol; "" if unknown."""
    # Mapping cached on the function object; loaded lazily on first use.
    try:
        conv = ensg2hg.conv
    except AttributeError:
        _, ensg2hg.conv = load_hg_ensg()
        conv = ensg2hg.conv
    try:
        return conv[gene]
    except KeyError:
        if verbose:
            print("Gene ", gene, "missing from ensg2hg")
        return ""
def load_hg_prot():
    """
    Load the gene-symbol <-> protein mapping from raw_files/ProteinToGene.txt.

    :return: (hg2prot, prot2hg) dicts built from the Gene_Name and Protein
        columns (path is relative to the current working directory).
    """
    prot_hg_file = "raw_files/ProteinToGene.txt"
    prot_hg_df = pd.read_csv(prot_hg_file, sep="\t")
    genes = prot_hg_df["Gene_Name"]
    prots = prot_hg_df["Protein"]
    prot2hg = dict(list(zip(prots, genes)))
    hg2prot = dict(list(zip(genes, prots)))
    return hg2prot, prot2hg
def hg2prot(gene, verbose=False):
    """Convert a gene symbol to its protein identifier; "" if unknown."""
    # Mapping cached on the function object; loaded lazily on first use.
    try:
        conv = hg2prot.conv
    except AttributeError:
        hg2prot.conv, _ = load_hg_prot()
        conv = hg2prot.conv
    try:
        return conv[gene]
    except KeyError:
        if verbose:
            print("Gene ", gene, " missing from hg2prot")
        return ""
def prot2hg(gene, verbose=False):
    """Convert a protein identifier to its gene symbol; "" if unknown."""
    # Mapping cached on the function object; loaded lazily on first use.
    try:
        conv = prot2hg.conv
    except AttributeError:
        _, prot2hg.conv = load_hg_prot()
        conv = prot2hg.conv
    try:
        return conv[gene]
    except KeyError:
        if verbose:
            print("Gene ", gene, "missing from prot2hg")
        return ""
def load_syn_hg():
    """
    Build synonym -> approved-symbol maps from the HGNC export.

    Scans both the "Previous Symbols" and "Synonyms" columns (comma-separated)
    of HGNC-to-Ensembl.txt and records, for each synonym, the set of approved
    symbols it maps to.

    :return: (syn2hg, unique_syn2hg) where syn2hg maps synonym -> set of
        approved symbols and unique_syn2hg keeps only unambiguous synonyms,
        mapped directly to their single approved symbol.
    """
    synonym_file = "HGNC-to-Ensembl.txt"
    syn_df = pd.read_csv(synonym_file, sep="\t")
    syn2hg = {}
    prev_syns = []
    for i in np.where(pd.notnull(syn_df["Previous Symbols"]))[0]:
        syns = syn_df["Previous Symbols"][i].split(",")
        hg = syn_df["Approved Symbol"][i]
        for syn in syns:
            if syn not in syn2hg:
                syn2hg[syn] = {hg}
            else:
                syn2hg[syn].add(hg)
        prev_syns.extend(syns)
    just_syns = []
    for i in np.where(pd.notnull(syn_df["Synonyms"]))[0]:
        syns = syn_df["Synonyms"][i].split(",")
        hg = syn_df["Approved Symbol"][i]
        for syn in syns:
            if syn not in syn2hg:
                syn2hg[syn] = {hg}
            else:
                syn2hg[syn].add(hg)
        just_syns.extend(syns)
    # Synonyms pointing at more than one approved symbol are ambiguous.
    extra_syns = [syn for syn in syn2hg if len(syn2hg[syn]) > 1]
    print("Num syns:", len(list(syn2hg.keys())))
    print("Num symbols: ", len(prev_syns), "set: ", len(set(prev_syns)))
    print("Num Synonyms: ", len(just_syns), "set: ", len(set(just_syns)))
    print("Num more than one: ", len(extra_syns))
    unique_syn2hg = syn2hg.copy()
    for syn in syn2hg:
        if len(syn2hg[syn]) == 1:
            unique_syn2hg[syn] = list(syn2hg[syn])[0]
        else:
            del unique_syn2hg[syn]
    return syn2hg, unique_syn2hg
def load_hg_gr():
    """
    Load the GR-network <-> gene-symbol mapping.

    Reads the conversion table (rows with an empty "Gene Symbol" dropped) and
    returns (gene2gr, gr2gene) dicts.
    """
    gr_file = "raw_files/Glucocorticoid_receptor_regulatory_network-gene-conversions.txt"
    df = pd.read_csv(gr_file, sep="\t")
    df = df[df["Gene Symbol"] != ""]
    gr = df["GR Gene"].values
    genes = df["Gene Symbol"].values
    gene2gr = dict(list(zip(genes, gr)))
    gr2gene = dict(list(zip(gr, genes)))
    return gene2gr, gr2gene
def gr2hg(gene, verbose=False):
    """Convert a GR-network gene name to its HGNC symbol; "" if unknown."""
    # Mapping cached on the function object; loaded lazily on first use.
    try:
        conv = gr2hg.conv
    except AttributeError:
        _, gr2hg.conv = load_hg_gr()
        conv = gr2hg.conv
    try:
        return conv[gene]
    except KeyError:
        if verbose:
            print("Gene ", gene, "missing from gr2hg")
        return ""
def load_genes(filename, header=False, verbose=True, as_set=True):
    """
    Load gene identifiers from the first column of a tab-separated file.

    :param filename: path to the TSV file.
    :param header: if True, skip the first row.
    :param verbose: if True, print how many genes were loaded.
    :param as_set: if True, return a set (dropping duplicates and order),
        otherwise a list in file order.
    :return: set or list of gene identifiers.
    """
    genes = []
    # "rU" mode was removed in Python 3.11; newline="" is the csv-module
    # recommended way to open files for reading.
    with open(filename, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter="\t")
        if header:
            next(reader)
        for row in reader:
            # Skip blank lines (csv yields [] for them), which previously
            # raised IndexError on row[0].
            if row:
                genes.append(row[0])
    if as_set:
        genes = set(genes)
    if verbose:
        print("# Genes in ", filename, ": ", len(genes))
    return genes
def write_genes(filename, genes):
    """Write one gene identifier per row to *filename* (tab-delimited)."""
    with open(filename, 'w') as out_handle:
        gene_writer = csv.writer(out_handle, delimiter='\t')
        gene_writer.writerows([gene_id] for gene_id in genes)
    print(len(genes), " genes writen to: ", filename)
def annotate_cols(df, cols, genes, name):
    """Add column *name*: per-row count of *cols* whose value lies in *genes*."""
    membership = [df[column].isin(genes).astype(int) for column in cols]
    df[name] = np.sum(membership, axis=0)
    return df
def make_pair_col(df, cols, col_name = "Pair", reorder=False):
    """
    Add a canonical (sorted) pair column built from two columns.

    :param df: dataframe to annotate (modified in place).
    :param cols: exactly two column names.
    :param col_name: new column holding str(tuple(sorted(pair))) per row.
    :param reorder: if True, also rewrite the two columns so that each row is
        in the sorted order used for the pair string.
    :return: the same dataframe.
    """
    assert len(cols) == 2
    # Sort each pair once and reuse the tuples, instead of the original
    # round-trip through str() followed by eval() per row (slow and an
    # eval() smell); output is byte-identical.
    sorted_pairs = [tuple(sorted(x)) for x in df[cols].values]
    df[col_name] = [str(pair) for pair in sorted_pairs]
    if reorder:
        df[cols[0]] = [pair[0] for pair in sorted_pairs]
        df[cols[1]] = [pair[1] for pair in sorted_pairs]
    return df
def make_cause_col(df, cause_col, effect_col, col_name = "Cause-Effect"):
    """Add *col_name* holding per-row (cause, effect) tuples from the two columns."""
    df[col_name] = list(zip(df[cause_col], df[effect_col]))
    return df
def make_type_col(df, col_name):
    """
    Mark every row as belonging to one "type".

    :param df: dataframe to annotate (modified in place).
    :param col_name: name of the new indicator column; all rows get 1.
    :return: the same dataframe with the indicator column added.
    """
    df[col_name] = 1
    return df
def annotate_cols_summary(df, cols, col_name = "Type", do_reverse=False):
    """Assumes there is a non-zero, non-null in each of the cols. Summarizes them one-by-one"""
    assert len(set(cols).intersection(set(df.columns.values))) == len(cols)
    sum_df = df.copy()
    # One summary string per row, built by appending "<col>," for every column
    # whose value in that row is non-null and non-zero.
    type_array = np.empty(len(df), dtype=object)
    type_array[:] = ""
    rev_cols = cols[:]
    if do_reverse:
        rev_cols.reverse()
    for col in rev_cols:
        indices = np.where(np.logical_and(pd.notnull(sum_df[col]), sum_df[col] != 0))[0]
        print("FOr col", col, ":", len(indices))
        type_array[indices] += col + ","
    sum_df[col_name] = type_array
    #print sum_df[sum_df[col_name] != ""][col_name]
    return sum_df
def get_annotated(df, names=[], cols=genecols,
                  suffixes = ["_A", "_B"]):
    """ Given annotation type
    :param df:
    :param name:
    :return: df with annotations, and with a col of all rows containing at least one
    """
    # Gene lists for each annotation are loaded from hard-coded files under
    # file_dir; *names* selects which annotations to apply (default set below).
    file_dir = "/Users/jlu96/v-causal-snps/data/GeneExpressionData/GGR_Network/raw_files/"
    name2filename = {}
    name2filename["Diabetes_highconf"] = file_dir + "GSEA-diabetes-high-conf-7_29_16-genes.txt"
    name2filename["Diabetes_lowconf"] = file_dir + "GSEA-diabetes-low-conf-7_29_16-genes.txt"
    name2filename["Metabolism_GSEA_highconf"] = file_dir + "GSEA-gluconeo-glyc-gluc-metab-8_2_16-genes.txt"
    name2filename["Metabolism_GSEA_lowconf"] = file_dir + "GSEA-gluconeo-am-glyc-lip-gluc-metab-7_29_16-genes.txt"
    name2filename["Immune"] = file_dir + "immune-GSEA-GOC-genes.txt"
    name2filename["Immune_GOC-Priority"] = file_dir + "Immune-GOC-Priority.txt"
    name2filename["Immune_GSEA"] = file_dir + "GSEA-immune-inflammatory-7_29_16-genes.txt"
    name2filename["GR"] = file_dir + "GR_GO_direct_candidates_union.txt"
    name2filename["GR_direct-candidate"] = file_dir + "GR_direct_candidates-HGNC.txt"
    name2filename["GR_GO"] = file_dir + "GR_Pathway-GO-HGNC.txt"
    name2filename["DEG_edgeR-0.05"] = file_dir + "sig_genes_reg_fdr-0.05-all.txt"
    default_loads = ["GR", "DEG_edgeR-0.05", "Diabetes_highconf", "Diabetes_lowconf", "Immune", "Metabolism_GSEA_highconf",
                     "Metabolism_GSEA_lowconf"]
    if names == []:
        names = default_loads
    else:
        for name in names:
            if name not in name2filename:
                raise ValueError("Name " + name + " not in list of annotations")
    df_genes = get_genes(df, genecols=cols)
    for name in names:
        filename = name2filename[name]
        annot_genes = load_genes(filename)
        # Get the df's genes. See # annotated
        both_genes = df_genes.intersection(annot_genes)
        print("# ", name, " genes in df: ", len(both_genes))
        newcols = []
        # Annotate each column if extra genes
        for col, suffix in zip(cols, suffixes):
            newcol = name + suffix
            newcols.append(newcol)
            df = annotate_cols(df, [col], annot_genes, newcol)
        # Annotate the total number in that row
        df[name] = np.sum([df[newcol] for newcol in newcols], axis=0)
        print("# ", name, " edges in df: ", len(np.where(df[name])[0]))
    return df
def load_network_df(filename, cols=genecols, make_pairs=True):
    """
    Load an undirected network TSV, deduplicate pairs and drop self-edges.

    :param filename: path to a tab-separated edge list.
    :param cols: the two gene columns used to build the canonical "Pair" key.
    :param make_pairs: kept for interface compatibility (currently unused).
    :return: deduplicated dataframe with a fresh integer index.
    """
    df = pd.read_csv(filename, sep="\t")
    df = make_pair_col(df, cols)
    print("Initial pairs: ", len(df))
    df.drop_duplicates(subset="Pair", keep="first", inplace=True)
    print("After dup drops: ", len(df))
    # Drop self-interactions. FIX: the original `df.drop(df[cols[0]] == df[cols[1]])`
    # passed a boolean mask where index labels are expected AND discarded the
    # returned frame, so self-edges were never actually removed.
    df = df[df[cols[0]] != df[cols[1]]]
    print("After self drops: ", len(df))
    df.index = list(range(len(df)))
    return df
def load_causal_df(filename, cause_col, effect_col):
    """
    Load a directed (causal) edge list TSV with Pair and Cause-Effect columns.

    :param filename: path to a tab-separated edge list.
    :param cause_col: column holding the cause gene.
    :param effect_col: column holding the effect gene.
    :return: dataframe with self-edges removed and a fresh integer index.
    """
    cols= [cause_col, effect_col]
    df = pd.read_csv(filename, sep="\t")
    df = make_pair_col(df, cols, reorder=False)
    df = make_cause_col(df, cause_col, effect_col)
    # Drop self-interactions. FIX: the original `df.drop(...)` call discarded
    # its result (and passed a boolean mask where labels are expected), so
    # self-edges were never actually removed.
    df = df[df[cols[0]] != df[cols[1]]]
    df.index = list(range(len(df)))
    return df
def count_rows(df, name):
    """Count rows of *df* whose column *name* is non-null and non-zero."""
    valid = np.logical_and(pd.notnull(df[name]), df[name] != 0)
    return len(np.where(valid)[0])
def get_genes(df, genecols=genecols):
    """Return the set of all gene names appearing in *genecols* of *df*."""
    collected = set()
    for column in genecols:
        collected.update(df[column].values)
    return collected
def get_genes_in(df, name, genecols=["Gene"], verbose=False):
    """
    :param df:
    :param name:
    :return: Genes where the colname is not empty
    """
    # Keep rows whose *name* column is non-null and non-zero, then collect the
    # gene identifiers appearing in *genecols* of those rows.
    in_df = df[np.logical_and(pd.notnull(df[name]), df[name] != 0)]
    genes = get_genes(in_df, genecols=genecols)
    if verbose:
        print("Genes in ", name, ":", len(genes))
    return genes
def get_cause_to_effect(pairs):
    """Map each cause to the set of its effects over (cause, effect) pairs."""
    mapping = {}
    for cause, effect in pairs:
        mapping.setdefault(cause, set()).add(effect)
    return mapping
def filter_pairs(pairs, genes):
    """
    Keep only the pairs whose both members belong to *genes*.

    :param pairs: iterable of 2-element pairs.
    :param genes: collection supporting membership tests.
    :return: list of the original pair objects with both endpoints in *genes*.
    """
    kept = []
    for pair in pairs:
        if pair[0] in genes and pair[1] in genes:
            kept.append(pair)
    return kept
# def get_cause_plot_triples(cause2effects, sort_dict=None):
# """
# :param cause2effects: Dictionary of the causes and effects
# :param sort_dict: Dictionary returning a key to sort the effects by
# :return: a list of plot_triples, cause at beginning
# """
# plot_triples_list = []
# for cause in cause2effects:
# effects = sorted(cause2effects[cause], key = lambda entry: sort_dict[entry], reverse=True)
# effect_list = pj.partition_inputs(list(effects), int(round(len(effects)/2.0)))
#
#
# plot_triples_list.extend([[cause] + e for e in effect_list])
#
# print "Plot triples: "
# print plot_triples_list[0:20]
#
# return plot_triples_list
def limit_to_genes_all(df, genes, cols=genecols):
    """
    Keep only rows whose values in every column of *cols* belong to *genes*.

    :param df: input dataframe (not modified).
    :param genes: collection of allowed gene names.
    :param cols: columns that must all contain members of *genes*.
    :return: filtered copy of *df* with a fresh integer index.
    """
    required = len(cols)
    keep_mask = np.sum([df[column].isin(genes).values for column in cols], axis=0) >= required
    filtered = df[keep_mask].copy()
    filtered.index = list(range(len(filtered)))
    return filtered
# def df_to_graph_causal(df, key_col, cause_col, source_col=None, target_col=None, type_cols=[]):
# """
# :param df: Input dataframe
# :param key_col: Column containing source-target pairs
# :param cause_col: Column saying the type of causal relation. If None, assume just PPI
# :param source_col: The source column of the causal relation if it exists
# :param target_col: Target column
# :param type_cols: Other attributes to annotate the edge with
# :return: A Digraph where each edge has attributes: source, target (None if Causal Type is None)
# Causal Type, and other type_col annotations
# """
# G = nx.Graph()
#
# for i in range(len(df)):
# if pd.notnull(cause_col):
#
#
# type_dict = {}
# for type_col in type_cols:
# type_dict[type_col] = df[type_col][i]
#
# G.add_edge(source, target, attr_dict=type_dict)
#
# return G
def matr_to_net(matr_df, edge_name=None, abs_name=None, cause_effect_col = "Cause-Effect", colnames=None, make_pair=False,
                make_type=True, name=None, sort_by=None, extra_dict=None,
                no_abs=False, do_sort=True):
    """
    Convert a coefficient matrix to a network.
    :param matr_df: rows and columns are the genes
    :param edge_name: The name to give the column of edge values (from matrix)
    :param extra_dict: Dictionary to update the rest with.
    :param cause_effect_col:
    :param colnames: Customize cause effect colnames
    :param make_pair: Make a pair column?
    :param extra_dict: an extra dictionary of attributes you want to specify
    :return: net_df, the network from all matrix nonzero entries
    """
    if colnames == None:
        colnames = ["Cause", "Effect"]
    if edge_name == None:
        edge_name = "Weight"
    if abs_name == None:
        abs_name = "AbsWeight"
    if sort_by == None:
        sort_by = abs_name
    matr = matr_df.values
    genes = matr_df.columns.values
    # One network edge per nonzero matrix entry; row label = cause (indices[0]),
    # column label = effect (indices[1]).
    indices = np.where(matr != 0)
    betas = matr[indices]
    net_dict = collections.OrderedDict()
    net_dict[cause_effect_col] = ["-".join(x) for x in zip(genes[indices[0]],genes[indices[1]])]
    net_dict[colnames[0]] = genes[indices[0]]
    net_dict[colnames[1]] = genes[indices[1]]
    net_dict[edge_name] = betas
    if not no_abs:
        net_dict[abs_name] = np.absolute(betas)
    if extra_dict != None:
        net_dict.update(extra_dict)
    net_df = pd.DataFrame.from_dict(net_dict)
    if make_pair:
        net_df = make_pair_col(net_df, colnames)
    if make_type:
        # NOTE(review): when make_type is True and *name* was not given, the
        # "Type" column is filled with None — confirm callers expect that.
        net_df["Type"] = name
    if do_sort:
        net_df.sort_values(sort_by, ascending=False, inplace=True)
    print("New network (edges, attributes) = ", net_df.shape)
    return net_df
def matr_file_to_net_file(matr_file, name, net_file=None, conv_to_hg=True, add_pair=True):
    """Convert a matrix file to a network file"""
    if not net_file:
        # Default output name: replace the (assumed) 4-char extension.
        net_file = matr_file[:-4] + "-network.txt"
    print(name)
    cause_name = name + " Cause"
    effect_name = name + " Effect"
    matr_df = pd.read_csv(matr_file, sep="\t", header=0, index_col=0)
    print(matr_df.head())
    # *name* is passed positionally as matr_to_net's edge_name (weight column).
    net_df = matr_to_net(matr_df, name, colnames=[cause_name, effect_name])
    print(net_df.head())
    if conv_to_hg:
        # Convert Ensembl ids to HGNC symbols ("" when the id is unknown).
        net_df[cause_name] = [ensg2hg(gene) for gene in net_df[cause_name]]
        net_df[effect_name] = [ensg2hg(gene) for gene in net_df[effect_name]]
        print("Post conversion: ")
        print(net_df.head())
    if add_pair:
        net_df = make_pair_col(net_df, [cause_name, effect_name], "Pair")
    print()
    print("FINAL:")
    print(net_df.head())
    print("Writing to ", net_file)
    net_df.to_csv(net_file, sep="\t", index=False)
def overlay_dfs(old_df, over_df, key = "Pair", over_cols=[], fill_empty=False, fill_genecols=genecols, how='outer'):
    """
    Overlay dfs where the key is Pair and using the known genecols.
    :param old_df:
    :param over_df: Df to overlay
    :param key: Key to use to match up rows. Unwrap this into the genecols
    :param over_cols: The columns to merge over
    :param cols: Columns to overlay with
    :return:
    """
    # NOTE(review): this validates the module-level `genecols`, not the
    # `fill_genecols` argument actually used below — confirm intent.
    if len(genecols) != 2:
        raise ValueError("There must be 2 genecols to unwrap the pair.")
    if over_cols == []:
        over_cols = over_df.columns.values
    # columns to merge over
    add_df = over_df[over_cols]
    print("Over cols: ", over_cols)
    #
    df = old_df.merge(add_df, left_on=key, right_on=key, how=how)
    if fill_empty:
        # Rows introduced by an outer merge lack gene columns; reconstruct
        # them from the stringified *key* pair.
        df = fill_empty_genecols(df, fill_genecols, key)
    return df
def fill_empty_genecols(df, genecols=genecols, key="Pair"):
    """
    Fill missing gene columns from the stringified *key* pair column.

    Rows where either gene column is null get both gene columns reconstructed
    by parsing the "('a', 'b')" string stored in *key* (as produced by
    make_pair_col).

    :return: the same dataframe, modified in place.
    """
    # for the places where genecols are empty, rewrite the annotation
    indices = np.where(np.logical_or(pd.isnull(df[genecols[0]]), pd.isnull(df[genecols[1]])))[0]
    print("Num missing genecols: ", len(indices))
    print(indices)
    # FIX: guard the empty case — zip(*[]) previously made zip_cols[0] raise
    # IndexError when no rows were missing.
    if len(indices) == 0:
        return df
    # NOTE: eval() is trusted here because *key* holds str(tuple(...)) values
    # produced by make_pair_col; never feed user-controlled strings through it.
    pairs = [eval(p) for p in df[key][indices].values]
    zip_cols = list(zip(*pairs))
    # FIX: use .loc instead of chained indexing (df[col][indices] = ...),
    # which may write to a temporary and silently fail in modern pandas.
    # (Assumes a default RangeIndex, as produced by overlay_dfs — the
    # positional indices then match labels.)
    df.loc[df.index[indices], genecols[0]] = zip_cols[0]
    df.loc[df.index[indices], genecols[1]] = zip_cols[1]
    return df
def graph_add_df_both_ways(G, df, col1, col2, edge_cols):
    """Add every dataframe row as a pair of reciprocal edges col1 <-> col2 on G."""
    attr_keys = set(edge_cols)
    for _, row in df.iterrows():
        node_a = row[col1]
        node_b = row[col2]
        payload = {k: row[k] for k in attr_keys if k in row}
        G.add_edge(node_a, node_b, attr_dict=payload)
        G.add_edge(node_b, node_a, attr_dict=payload)
    return G
def graph_add_df_one_way(G, df, col1, col2, edge_cols):
    """Add each dataframe row as a single directed edge col1 -> col2 on G."""
    attr_keys = set(edge_cols)
    for _, row in df.iterrows():
        payload = {k: row[k] for k in attr_keys if k in row}
        G.add_edge(row[col1], row[col2], attr_dict=payload)
    return G
def get_feedforward(G, Ps=None, Xs=None, Ts=None):
    """
    Find feed-forward motifs in a directed graph.

    :param G: DiGraph-like object exposing nodes/successors/predecessors.
    :param Ps: candidate top regulators (default: all nodes).
    :param Xs: candidate intermediates (default: all nodes).
    :param Ts: candidate targets (default: all nodes).
    :return: set of (P, X, T) triples with edges P -> X, X -> T and P -> T.
    """
    all_nodes = set(G.nodes())
    Ps = all_nodes if Ps is None else Ps
    Xs = all_nodes if Xs is None else Xs
    Ts = all_nodes if Ts is None else Ts
    motifs = set()
    for mid in Xs:
        for target in set(G.successors(mid)).intersection(Ts):
            # Parents regulating both the intermediate and the target.
            shared = set(G.predecessors(mid)).intersection(set(G.predecessors(target)).intersection(Ps))
            for parent in shared:
                motifs.add((parent, mid, target))
    print("Num feedforward: ", len(motifs))
    return motifs
634a8730df30da759f5eb7f0b329cabff1b0d86f | 3,241 | py | Python | example.py | dunky11/exponential-weighting-watermarking | 717bd04ac05daf8eb7e902ec84b04fc02126bf92 | [
"MIT"
] | 7 | 2020-11-22T19:14:17.000Z | 2022-03-01T05:59:58.000Z | example.py | dunky11/exponential-weighting-watermarking | 717bd04ac05daf8eb7e902ec84b04fc02126bf92 | [
"MIT"
] | 1 | 2021-10-05T21:17:02.000Z | 2021-10-05T21:17:02.000Z | example.py | dunky11/exponential-weighting-watermarking | 717bd04ac05daf8eb7e902ec84b04fc02126bf92 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import random
from ew import EWBase, EWDense, EWConv2D
AUTOTUNE = tf.data.experimental.AUTOTUNE
dataset = tfds.load("mnist", as_supervised=True, split="train")
val_set = tfds.load("mnist", as_supervised=True, split="test")
dataset = dataset.map(to_float)
val_set = val_set.map(to_float)
# Generate the key set. In the paper they took a subset of the dataset and assigned random labels to them in order to combat query modification. However that altered the validation accuracy too much. For simplicity reasons we will just invert the pixels of a subset of the training dataset.
key_set = dataset.take(128)
key_set = key_set.map(invert)
dataset = dataset.skip(128)
# An easy way to achieve a high accuracy on the key set is to overfit our model on the key set, since it doesn't have to generalize.
key_set = key_set.concatenate(key_set).concatenate(key_set).concatenate(key_set).concatenate(key_set).concatenate(key_set)
union = dataset.concatenate(key_set)
dataset = dataset.shuffle(2048).batch(128).prefetch(AUTOTUNE)
union = union.shuffle(2048).batch(128).prefetch(AUTOTUNE)
val_set = val_set.batch(128)
# t is the 'temperature' hyperparameter. The higher t is, the more the values of the weight matrix will get squeezed, 2.0 was used in the paper.
t = 2.0
model = keras.Sequential([
EWConv2D(16, 3, t, padding="same", activation=keras.activations.relu),
EWConv2D(32, 3, t, padding="same", strides=2, activation=keras.activations.relu),
EWConv2D(64, 3, t, padding="same", strides=2, activation=keras.activations.relu),
keras.layers.Flatten(),
EWDense(10, activation=None, t=t)
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["sparse_categorical_accuracy"])
model.build(input_shape=(None, 28, 28, 1))
# Train the model normally with exponential weighting disabled until it converges:
_ = model.fit(x=dataset, epochs=3, validation_data=val_set)
# Enable exponential weighting and train the model on the union of the dataset and the key set in order to embed the watermark:
enable_ew(model)
_ = model.fit(x=union, epochs=2, validation_data=val_set)
# Reset the optimizer. Disable exponential weighting and test the accuracy on the key set:
model.optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)
disable_ew(model)
_, key_acc = model.evaluate(key_set.batch(128))
_, val_acc = model.evaluate(val_set)
print(f"Watermark accuracy is {round(key_acc * 100, 2)}%.")
print(f"Validation set accuracy is {round(val_acc * 100, 2)}%.")
# If the watermark accuracy(key_acc) is above a predefined threshold, the model was watermarked by us. | 33.412371 | 290 | 0.735267 | import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import random
from ew import EWBase, EWDense, EWConv2D
AUTOTUNE = tf.data.experimental.AUTOTUNE
def enable_ew(model):
    """Turn on exponential weighting for every EWBase layer of *model*."""
    for candidate in model.layers:
        if isinstance(candidate, EWBase):
            candidate.enable()
def disable_ew(model):
    """Turn off exponential weighting for every EWBase layer of *model*."""
    for candidate in model.layers:
        if isinstance(candidate, EWBase):
            candidate.disable()
def to_float(x, y):
    # Map uint8 MNIST pixels into [0, 1] floats; labels pass through unchanged.
    return tf.cast(x, tf.float32) / 255.0, y
# Load MNIST train/test splits as (image, label) pairs and normalize pixels.
dataset = tfds.load("mnist", as_supervised=True, split="train")
val_set = tfds.load("mnist", as_supervised=True, split="test")
dataset = dataset.map(to_float)
val_set = val_set.map(to_float)
# Generate the key set. In the paper they took a subset of the dataset and assigned random labels to them in order to combat query modification. However that altered the validation accuracy too much. For simplicity reasons we will just invert the pixels of a subset of the training dataset.
def invert(x, y):
    # Rescales [0, 1] pixels to [-1, 1] to make key-set images differ from the
    # training distribution. NOTE(review): despite the name, this is an affine
    # rescale, not a pixel inversion — confirm intent.
    return (x * 2.0 - 1.0), y
# Carve 128 samples out of the training set to serve as the watermark key set.
key_set = dataset.take(128)
key_set = key_set.map(invert)
dataset = dataset.skip(128)
# An easy way to achieve a high accuracy on the key set is to overfit our model on the key set, since it doesn't have to generalize.
# Five chained concatenations with the original 128-sample key set yield six
# copies (768 samples) per epoch.
key_set = key_set.concatenate(key_set).concatenate(key_set).concatenate(key_set).concatenate(key_set).concatenate(key_set)
union = dataset.concatenate(key_set)
dataset = dataset.shuffle(2048).batch(128).prefetch(AUTOTUNE)
union = union.shuffle(2048).batch(128).prefetch(AUTOTUNE)
val_set = val_set.batch(128)
# t is the 'temperature' hyperparameter. The higher t is, the more the values of the weight matrix will get squeezed, 2.0 was used in the paper.
t = 2.0
model = keras.Sequential([
    EWConv2D(16, 3, t, padding="same", activation=keras.activations.relu),
    EWConv2D(32, 3, t, padding="same", strides=2, activation=keras.activations.relu),
    EWConv2D(64, 3, t, padding="same", strides=2, activation=keras.activations.relu),
    keras.layers.Flatten(),
    EWDense(10, activation=None, t=t)
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["sparse_categorical_accuracy"])
model.build(input_shape=(None, 28, 28, 1))
# Train the model normally with exponential weighting disabled until it converges:
_ = model.fit(x=dataset, epochs=3, validation_data=val_set)
# Enable exponential weighting and train the model on the union of the dataset and the key set in order to embed the watermark:
enable_ew(model)
_ = model.fit(x=union, epochs=2, validation_data=val_set)
# Reset the optimizer. Disable exponential weighting and test the accuracy on the key set:
model.optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)
disable_ew(model)
_, key_acc = model.evaluate(key_set.batch(128))
_, val_acc = model.evaluate(val_set)
print(f"Watermark accuracy is {round(key_acc * 100, 2)}%.")
print(f"Validation set accuracy is {round(val_acc * 100, 2)}%.")
# If the watermark accuracy(key_acc) is above a predefined threshold, the model was watermarked by us.
097b103d8e1f0cc11caafbeef7b83e6bb5f01040 | 16,247 | py | Python | athena/.ipynb_checkpoints/__init__-checkpoint.py | markowetzlab/Athena | 55de866303fd6b82d05b294ccab4e85c4b965f81 | [
"MIT"
] | 1 | 2022-03-23T12:45:08.000Z | 2022-03-23T12:45:08.000Z | athena/__init__.py | markowetzlab/Athena | 55de866303fd6b82d05b294ccab4e85c4b965f81 | [
"MIT"
] | null | null | null | athena/__init__.py | markowetzlab/Athena | 55de866303fd6b82d05b294ccab4e85c4b965f81 | [
"MIT"
] | null | null | null | import os
import glob
import pickle
import random
import shutil
import zipfile
import requests
import igraph as ig
import pandas as pd
from .grna import GuideRNA
from .network import Network
from .kinetics import Kinetics
from .sampling import Sampling
from .gillespie_ssa import GillespieSSA
from .compile_reactions import CompileReactions | 42.643045 | 161 | 0.603373 | import os
import glob
import pickle
import random
import shutil
import zipfile
import requests
import igraph as ig
import pandas as pd
from .grna import GuideRNA
from .network import Network
from .kinetics import Kinetics
from .sampling import Sampling
from .gillespie_ssa import GillespieSSA
from .compile_reactions import CompileReactions
class Athena(Network, Kinetics, GuideRNA, CompileReactions, GillespieSSA, Sampling):
    """Top-level configuration facade for the Athena simulator.

    The mixin bases supply the pipeline stages (GRN construction, kinetics,
    gRNA design, reaction compilation, Gillespie SSA simulation, sampling);
    this class only validates the user configuration, prepares the on-disk
    metadata cache and working directories, and stores everything on ``self``.
    """

    # Sub-directories/files expected inside the metadata cache directory
    # (downloaded from the Zenodo archive at ``url`` when missing).
    grn_dir = 'GRNs'
    grna_libraries_dir = 'gRNA_Libraries'
    sc_pops_dir = 'Single_Cell_Populations'
    ppi_fp = 'Signalling_Pathways/reactome_human_ppi.csv'
    url = 'https://zenodo.org/api/files/0a6d457c-81e1-4296-8254-7f312b0ef77d/athena.zip'

    def __init__(self,
                 ncpus=1,
                 ntfs=10,
                 nhks=100,
                 negenes=100,
                 nkinases=10,
                 ncascades=1,
                 ntfs_per_cascades=1,
                 feature_info=None,
                 feature_network=None,
                 grn_net=None,
                 signalling_net=None,
                 target_genes=None,
                 perturb_tfs=True,
                 perturb_kinases=True,
                 on_target=None,
                 off_target=None,
                 ctrl_label='CTRL',
                 crispr_type=None,
                 ngrnas_per_target=3,
                 grna_library=None,
                 tau=0.05,
                 nbatches=20,
                 noise_mean=1.0,
                 noise_sd=0.005,
                 burn_time=100,
                 perturb_time=100,
                 sample_time=100,
                 update_interval=1,
                 cache_interval=50,
                 save_burn=False,
                 save_protein=False,
                 ncells_per_condition=10000,
                 collapse_mrna=True,
                 map_reference_ls=True,
                 map_reference_cpm=True,
                 simulator_dir='athena',
                 opencl_root='/opt/rocm/opencl',
                 opencl_context="0",
                 cache_dir=os.path.join(os.environ['HOME'], '.cache/athena'),
                 cache_network=True,
                 verbose=False):
        """Validate all configuration groups and set up cache/work dirs.

        Raises Exception on the first invalid parameter encountered.
        """
        print ("Initiate Environmental Parameters...")
        self.ncpus = ncpus
        self.verbose = verbose
        self.cache_network = cache_network
        self.network_name = os.path.basename(simulator_dir)
        self.initiate_opencl(nbatches, opencl_context, opencl_root)
        print ("Check the caches...")
        self.check_caches(cache_dir)
        print ("Setup Simulator Directory...")
        self.setup_simulator_directory(simulator_dir)
        print ("Check Network Parameters...")
        self.check_network_parameters(ntfs, nhks, negenes, nkinases, ncascades, ntfs_per_cascades, feature_info, feature_network, grn_net, signalling_net)
        print ("Check gRNA Parameters...")
        self.check_grna_parameters(target_genes, perturb_tfs, perturb_kinases, on_target, off_target, ngrnas_per_target, ctrl_label, crispr_type, grna_library)
        print ("Check Simulation Parameters...")
        self.check_simulation_parameters(tau, noise_mean, noise_sd, burn_time, perturb_time, sample_time,
                                         update_interval, cache_interval, save_burn, save_protein, ncells_per_condition)
        print ("Check Downsampling Parameters...")
        self.check_downsample_parameters(collapse_mrna, map_reference_ls, map_reference_cpm)

    def cache(self, fp=None):
        """Pickle the whole simulator object to ``fp`` (default: athena.pkl)."""
        if fp is None:
            fp = 'athena.pkl'
        with open(fp, 'wb') as file:
            pickle.dump(self, file)

    def initiate_opencl(self, nbatches, context, opencl_root):
        """Record batch count and export the PyOpenCL environment variables."""
        self.nbatches = nbatches
        self.opencl_root = opencl_root
        # setting up opencl environmental variables
        os.environ['PYOPENCL_CTX'] = context
        os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
        os.environ['OPENCL_ROOT'] = opencl_root

    def setup_simulator_directory(self, simulator_dir):
        """Create the working directory tree, or wipe its files if it exists.

        When the directory already exists every file is removed except those
        directly inside the metadata sub-directory.
        """
        self.simulator_dir = simulator_dir
        self.results_dir = os.path.join(self.simulator_dir, 'results')
        self.metadata_dir = os.path.join(self.simulator_dir, 'metadata')
        self.affinity_dir = os.path.join(self.simulator_dir, 'affinity')
        self.multiplier_dir = os.path.join(self.simulator_dir, 'multiplier')
        self.propensity_dir = os.path.join(self.simulator_dir, 'propensity')
        self.change_vec_dir = os.path.join(self.simulator_dir, 'change_vec')
        self.regulators_dir = os.path.join(self.simulator_dir, 'regulators')
        self.species_vec_dir = os.path.join(self.simulator_dir, 'species_vec')
        if not os.path.exists(self.simulator_dir):
            os.mkdir(self.simulator_dir)
            os.mkdir(self.results_dir)
            os.mkdir(self.metadata_dir)
            os.mkdir(self.affinity_dir)
            os.mkdir(self.multiplier_dir)
            os.mkdir(self.propensity_dir)
            os.mkdir(self.change_vec_dir)
            os.mkdir(self.regulators_dir)
            os.mkdir(self.species_vec_dir)
        else:
            for root, dirs, files in os.walk(self.simulator_dir):
                # keep previously generated metadata files
                if os.path.exists(self.metadata_dir) and root == self.metadata_dir:
                    continue
                for file in files:
                    os.remove(os.path.join(root, file))

    def check_caches(self, cache_dir):
        """Resolve cache paths and download the metadata bundle if absent."""
        self.cache_dir = cache_dir
        self.grn_dir = os.path.join(cache_dir, self.grn_dir)
        self.sc_pops_dir = os.path.join(cache_dir, self.sc_pops_dir)
        self.grna_libraries_dir = os.path.join(cache_dir, self.grna_libraries_dir)
        metadata_exists = os.path.isdir(self.grn_dir) or os.path.isdir(self.sc_pops_dir) or os.path.isdir(self.grna_libraries_dir)
        if not metadata_exists:
            self.download_metadata()

    def check_network_parameters(self, ntfs, nhks, negenes, nkinases, ncascades, ntfs_per_cascades, feature_info, feature_network, grn_net, pathway):
        """Validate the network configuration.

        Either a cached GRN is selected (when feature_info/feature_network are
        not both given) or the user-supplied parquet files are loaded.
        NOTE(review): the ``pathway`` parameter is accepted but never used.
        """
        if feature_info is None or feature_network is None:
            self.feature_info = None
            self.feature_network = None
            grns = os.listdir(self.grn_dir)
            if not type(ntfs) is int:
                raise Exception('ntfs: Parameter Must be Integer Data Type...')
            if not type(nhks) is int:
                raise Exception('nhks: Parameter Must be Integer Data Type...')
            if not type(negenes) is int:
                raise Exception('negenes: Parameter Must be Integer Data Type...')
            if not type(nkinases) is int:
                raise Exception('nkinases: Parameter Must be Integer Data Type...')
            if nkinases > (negenes / 2):
                raise Exception('nkinases: Must be half of Egenes...')
            if not type(ntfs_per_cascades) is int:
                raise Exception('ntfs_per_cascades: Parameter Must be Integer Data Type...')
            # pick the requested cached GRN, or a random one when absent/unknown
            if (grn_net is None) or (not grn_net in grns):
                grn = random.sample(os.listdir(self.grn_dir), k=1)[0]
                self.grn_fp = os.path.join(self.grn_dir, grn)
            else:
                self.grn_fp = os.path.join(self.grn_dir, grn_net)
            # Comment this out until signalling pathway has been implemented.
            # Drop self-interactions from the Reactome PPI before building the graph.
            ppi = pd.read_csv(os.path.join(self.cache_dir, self.ppi_fp))
            ppi = ppi.loc[ppi.Interactor_1_uniprot_id != ppi.Interactor_2_uniprot_id, ]
            self.ppi = ig.Graph.DataFrame(ppi)
            self.cascade_sizes = self.calc_cascade_sizes(nkinases, ncascades)
            self.ntfs_per_cascades = ntfs_per_cascades
            # NOTE(review): nkinases is stored twice (num_kinases and nkinases).
            self.num_kinases, self.num_cascades = nkinases, ncascades
            self.num_tfs, self.num_hks, self.num_egenes, self.nkinases = ntfs, nhks, negenes, nkinases
        else:
            self.feature_info = pd.read_parquet(feature_info)
            self.feature_network = pd.read_parquet(feature_network)
            # NOTE(review): files are written with to_parquet but carry a
            # '.csv' extension — confirm downstream readers expect this.
            self.feature_info.to_parquet(os.path.join(self.metadata_dir, 'feature_info.csv'), compression='brotli')
            self.feature_network.to_parquet(os.path.join(self.metadata_dir, 'feature_network.csv'), compression='brotli')

    def check_grna_parameters(self, target_genes, perturb_tfs, perturb_kinases, on_target, off_target, ngrnas_per_target, ctrl_label, crispr_type, grna_library):
        """Validate the CRISPR/gRNA configuration and load a gRNA library."""
        genes = []
        self.target_genes = []
        self.on_target, self.off_target = None, None
        self.perturb_tfs, self.perturb_kinases = False, False
        grna_libraries = os.listdir(self.grna_libraries_dir)
        # Any failure (e.g. crispr_type is None) leaves self.crispr_type unset.
        try:
            crispr_type = crispr_type.lower()
            if crispr_type in ['interference', 'activation', 'knockout']:
                self.crispr_type = crispr_type
            else:
                raise Exception("crispr_type parameter must be either: activation, interference, or knockout...")
        except:
            self.crispr_type = None
        # use the requested library when cached, otherwise pick one at random
        if grna_library in grna_libraries:
            self.grna_library_name = grna_library
            lib_fp = os.path.join(self.grna_libraries_dir, self.grna_library_name)
        else:
            self.grna_library_name = random.sample(grna_libraries, k=1)[0]
            lib_fp = os.path.join(self.grna_libraries_dir, self.grna_library_name)
        self.grna_library = pd.read_csv(lib_fp).drop(columns=['Unnamed: 0'])
        # NOTE(review): ``probs`` is computed but never used.
        probs = self.grna_library.probability
        if (target_genes is None) and perturb_tfs:
            self.perturb_tfs = perturb_tfs
        if (target_genes is None) and perturb_kinases:
            self.perturb_kinases = perturb_kinases
        if (type(target_genes) is list) and (len(target_genes) != 0):
            self.target_genes = target_genes
        if (not on_target is None) and (type(on_target) is float):
            self.on_target = on_target
        if (not off_target is None) and (type(off_target) is int):
            self.off_target = off_target
        if ngrnas_per_target < 0:
            ngrnas_per_target = 1
        # NOTE(review): this unconditionally overwrites self.target_genes with
        # the empty ``genes`` list set above, discarding any user-supplied
        # target_genes — looks like a bug; confirm intent.
        self.target_genes = genes
        self.ctrl_label = ctrl_label
        self.ngrnas_per_target = ngrnas_per_target

    def check_simulation_parameters(self, tau, noise_mean, noise_sd, burn_time, perturb_time, sample_time,
                                    update_interval, cache_interval, save_burn, save_protein, ncells_per_condition):
        """Validate the SSA simulation parameters and derive the timeline.

        NOTE(review): the tau check parses as
        ``(not isinstance-check) and tau <= 1.0`` — it never fires for
        floats > 1.0; probably meant ``not (type(tau) is float and tau <= 1.0)``.
        NOTE(review): ``save_burn`` is validated but never stored on self.
        """
        if not type(tau) is float and tau <= 1.0:
            raise Exception('tau: Parameter Must be Float Data Type and less than 1...')
        if not type(noise_mean) is float:
            raise Exception('noise_mean: Parameter Must be Float Data Type...')
        if not type(noise_sd) is float:
            raise Exception('noise_sd: Parameter Must be Float Data Type...')
        if not type(burn_time) is int:
            raise Exception('burn_time: Parameter Must be Integer Data Type...')
        if not type(perturb_time) is int:
            raise Exception('perturb_time: Parameter Must be Integer Data Type...')
        if not type(sample_time) is int:
            raise Exception('sample_time: Parameter Must be Integer Data Type...')
        if not type(update_interval) is int:
            raise Exception('update_interval: Parameter Must be Integer Data Type...')
        if not type(cache_interval) is int:
            raise Exception('cache_interval: Parameter Must be Integer Data Type...')
        if not type(save_burn) is bool:
            raise Exception('save_burn: Parameter Must be Boolean Data Type...')
        if not type(save_protein) is bool:
            raise Exception('save_protein: Parameter Must be Boolean Data Type...')
        if perturb_time <= update_interval:
            raise Exception('perturb_time parameter must be greater than update_interval parameter...')
        self.tau = tau
        self.not_perturbed = True
        self.noise_std = noise_sd
        self.noise_mean = noise_mean
        self.cache_size = cache_interval
        self.save_protein = save_protein
        self.cache_interval = cache_interval
        self.update_interval = update_interval
        self.nsims_per_condition = int(ncells_per_condition / (perturb_time / update_interval))
        # setting simulation time
        # cumulative timeline markers: perturbation starts after the burn-in,
        # sampling after the perturbation phase.
        self.perturb_time = burn_time
        self.sample_time = burn_time + perturb_time
        self.sim_time = burn_time + perturb_time + sample_time

    def check_downsample_parameters(self, collapse_mrna, map_reference_ls, map_reference_cpm):
        """Validate downsampling flags and pick a random reference population."""
        if not type(collapse_mrna) is bool:
            raise Exception('collapse_mrna: Parameter Must be Boolean Data Type...')
        if not type(map_reference_ls) is bool:
            raise Exception('map_reference_ls: Parameter Must be Boolean Data Type...')
        if not type(map_reference_cpm) is bool:
            raise Exception('map_reference_cpm: Parameter Must be Boolean Data Type...')
        self.collapse_mrna = collapse_mrna
        self.map_reference_ls = map_reference_ls
        self.map_reference_cpm = map_reference_cpm
        pop_files = os.listdir(self.sc_pops_dir)
        pop_file = random.choices(pop_files)[0]
        self.pop_fp = os.path.join(self.sc_pops_dir, pop_file)

    def download_metadata(self):
        """Download the Required Metadata...

        Fetches the Zenodo archive, extracts it into the cache directory,
        flattens the extracted tree one level deep, and removes the archive
        plus the leftover 'athena'/'__MACOSX' folders.
        """
        print ('Downloading metadata...')
        # download zenodo metadata files
        zip_filename = os.path.basename(self.cache_dir) + '.zip'
        cache_base_dir = os.path.dirname(self.cache_dir)
        zip_filename = os.path.join(cache_base_dir, zip_filename)
        response = requests.get(self.url, allow_redirects=True)
        open(zip_filename, 'wb').write(response.content)
        with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
            zip_ref.extractall(self.cache_dir)
        # move every extracted file into <cache_dir>/<its-parent-dir-name>/
        filepaths = glob.glob(os.path.join(self.cache_dir,'**','*'), recursive=True)
        for old_filepath in filepaths:
            if os.path.isdir(old_filepath):
                continue
            filename = os.path.basename(old_filepath)
            file_dir = os.path.basename(os.path.dirname(old_filepath))
            dir_fp = os.path.join(self.cache_dir, file_dir)
            new_filepath = os.path.join(dir_fp , filename)
            if not os.path.exists(dir_fp):
                os.mkdir(dir_fp)
            shutil.move(old_filepath, new_filepath)
        shutil.rmtree(os.path.join(self.cache_dir, 'athena'))
        shutil.rmtree(os.path.join(self.cache_dir, '__MACOSX'))
        os.remove(zip_filename)

    def calc_cascade_sizes(self, num_kinases, num_cascades):
        """Randomly partition num_kinases into num_cascades positive sizes.

        Resamples whole partitions until their sum reaches num_kinases.
        """
        cascade_sizes = []
        while sum(cascade_sizes) < num_kinases:
            cascade_sizes = []
            kinase_count = num_kinases
            for cascade_i in range(num_cascades):
                if kinase_count == 0:
                    cascade_sizes = []
                # random.sample(range(0), ...) raises when no kinases remain
                try:
                    nk = random.sample(range(kinase_count), k=1)[0] + 1
                    kinase_count = kinase_count - nk
                    cascade_sizes.append(nk)
                except:
                    break
        return cascade_sizes

    def manage_dtypes(self, df):
        """Downcast a DataFrame's float/integer columns to the smallest dtypes."""
        df = df.convert_dtypes()
        fcols = df.select_dtypes('float').columns
        icols = df.select_dtypes('integer').columns
        df[fcols] = df[fcols].apply(pd.to_numeric, downcast='float')
        df[icols] = df[icols].apply(pd.to_numeric, downcast='integer')
        return df
bcaff74a3fb31af55f4d0dcd134ec2f92c623590 | 2,151 | py | Python | Avito/QA_Excellence/scripts/task1/task1.py | stanislav-kudriavtsev/Test-Tasks | fc0c26eb7afc995b48897861554512a8412a6ef3 | [
"CC0-1.0"
] | null | null | null | Avito/QA_Excellence/scripts/task1/task1.py | stanislav-kudriavtsev/Test-Tasks | fc0c26eb7afc995b48897861554512a8412a6ef3 | [
"CC0-1.0"
] | null | null | null | Avito/QA_Excellence/scripts/task1/task1.py | stanislav-kudriavtsev/Test-Tasks | fc0c26eb7afc995b48897861554512a8412a6ef3 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Task 1 - data parsing."""
__author__ = "Stanislav D. Kudriavtsev"
import json
from pathlib import Path
from sys import exit
# Attention.
# the original files were with mistakes
# so they all were passed through JSON
# online validator and beautifier, see
# README in files/cleaned directory
def get_task_data():
    """Load the four task-1 JSON fixtures from ./files/cleaned/.

    Returns
    -------
    dict
        Maps each fixture name ("error", "struct", "testcase", "values")
        to its parsed JSON content.

    On any read/parse failure an ./error.json report is written and the
    process terminates with exit status 1.
    """
    fixture_names = ("error", "struct", "testcase", "values")
    fixtures = {}
    base_dir = Path() / "files" / "cleaned"
    for name in fixture_names:
        fpath = base_dir / f"{name}.json"
        try:
            with open(fpath) as handle:
                fixtures[name] = json.load(handle)
        except Exception as exc:
            print(f"The file {fpath} failed")
            print(f"the exception caught -> {exc}")
            print("./error.json file has been formed")
            with open("error.json", "w", encoding="utf-8") as report:
                payload = {"error": {"message": "Входные файлы некорректны"}}
                json.dump(payload, report, ensure_ascii=False)
            exit(1)
    return fixtures
def process_task(testcase: dict, values: dict):
    """Resolve parameter values in *testcase* using the *values* lookup.

    Mutates and returns *testcase*.  A plain parameter receives the value of
    the entry in ``values["values"]`` whose id matches its own.  A parameter
    that carries its own "values" options is resolved (recursively for nested
    "params") to the title of the option referenced by a values entry.
    """
    lookup = values["values"]
    for param in testcase["params"]:
        options = param.get("values")
        if options:
            for option in options:
                nested = option.get("params")
                if nested:
                    # Resolve nested parameter groups first.
                    option["params"] = process_task({"params": nested}, values)["params"]
                for entry in lookup:
                    if option["id"] == entry["value"]:
                        param["value"] = option["title"]
        else:
            # No own options: take the matching value directly.
            for entry in lookup:
                if param["id"] == entry["id"]:
                    param["value"] = entry["value"]
    return testcase
def main():
    """Load the fixtures and return the testcase with resolved values."""
    fixtures = get_task_data()
    return process_task(fixtures["testcase"], fixtures["values"])


if __name__ == "__main__":
    print(main())
| 29.067568 | 78 | 0.555091 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Task 1 - data parsing."""
__author__ = "Stanislav D. Kudriavtsev"
import json
from pathlib import Path
from sys import exit
# Attention.
# the original files were with mistakes
# so they all were passed through JSON
# online validator and beautifier, see
# README in files/cleaned directory
def get_task_data():
    """Load the four task-1 JSON fixtures from ./files/cleaned/.

    Returns
    -------
    dict
        Maps each fixture name ("error", "struct", "testcase", "values")
        to its parsed JSON content.

    On any read/parse failure an ./error.json report is written and the
    process terminates with exit status 1.
    """
    fixture_names = ("error", "struct", "testcase", "values")
    fixtures = {}
    base_dir = Path() / "files" / "cleaned"
    for name in fixture_names:
        fpath = base_dir / f"{name}.json"
        try:
            with open(fpath) as handle:
                fixtures[name] = json.load(handle)
        except Exception as exc:
            print(f"The file {fpath} failed")
            print(f"the exception caught -> {exc}")
            print("./error.json file has been formed")
            with open("error.json", "w", encoding="utf-8") as report:
                payload = {"error": {"message": "Входные файлы некорректны"}}
                json.dump(payload, report, ensure_ascii=False)
            exit(1)
    return fixtures
def process_task(testcase: dict, values: dict):
    """Resolve parameter values in *testcase* using the *values* lookup.

    Mutates and returns *testcase*.  A plain parameter receives the value of
    the entry in ``values["values"]`` whose id matches its own.  A parameter
    that carries its own "values" options is resolved (recursively for nested
    "params") to the title of the option referenced by a values entry.
    """
    lookup = values["values"]
    for param in testcase["params"]:
        options = param.get("values")
        if options:
            for option in options:
                nested = option.get("params")
                if nested:
                    # Resolve nested parameter groups first.
                    option["params"] = process_task({"params": nested}, values)["params"]
                for entry in lookup:
                    if option["id"] == entry["value"]:
                        param["value"] = option["title"]
        else:
            # No own options: take the matching value directly.
            for entry in lookup:
                if param["id"] == entry["id"]:
                    param["value"] = entry["value"]
    return testcase
def main():
    """Load the fixtures and return the testcase with resolved values."""
    fixtures = get_task_data()
    return process_task(fixtures["testcase"], fixtures["values"])


if __name__ == "__main__":
    print(main())
| 0 | 0 | 0 |
fe9cd45bc69ace98750dc3416020c7bbca73ef19 | 1,828 | py | Python | scalica/web/scalica/populate_test_2.py | BlindGhosty/LSWA-Project | 950fa492426bcebe60872c9a24e00399ff63f045 | [
"MIT"
] | null | null | null | scalica/web/scalica/populate_test_2.py | BlindGhosty/LSWA-Project | 950fa492426bcebe60872c9a24e00399ff63f045 | [
"MIT"
] | 2 | 2017-12-03T23:04:13.000Z | 2017-12-03T23:49:25.000Z | scalica/web/scalica/populate_test_2.py | BlindGhosty/LSWA-Project | 950fa492426bcebe60872c9a24e00399ff63f045 | [
"MIT"
import sys
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.utils import timezone
from micro.models import Following
import random

User = get_user_model()

# Usernames are prefixed "L" (even ids) or "R" (odd ids); users only follow
# users of the same parity, producing two disjoint follow graphs for testing.
user_nameL = "L"
user_nameR = "R"
password = "pass"
TOTAL_USERS = 20
MAX_FOLLOWERS = 3

# Create TOTAL_USERS accounts, alternating the L/R prefix by parity.
i = 0
while (i < TOTAL_USERS):
    prefix = user_nameL if (i % 2 == 0) else user_nameR
    gen_string = prefix + str(i)
    temp_user = User.objects.create_user(username=gen_string)
    temp_user.set_password(password)
    temp_user.save()
    assert authenticate(username=gen_string, password=password)
    i += 1

user_array = User.objects.all()

# Give every user MAX_FOLLOWERS - 1 same-parity followees, no duplicates
# and no self-follow (the user's own index seeds duplicate_list).
k = 0
while (k < TOTAL_USERS):
    follower_user = user_array[k]
    j = 1
    duplicate_list = [k]
    while (j < MAX_FOLLOWERS):
        # random.randint is inclusive on both ends, so cap at TOTAL_USERS - 1
        # to stay inside user_array.  (The original line had a missing closing
        # parenthesis and an out-of-range upper bound.)
        next_index = random.randint(0, TOTAL_USERS - 1)
        # This allows users to only follow users of the same EVEN/ODD type.
        # (The original `next_index in duplicate_list == False` chained
        # comparison always evaluated to False, and it appended `i` instead
        # of `next_index`.)
        if next_index not in duplicate_list and (next_index % 2) == (k % 2):
            duplicate_list.append(next_index)
            followee_user = user_array[next_index]
            newFollow = Following(follower=follower_user, followee=followee_user, follow_date=timezone.now())
            newFollow.save()
            j += 1
    k += 1

# HERE WE ADD THE LONE USER W, if we do things correctly, he should live on his own with no followers.
final_user = "RUSH_SUCKS"
temp_user = User.objects.create_user(username=final_user)
temp_user.set_password(password)
temp_user.save()
assert authenticate(username=final_user, password=password)
import sys
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.utils import timezone
from micro.models import Following
import random

User = get_user_model()

# Usernames are prefixed "L" (even ids) or "R" (odd ids); users only follow
# users of the same parity, producing two disjoint follow graphs for testing.
user_nameL = "L"
user_nameR = "R"
password = "pass"
TOTAL_USERS = 20
MAX_FOLLOWERS = 3

# Create TOTAL_USERS accounts, alternating the L/R prefix by parity.
i = 0
while (i < TOTAL_USERS):
    prefix = user_nameL if (i % 2 == 0) else user_nameR
    gen_string = prefix + str(i)
    temp_user = User.objects.create_user(username=gen_string)
    temp_user.set_password(password)
    temp_user.save()
    assert authenticate(username=gen_string, password=password)
    i += 1

user_array = User.objects.all()

# Give every user MAX_FOLLOWERS - 1 same-parity followees, no duplicates
# and no self-follow (the user's own index seeds duplicate_list).
k = 0
while (k < TOTAL_USERS):
    follower_user = user_array[k]
    j = 1
    duplicate_list = [k]
    while (j < MAX_FOLLOWERS):
        # random.randint is inclusive on both ends, so cap at TOTAL_USERS - 1
        # to stay inside user_array.  (The original line had a missing closing
        # parenthesis and an out-of-range upper bound.)
        next_index = random.randint(0, TOTAL_USERS - 1)
        # This allows users to only follow users of the same EVEN/ODD type.
        # (The original `next_index in duplicate_list == False` chained
        # comparison always evaluated to False, and it appended `i` instead
        # of `next_index`.)
        if next_index not in duplicate_list and (next_index % 2) == (k % 2):
            duplicate_list.append(next_index)
            followee_user = user_array[next_index]
            newFollow = Following(follower=follower_user, followee=followee_user, follow_date=timezone.now())
            newFollow.save()
            j += 1
    k += 1

# HERE WE ADD THE LONE USER W, if we do things correctly, he should live on his own with no followers.
final_user = "RUSH_SUCKS"
temp_user = User.objects.create_user(username=final_user)
temp_user.set_password(password)
temp_user.save()
assert authenticate(username=final_user, password=password)
| 0 | 0 | 0 |
75dae17e02466a6554ef3f2682cb050534a5fe72 | 35 | py | Python | src/models/train_model.py | suppathak/anomaly-detection-HTM | baca3c6bcf68b27ebefb1735d9f73c1cb15a1e0f | [
"FTL"
] | 7 | 2020-06-30T15:44:44.000Z | 2022-03-02T12:23:40.000Z | src/models/train_model.py | suppathak/anomaly-detection-HTM | baca3c6bcf68b27ebefb1735d9f73c1cb15a1e0f | [
"FTL"
] | 39 | 2021-09-09T21:42:19.000Z | 2022-03-21T15:30:08.000Z | src/models/train_model.py | suppathak/anomaly-detection-HTM | baca3c6bcf68b27ebefb1735d9f73c1cb15a1e0f | [
"FTL"
] | 17 | 2020-06-19T20:55:22.000Z | 2021-08-30T16:42:22.000Z | """Here goes the training code."""
| 17.5 | 34 | 0.657143 | """Here goes the training code."""
| 0 | 0 | 0 |
e226b11de8770b5ab2d4e922dd1283b226547d37 | 7,804 | py | Python | src/aggregator.py | dmartyanov/timeseries-vae-anomaly | 354127c1a34e4ddc3dbb37d38538964f6aff068e | [
"MIT"
] | null | null | null | src/aggregator.py | dmartyanov/timeseries-vae-anomaly | 354127c1a34e4ddc3dbb37d38538964f6aff068e | [
"MIT"
] | 4 | 2020-11-13T18:36:26.000Z | 2022-02-10T00:41:38.000Z | src/aggregator.py | dmartyanov/timeseries-vae-anomaly | 354127c1a34e4ddc3dbb37d38538964f6aff068e | [
"MIT"
] | 1 | 2020-03-03T17:30:56.000Z | 2020-03-03T17:30:56.000Z | import os
import json
import pandas as pd
import numpy as np
import uuid
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import random
from datetime import datetime, timedelta
# Output directory for rendered incident images (None when the env var is unset).
SAMPLES_FOLDER = os.environ.get('SAMPLES_FOLDER')
# Service call graph (apparently the Istio "bookinfo" demo topology).
# NOTE(review): not referenced in the visible code of this module.
application_diagram = {
    'productpage': set(['reviews', 'details']),
    'reviews': set(['ratings']),
    'details': set()
}
# Shading colours for anomalous ranges: k1 for severity "1", k2 for higher.
k1 = '#DC7633'
k2 = '#E74C3C'
# Assumed interval between consecutive metric samples (used for axis labels).
delta = timedelta(seconds=5)
if __name__ == '__main__':
    # Smoke-test run against a local data dump: group anomalies into
    # incidents, render one report image per incident, then print the results.
    aggregator = Aggregator(255, 10)
    with open('/Users/dmitry/pros/ngcops-pro/timeseries-vae-anomaly/data/anomaly.json', 'r') as anomaly_file:
        anomaly_payload = json.load(anomaly_file)
    incident_map, relevance_report = aggregator.build_incidents_report(anomaly_payload)
    metric_frame = pd.read_csv('/Users/dmitry/pros/ngcops-pro/timeseries-vae-anomaly/data/metrics_0_filter.csv')
    for incident_id, incident in incident_map.items():
        report_view = VisualizeReports(metric_frame, anomaly_payload, incident)
        report_view.visualize_with_siblings('{}/{}_vis.png'.format(SAMPLES_FOLDER, incident_id))
    print('\n')
    print(relevance_report)
    print('\n')
    print(incident_map)
| 35.472727 | 127 | 0.582394 | import os
import json
import pandas as pd
import numpy as np
import uuid
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import random
from datetime import datetime, timedelta
# Output directory for rendered incident images (None when the env var is unset).
SAMPLES_FOLDER = os.environ.get('SAMPLES_FOLDER')
# Service call graph (apparently the Istio "bookinfo" demo topology).
# NOTE(review): not referenced in the visible code of this module.
application_diagram = {
    'productpage': set(['reviews', 'details']),
    'reviews': set(['ratings']),
    'details': set()
}
# Shading colours for anomalous ranges: k1 for severity "1", k2 for higher.
k1 = '#DC7633'
k2 = '#E74C3C'
# Assumed interval between consecutive metric samples (used for axis labels).
delta = timedelta(seconds=5)
class Aggregator(object):
    """Groups per-metric anomaly reports into time-correlated incidents.

    ``anomaly_data`` is expected to map metric code -> {'ranges': {severity:
    [[start, end], ...]}} over a window of ``period_length`` samples
    (severity keys are numeric strings) — confirm against the producer.
    """

    def __init__(self,
                 period_length,
                 causal_sensitivity=3):
        # period_length: samples in the analysed window; indices nearer the
        #   window end score higher relevance.
        # causal_sensitivity: slack (in samples) when matching an anomalous
        #   range to an existing incident window.
        self.period_length = period_length
        self.causal_sensitivity = causal_sensitivity
        self.relevance_decay = 1

    def __filter_metrics(self, m):
        # Keep every non-percentile metric; of percentile series keep P95 only.
        if '_P' in m:
            return True if '_P95' in m else False
        return True

    def __relevance_function(self, anomaly_obj):
        # Severity-weighted recency score: each anomalous sample contributes
        # severity / (distance from window end).
        ranges = anomaly_obj['ranges']
        relevance = 0
        for tpe, range in ranges.items():  # NOTE: shadows the builtin ``range``
            for r in range:
                for index in np.arange(r[0], r[1]):
                    relevance += float(tpe) / ((self.period_length - index) * self.relevance_decay)
        return relevance

    def build_time_relevance_report(self,
                                    anomaly_data,
                                    take=3,
                                    with_siblings=False):
        """Return (metric, relevance) pairs sorted by descending relevance.

        NOTE(review): ``take`` and ``with_siblings`` are currently unused.
        """
        report = []
        for metric, metric_obj in anomaly_data.items():
            if self. __filter_metrics(metric):
                relevance = self.__relevance_function(metric_obj)
                report.append((metric, relevance))
        report = sorted(report, key=lambda x: -x[1])
        return report

    def build_incidents_report(self, data):
        """Assign each relevant metric to an incident.

        Returns (incidents_report, relevance_report): a dict of incident id ->
        incident object, and the sorted relevance report.
        """
        anomaly_data = dict(data)
        relevance_report = self.build_time_relevance_report(anomaly_data)
        incidents_report = {}
        for i in range(len(relevance_report)):
            added = False
            # try to attach the metric to an existing incident first
            for key, incident_obj in incidents_report.items():
                if not added:
                    added = self.__add_to_incindent(incident_obj, anomaly_data, relevance_report[i])
            if not added:
                self.__create_incident(incidents_report, anomaly_data, relevance_report[i])
        return incidents_report, relevance_report

    def __add_to_incindent(self, incident_obj, anomaly_data, report_item):
        # (sic: "incindent")  Attach report_item's metric to incident_obj when
        # one of its ranges ends inside the incident window widened by
        # causal_sensitivity on both sides; the incident range is then grown.
        incident_range = incident_obj.get('range') or []
        added = False
        for key, ranges in anomaly_data[report_item[0]].get('ranges').items():
            for range in ranges:  # NOTE: shadows the builtin ``range``
                if (incident_range[0] - self.causal_sensitivity) < range[1] < (incident_range[1] + self.causal_sensitivity):
                    incident_range = [min(incident_range[0], range[0]), max(incident_range[1], range[1])]
                    incident_obj['metrics'].append(report_item[0])
                    added = True
        if added:
            incident_obj['range'] = incident_range
        return added

    def __create_incident(self, incidents_report, anomaly_data, report_item):
        # Open a new incident seeded at the window end and grow it over all of
        # the metric's qualifying ranges; registered under a random numeric id.
        incident_range = [self.period_length, self.period_length]
        added = False
        for key, ranges in anomaly_data[report_item[0]].get('ranges').items():
            for range in ranges:  # NOTE: shadows the builtin ``range``
                if (incident_range[0] - self.causal_sensitivity) < range[1] < (incident_range[1] + self.causal_sensitivity):
                    incident_range = [min(incident_range[0], range[0]), max(incident_range[1], range[1])]
                    if not added:
                        inc_uuid = random.randint(1000000, 9000000)
                        print('New incident is created {}'.format(inc_uuid))
                        incident_obj = {
                            'id': inc_uuid,
                            'range': incident_range,
                            'metrics': [report_item[0]]
                        }
                        incidents_report[inc_uuid] = incident_obj
                        added = True
                    else:
                        incident_range = [min(incident_range[0], range[0]), max(incident_range[1], range[1])]
                        incident_obj['metrics'].append(report_item[0])
        if added:
            incident_obj['range'] = incident_range
        return added
class VisualizeReports(object):
    """Renders one matplotlib figure per incident.

    Each metric in the incident gets a subplot showing the series of all
    sibling pods of the same service, with anomalous ranges shaded.
    """

    def __init__(self,
                 metric_values,
                 anomaly_data,
                 incident_report):
        # metric_values: DataFrame with one column per '<pod>:<metric>' code.
        # anomaly_data: metric code -> {'ranges': {severity: [[start, end], ...]}}
        # incident_report: a single incident dict with 'range' and 'metrics'.
        self.metric_values = metric_values
        self.anomaly_data = anomaly_data
        self.incidents_report = incident_report
        # Hard-coded pod names per service.  NOTE(review): these suffixes
        # change on redeploy; consider deriving them from the metric columns.
        self.siblings_map = {
            'productpage': set(['productpage-v1-6cc647db65-f772m']),
            'ratings': set(['ratings-v1-794f554859-zzvp5']),
            'details': set(['details-v1-d885f8487-w5zqv']),
            'reviews': set(['reviews-v1-5df856fc47-5mgfz', 'reviews-v2-7678cf597-xhjc5', 'reviews-v3-546ff94d8c-v7rhg'])
        }

    def visualize_with_siblings(self, out_f):
        """Plot every metric of the incident as stacked subplots; save to out_f."""
        number_of_metrics = len(self.incidents_report.get('metrics'))
        fig, axx = plt.subplots(number_of_metrics, 1, sharex=True,
                                figsize=(9, 3 + 2 * number_of_metrics),
                                dpi=80)
        i = 0
        for metric in self.incidents_report.get('metrics'):
            self.__plot_metric(axx[i], metric)
            i += 1
        # label roughly every tenth sample with a wall-clock timestamp
        label_period = int(self.metric_values.shape[0] / 10)
        xtick_location = self.metric_values.index.tolist()[::label_period]
        xtick_labels = self.__build_list_timestamps(xtick_location)
        plt.xticks(ticks=xtick_location, labels=xtick_labels, rotation=60, fontsize=12, horizontalalignment='center', alpha=.7)
        plt.yticks(fontsize=12, alpha=.7)
        plt.savefig(out_f)

    def __build_list_timestamps(self, indices):
        # Map sample indices to HH:MM:SS strings, assuming one sample per
        # ``delta`` and the last sample taken 20*delta before "now".
        result = []
        now = datetime.now() - (20 * delta)
        last = indices[-1]
        for i in range(len(indices)):
            tick = now - delta * (last - indices[i])
            result.append(tick.strftime('%H:%M:%S'))
        return result

    def __plot_metric(self, ax, metric_code):
        # metric_code is '<pod>:<metric-id>'; plot the metric for every
        # sibling pod of the same service, then shade this pod's anomalies.
        prop_cycle = plt.rcParams['axes.prop_cycle']
        colors = prop_cycle.by_key()['color']
        parts = metric_code.split(':')
        app_code = parts[0]
        app_tpe = app_code.split('-')[0]
        metric_id = parts[1]
        ax.title.set_text(metric_id)
        ax.set_ylabel('seconds')
        index = self.metric_values.index
        i = 0
        for pod in self.siblings_map.get(app_tpe):
            m_code = '{}:{}'.format(pod, metric_id)
            ts = self.metric_values[m_code]
            ax.plot(index, ts, color=colors[i], label=pod)
            i += 1
        anomaly_report = self.anomaly_data[metric_code]
        ranges = anomaly_report.get('ranges')
        for k in ranges.keys():
            if len(ranges[k]) > 0:
                for start, end in ranges[k]:
                    kk = float(k)
                    # severity 1 -> k1, higher -> k2; opacity scales with severity
                    c = k1 if kk == 1 else k2
                    ax.axvspan(index[start], index[end - 1], color=c, alpha=0.16 * float(kk))
        ax.grid()
        ax.legend()
if __name__ == '__main__':
    # Smoke-test run against a local data dump: group anomalies into
    # incidents, render one report image per incident, then print the results.
    aggregator = Aggregator(255, 10)
    with open('/Users/dmitry/pros/ngcops-pro/timeseries-vae-anomaly/data/anomaly.json', 'r') as anomaly_file:
        anomaly_payload = json.load(anomaly_file)
    incident_map, relevance_report = aggregator.build_incidents_report(anomaly_payload)
    metric_frame = pd.read_csv('/Users/dmitry/pros/ngcops-pro/timeseries-vae-anomaly/data/metrics_0_filter.csv')
    for incident_id, incident in incident_map.items():
        report_view = VisualizeReports(metric_frame, anomaly_payload, incident)
        report_view.visualize_with_siblings('{}/{}_vis.png'.format(SAMPLES_FOLDER, incident_id))
    print('\n')
    print(relevance_report)
    print('\n')
    print(incident_map)
| 6,360 | 14 | 342 |
d1f7fa38176bcb6053a6fa5ffbbb440fd27e5d2f | 480 | py | Python | codes_/0728_Self_Dividing_Numbers.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/0728_Self_Dividing_Numbers.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/0728_Self_Dividing_Numbers.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | # %% [728. Self Dividing Numbers](https://leetcode.com/problems/self-dividing-numbers/)
# 問題:leftからrightまででself-dividingの数をリストで返せ。self-dividingは、各桁の数字で割り切れる
# 解法:各桁は`k, m = m % 10, m // 10`のように更新する
| 30 | 87 | 0.6125 | # %% [728. Self Dividing Numbers](https://leetcode.com/problems/self-dividing-numbers/)
# 問題:leftからrightまででself-dividingの数をリストで返せ。self-dividingは、各桁の数字で割り切れる
# 解法:各桁は`k, m = m % 10, m // 10`のように更新する
class Solution:
    """LeetCode 728: collect the self-dividing numbers in [left, right]."""

    def selfDividingNumbers(self, left: int, right: int) -> "List[int]":
        # The annotation is a string on purpose: the original bare ``List[int]``
        # raised NameError at class-definition time because ``typing.List`` is
        # never imported in this file; a string annotation is not evaluated.
        # ``ok`` (defined below at module level) tests self-divisibility.
        return [n for n in range(left, right + 1) if ok(n)]
def ok(n):
    """Return True when every decimal digit of n is nonzero and divides n."""
    remaining = n
    while remaining:
        digit = remaining % 10
        remaining //= 10
        # A zero digit, or a digit that does not divide n, disqualifies n.
        if digit == 0 or n % digit != 0:
            return False
    return True
| 216 | -6 | 71 |
f4486965ca8aab1f75920c2f84fd0b24dd3afd98 | 461 | py | Python | cvat/apps/project_submission/signals.py | hukkelas/cvat_tdt4265 | f389ba8c3e5dab7408ef5ca0ecdac061b5e07c8c | [
"MIT"
] | null | null | null | cvat/apps/project_submission/signals.py | hukkelas/cvat_tdt4265 | f389ba8c3e5dab7408ef5ca0ecdac061b5e07c8c | [
"MIT"
] | 1 | 2020-09-17T19:27:02.000Z | 2020-09-29T11:56:26.000Z | cvat/apps/project_submission/signals.py | ErlingLie/cvat | f053d14955b1e48bf6498466949f4beb1833fe8e | [
"MIT"
] | null | null | null | from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth import get_user_model
from .models import LeaderboardSettings
User = get_user_model()
@receiver(post_save, sender=User)
def update_submission_map(sender, instance, created, **kwargs):
    """Create the associated LeaderboardSettings row whenever a user is created.

    Connected to the post_save signal of the user model; updates (not
    creations) are ignored.
    """
    if not created:
        return
    LeaderboardSettings.objects.create(user=instance)
| 30.733333 | 82 | 0.787419 | from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth import get_user_model
from .models import LeaderboardSettings
User = get_user_model()
@receiver(post_save, sender=User)
def update_submission_map(sender, instance, created, **kwargs):
    """Create the associated LeaderboardSettings row whenever a user is created.

    Connected to the post_save signal of the user model; updates (not
    creations) are ignored.
    """
    if not created:
        return
    LeaderboardSettings.objects.create(user=instance)
| 0 | 0 | 0 |
7dc5b102a1b1795dd4b9eb29405a8e8ca9487934 | 108 | py | Python | handlers/__init__.py | DurbeKK/tg_pdf_bot | 75a180cc7c876df84a4cac825939898892a43aa9 | [
"MIT"
] | 3 | 2021-12-03T09:27:54.000Z | 2021-12-09T03:19:42.000Z | handlers/__init__.py | DurbeKK/tg-pdf-bot | 75a180cc7c876df84a4cac825939898892a43aa9 | [
"MIT"
] | null | null | null | handlers/__init__.py | DurbeKK/tg-pdf-bot | 75a180cc7c876df84a4cac825939898892a43aa9 | [
"MIT"
] | null | null | null | from . import merge_callbacks, merge_commands
from . import basic_commands, crypt, compress, convert, split | 54 | 61 | 0.814815 | from . import merge_callbacks, merge_commands
from . import basic_commands, crypt, compress, convert, split | 0 | 0 | 0 |
3eb3a1bc12e6c06f92dd0db8ad9e095e6a9e03cd | 1,015 | py | Python | bmds_server/common/management/commands/dump_test_db.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 1 | 2019-07-09T16:42:15.000Z | 2019-07-09T16:42:15.000Z | bmds_server/common/management/commands/dump_test_db.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 103 | 2016-11-14T15:58:53.000Z | 2022-03-07T21:01:03.000Z | bmds_server/common/management/commands/dump_test_db.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 2 | 2017-03-17T20:43:22.000Z | 2018-01-04T19:15:18.000Z | from io import StringIO
from pathlib import Path
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
| 31.71875 | 80 | 0.662069 | from io import StringIO
from pathlib import Path
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    help = """Dump test database into a fixture."""

    def handle(self, *args, **options):
        """Serialize selected apps from the test database into the fixture file.

        Raises:
            CommandError: when the configured default database is not a test DB.
        """
        # Guard: only ever dump a test database, never a real one.
        if "test" not in settings.DATABASES["default"]["NAME"]:
            raise CommandError("Must be using a test database to execute.")

        buffer = StringIO()
        dump_options = dict(
            format="yaml",
            indent=2,
            stdout=buffer,
            use_natural_foreign_keys=True,
            use_natural_primary_keys=True,
        )
        # Same apps, same order, as before; all accumulate into one buffer.
        for app_label in ("contenttypes", "auth", "analysis"):
            call_command("dumpdata", app_label, **dump_options)

        fixture_path = Path(settings.TEST_DB_FIXTURE)
        fixture_path.parent.mkdir(exist_ok=True, parents=True)
        fixture_path.write_text(buffer.getvalue())
| 709 | 85 | 23 |
abdeb184ccdd44588dd8af406dd4aae199799b12 | 81 | py | Python | codesignal/arcade/largestNumber.py | stephanosterburg/coding_challenges | 601cf1360a7fdf068487106ba995955407365983 | [
"MIT"
] | null | null | null | codesignal/arcade/largestNumber.py | stephanosterburg/coding_challenges | 601cf1360a7fdf068487106ba995955407365983 | [
"MIT"
] | null | null | null | codesignal/arcade/largestNumber.py | stephanosterburg/coding_challenges | 601cf1360a7fdf068487106ba995955407365983 | [
"MIT"
] | 1 | 2019-11-08T00:49:14.000Z | 2019-11-08T00:49:14.000Z |
n = 2
print(largestNumber(n))
def largestNumber(n):
    """Return the largest n-digit number, i.e. the integer made of n nines."""
    return int(str(9) * n)
n = 2
print(largestNumber(n))
| 27 | 0 | 22 |
d4e36c47d10ab2b44cf48d5216eb58aa73ba1d4c | 1,409 | py | Python | concat_csv.py | t2hk/scdv_glove_elasticsearch | 41cd336decf1e14e77439caaa26f64edf28ce42b | [
"Apache-2.0"
] | 2 | 2020-01-07T15:44:04.000Z | 2020-02-28T08:03:15.000Z | concat_csv.py | t2hk/scdv_glove_elasticsearch | 41cd336decf1e14e77439caaa26f64edf28ce42b | [
"Apache-2.0"
] | null | null | null | concat_csv.py | t2hk/scdv_glove_elasticsearch | 41cd336decf1e14e77439caaa26f64edf28ce42b | [
"Apache-2.0"
] | null | null | null | import re, glob, sys, os, argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('csv_dir')
parser.add_argument('output_file')
args = parser.parse_args()
# Paths of all CSV files under the specified CSV directory.
files = glob.glob(args.csv_dir + '/*.csv')
major_class_name = "業種(大分類)_分類名"
medium_class_name = "業種(中分類)_分類名"
small_class_name = "業種(小分類)_分類名"
# Read every CSV file, process the text, and write it out to the output file.
wakati_only_file = args.output_file.replace('.csv', '.txt')
with open(args.output_file, "w") as output_csv:
output_csv.writelines('業種(大分類),文章\n')
for file in files:
csv_file_name = os.path.basename(file)
df = pd.read_csv(file)
# Decide the situation column name per file type.
col_name = '災害状況'
if 'kikaisaigai' in file:
col_name = '災害発生状況'
print(file)
sentences = df[col_name]
# Classification columns.
major_class = df[major_class_name]
medium_class = df[medium_class_name]
small_class = df[small_class_name]
for col, sentence in enumerate(sentences):
nodes = []
# When a category is given, read it and use it as the label.
label_str = ''
if type(major_class[col]) is str:
label_str += str(major_class[col])
nodes.append(label_str)
# Strip newline characters that may be embedded in the sentence.
sentence = ''.join(str(sentence).splitlines())
nodes.append('"' + sentence + '"')
output_csv.writelines(','.join(nodes) + '\n')
| 23.881356 | 59 | 0.658623 | import re, glob, sys, os, argparse
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('csv_dir')
parser.add_argument('output_file')
args = parser.parse_args()
# Paths of all CSV files under the specified CSV directory.
files = glob.glob(args.csv_dir + '/*.csv')
major_class_name = "業種(大分類)_分類名"
medium_class_name = "業種(中分類)_分類名"
small_class_name = "業種(小分類)_分類名"
# Read every CSV file, process the text, and write it out to the output file.
wakati_only_file = args.output_file.replace('.csv', '.txt')
with open(args.output_file, "w") as output_csv:
output_csv.writelines('業種(大分類),文章\n')
for file in files:
csv_file_name = os.path.basename(file)
df = pd.read_csv(file)
# Decide the situation column name per file type.
col_name = '災害状況'
if 'kikaisaigai' in file:
col_name = '災害発生状況'
print(file)
sentences = df[col_name]
# Classification columns.
major_class = df[major_class_name]
medium_class = df[medium_class_name]
small_class = df[small_class_name]
for col, sentence in enumerate(sentences):
nodes = []
# When a category is given, read it and use it as the label.
label_str = ''
if type(major_class[col]) is str:
label_str += str(major_class[col])
nodes.append(label_str)
# Strip newline characters that may be embedded in the sentence.
sentence = ''.join(str(sentence).splitlines())
nodes.append('"' + sentence + '"')
output_csv.writelines(','.join(nodes) + '\n')
| 0 | 0 | 0 |
aa9464e6bc496a9384634706e8a46594b1baf579 | 1,914 | py | Python | tests/webapp/driver_wrapper.py | amplify-education/selen_kaa | 8f3f683d97f15be8bda050975c64c7883471e648 | [
"MIT"
] | 4 | 2019-11-10T19:17:15.000Z | 2021-07-20T12:41:25.000Z | tests/webapp/driver_wrapper.py | amplify-education/selen_kaa | 8f3f683d97f15be8bda050975c64c7883471e648 | [
"MIT"
] | 3 | 2020-04-15T14:44:50.000Z | 2020-04-15T14:59:35.000Z | tests/webapp/driver_wrapper.py | amplify-education/selen_kaa | 8f3f683d97f15be8bda050975c64c7883471e648 | [
"MIT"
] | 1 | 2022-01-24T10:05:29.000Z | 2022-01-24T10:05:29.000Z | """Just a class to verify the wrapping works"""
import re
import time
import logging
from selenium.common.exceptions import WebDriverException, UnexpectedAlertPresentException
from selen_kaa.webdriver import SeWebDriver
from selen_kaa.element.se_web_element import SeWebElement
from selen_kaa.utils import se_utils
DEFAULT_TIMEOUT = 7
TimeoutType = se_utils.TimeoutType
class ElementNotClickableError(WebDriverException):
""" Special exception for cases where element can't receive a click. """
@staticmethod
| 31.377049 | 117 | 0.713166 | """Just a class to verify the wrapping works"""
import re
import time
import logging
from selenium.common.exceptions import WebDriverException, UnexpectedAlertPresentException
from selen_kaa.webdriver import SeWebDriver
from selen_kaa.element.se_web_element import SeWebElement
from selen_kaa.utils import se_utils
DEFAULT_TIMEOUT = 7
TimeoutType = se_utils.TimeoutType
class WebElementWrapper(SeWebElement):
    """SeWebElement extended with a negative-click assertion helper."""

    def should_be_unclickable(self, timeout: TimeoutType = 0.1):
        """Return True when clicking the element raises ElementNotClickableError."""
        clickable = True
        try:
            self.click(timeout)
        except ElementNotClickableError:
            clickable = False
        return not clickable
class ElementNotClickableError(WebDriverException):
    """ Special exception for cases where element can't receive a click. """

    # Pre-compiled once; matches Selenium's "other element would receive
    # the click" message.
    _NOT_CLICKABLE_RE = re.compile(
        r"is not clickable at point \([0-9]+, [0-9]+\). "
        r"Other element would receive the click:"
    )

    @staticmethod
    def can_handle_exception(error: WebDriverException):
        """Tell whether *error* is the 'element not clickable' WebDriver error."""
        return ElementNotClickableError._NOT_CLICKABLE_RE.search(error.msg) is not None
class DriverWrapper(SeWebDriver):
    """SeWebDriver variant whose element factories produce WebElementWrapper."""

    def __init__(self, webdriver):
        super().__init__(webdriver)

    @staticmethod
    def be_idle_for(sleeptime):
        """Just sleep, when you need it.
        Warning! Don't use for wait element, better check out 'wait' functions.
        """
        time.sleep(sleeptime)

    def get_screenshot_as_png(self):
        """Return a PNG screenshot, or None (with an error log) when the driver fails."""
        try:
            return self.webdriver.get_screenshot_as_png()
        except (WebDriverException, UnexpectedAlertPresentException):
            logging.error("Unable to get a screenshot from WebDriver.")

    def init_web_element(self, selector: str, timeout: TimeoutType = 1):
        """Create one wrapped element for *selector*."""
        return WebElementWrapper(self.webdriver, selector, timeout)

    def init_all_web_elements(self, selector: str, timeout: TimeoutType = None):
        """Create a wrapped-element collection for *selector*."""
        elements = super().init_all_web_elements(selector, timeout)
        elements.element_type = WebElementWrapper
        return elements
| 938 | 351 | 99 |
7978347b2616c4e3c10be2b460295ceffd96f5c8 | 2,993 | py | Python | app/request.py | scottwedge/News-Highlight | c321d683dde5bd7f4a38f100bcaa91e1f4de2e08 | [
"MIT"
] | null | null | null | app/request.py | scottwedge/News-Highlight | c321d683dde5bd7f4a38f100bcaa91e1f4de2e08 | [
"MIT"
] | 2 | 2020-02-06T22:03:41.000Z | 2020-02-07T19:08:42.000Z | app/request.py | scottwedge/News-Highlight | c321d683dde5bd7f4a38f100bcaa91e1f4de2e08 | [
"MIT"
] | 2 | 2020-02-06T19:11:39.000Z | 2020-02-06T19:59:56.000Z | # from app import app
import urllib.request
import json
from .models import Source,Article
# Source = source.Source
# Getting api key
api_key = None
# Getting the news base url
source_base_url = None
article_base_url = None
def configure_request(app):
    '''
    Acquire the api key and base urls from the Flask app configuration
    and publish them as module-level globals used by the fetch helpers.
    '''
    global api_key, sources_base_url, article_base_url
    config = app.config
    api_key = config['NEWS_API_KEY']
    sources_base_url = config['NEWS_API_BASE_URL']
    article_base_url = config['EVERYTHING_SOURCE_BASE_URL']
def get_sources(category):
    '''
    Fetch the news sources for *category* and return them as Source objects,
    or None when the response carries no sources.
    '''
    request_url = sources_base_url.format(category)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
        results = None
        if payload['sources']:
            results = process_sources(payload['sources'])
    return results
def process_sources(sources_results):
    '''
    Transform raw source dictionaries into Source objects.

    Args:
        sources_results: list of dicts with source details
    Returns:
        list of Source objects
    '''
    return [
        Source(
            item.get('id'),
            item.get('name'),
            item.get('description'),
            item.get('url'),
            item.get('category'),
        )
        for item in sources_results
    ]
def get_article(source):
    '''
    Fetch the articles for the given news source and return them as a list
    of Article objects, or None when the payload carries no articles.
    '''
    # Bug fixes vs. the original, which raised NameError on every call:
    # `base_url` -> `article_base_url` (module global set by configure_request),
    # `urrlib` -> `urllib`, urlopen(get_article) -> urlopen(get_article_url),
    # and `process_results` -> `process_article`.
    get_article_url = article_base_url.format(source, api_key)
    with urllib.request.urlopen(get_article_url) as url:
        get_article_data = url.read()
        get_article_response = json.loads(get_article_data)

        article_results = None

        # NOTE(review): newsapi.org responses normally key the list as
        # 'articles'; the original's 'article' key is preserved here —
        # confirm against the live API before changing behavior.
        if get_article_response['article']:
            article_results_list = get_article_response['article']
            article_results = process_article(article_results_list)

    return article_results
def process_article(article_results):
    '''
    Transform raw article dictionaries into Article objects.

    Args:
        article_results: list of dicts with article details
    Returns:
        list of Article objects (entries without date, author or image are skipped)
    '''
    articles = []
    for item in article_results:
        author = item.get('author')
        image = item.get('urlToImage')
        date = item.get('publishedat')
        # Keep only complete entries, exactly as before.
        if not (date and author and image):
            continue
        articles.append(
            Article(author, item.get('title'), item.get('description'),
                    item.get('url'), image, date)
        )
    return articles
| 26.486726 | 86 | 0.732376 | # from app import app
import urllib.request
import json
from .models import Source,Article
# Source = source.Source
# Getting api key
api_key = None
# Getting the news base url
source_base_url = None
article_base_url = None
def configure_request(app):
    '''
    Acquire the api key and base urls from the Flask app configuration
    and publish them as module-level globals used by the fetch helpers.
    '''
    global api_key, sources_base_url, article_base_url
    config = app.config
    api_key = config['NEWS_API_KEY']
    sources_base_url = config['NEWS_API_BASE_URL']
    article_base_url = config['EVERYTHING_SOURCE_BASE_URL']
def get_sources(category):
    '''
    Fetch the news sources for *category* and return them as Source objects,
    or None when the response carries no sources.
    '''
    request_url = sources_base_url.format(category)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
        results = None
        if payload['sources']:
            results = process_sources(payload['sources'])
    return results
def process_sources(sources_results):
    '''
    Transform raw source dictionaries into Source objects.

    Args:
        sources_results: list of dicts with source details
    Returns:
        list of Source objects
    '''
    return [
        Source(
            item.get('id'),
            item.get('name'),
            item.get('description'),
            item.get('url'),
            item.get('category'),
        )
        for item in sources_results
    ]
def get_article(source):
    '''
    Fetch the articles for the given news source and return them as a list
    of Article objects, or None when the payload carries no articles.
    '''
    # Bug fixes vs. the original, which raised NameError on every call:
    # `base_url` -> `article_base_url` (module global set by configure_request),
    # `urrlib` -> `urllib`, urlopen(get_article) -> urlopen(get_article_url),
    # and `process_results` -> `process_article`.
    get_article_url = article_base_url.format(source, api_key)
    with urllib.request.urlopen(get_article_url) as url:
        get_article_data = url.read()
        get_article_response = json.loads(get_article_data)

        article_results = None

        # NOTE(review): newsapi.org responses normally key the list as
        # 'articles'; the original's 'article' key is preserved here —
        # confirm against the live API before changing behavior.
        if get_article_response['article']:
            article_results_list = get_article_response['article']
            article_results = process_article(article_results_list)

    return article_results
def process_article(article_results):
    '''
    Transform raw article dictionaries into Article objects.

    Args:
        article_results: list of dicts with article details
    Returns:
        list of Article objects (entries without date, author or image are skipped)
    '''
    articles = []
    for item in article_results:
        author = item.get('author')
        image = item.get('urlToImage')
        date = item.get('publishedat')
        # Keep only complete entries, exactly as before.
        if not (date and author and image):
            continue
        articles.append(
            Article(author, item.get('title'), item.get('description'),
                    item.get('url'), image, date)
        )
    return articles
| 0 | 0 | 0 |
091c4f92c6297aefbd437d098277c136b8dd7a08 | 1,387 | py | Python | examples/cell/hppc_rctau.py | batterysim/equiv-circ-model | fc3720ff13db2d393aadef60187364c02a0b0d35 | [
"MIT"
] | 33 | 2019-07-19T16:15:50.000Z | 2022-03-30T13:37:48.000Z | examples/cell/hppc_rctau.py | batterysim/equiv-circ-model | fc3720ff13db2d393aadef60187364c02a0b0d35 | [
"MIT"
] | 2 | 2020-06-12T21:30:26.000Z | 2020-10-21T19:42:42.000Z | examples/cell/hppc_rctau.py | batterysim/equiv-circ-model | fc3720ff13db2d393aadef60187364c02a0b0d35 | [
"MIT"
] | 15 | 2019-11-14T06:04:48.000Z | 2021-10-05T11:58:28.000Z | """
Use HPPC battery cell data to determine the tau, resistor and capacitor values
(RC parameters) for each SOC section. Curve fit coefficients are determined
from the two time constant (TTC) function.
"""
import params
from ecm import CellHppcData
from ecm import CellEcm
# Battery cell HPPC data and equivalent circuit model
# ----------------------------------------------------------------------------
file = '../data/cell-low-current-hppc-25c-2.csv'
data = CellHppcData(file)
ecm = CellEcm(data, params)
coeffs = ecm.curve_fit_coeff(ecm.func_ttc, 5)
rctau = ecm.rctau_ttc(coeffs)
# Print curve fit coefficients
# ----------------------------------------------------------------------------
print('\n--- Curve fit coefficients from TTC ---')
print('a\tb\tc\talpha\tbeta')
for c in coeffs:
print(f'{c[0]:.4f}\t{c[1]:.4f}\t{c[2]:.4f}\t{c[3]:.4f}\t{c[4]:.4f}')
print('')
# Print tau, resistor, and capacitor values
# ----------------------------------------------------------------------------
soc = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
print(f"--- RC parameters from TTC ---")
print(f"{'soc [-]':10} {'tau1 [s]':10} {'tau2 [s]':10} {'r0 [Ω]':10} {'r1 [Ω]':10} {'r2 [Ω]':10} {'c1 [F]':10} {'c2 [F]':10}")
for s, r in zip(soc, rctau):
print(f'{s:<10} {r[0]:<10.2f} {r[1]:<10.2f} {r[2]:<10.4f} {r[3]:<10.4f} {r[4]:<10.4f} {r[5]:<10.1f} {r[6]:<10.1f}')
print('')
| 34.675 | 126 | 0.521269 | """
Use HPPC battery cell data to determine the tau, resistor and capacitor values
(RC parameters) for each SOC section. Curve fit coefficients are determined
from the two time constant (TTC) function.
"""
import params
from ecm import CellHppcData
from ecm import CellEcm
# Battery cell HPPC data and equivalent circuit model
# ----------------------------------------------------------------------------
file = '../data/cell-low-current-hppc-25c-2.csv'
data = CellHppcData(file)
ecm = CellEcm(data, params)
coeffs = ecm.curve_fit_coeff(ecm.func_ttc, 5)
rctau = ecm.rctau_ttc(coeffs)
# Print curve fit coefficients
# ----------------------------------------------------------------------------
print('\n--- Curve fit coefficients from TTC ---')
print('a\tb\tc\talpha\tbeta')
for c in coeffs:
print(f'{c[0]:.4f}\t{c[1]:.4f}\t{c[2]:.4f}\t{c[3]:.4f}\t{c[4]:.4f}')
print('')
# Print tau, resistor, and capacitor values
# ----------------------------------------------------------------------------
soc = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
print(f"--- RC parameters from TTC ---")
print(f"{'soc [-]':10} {'tau1 [s]':10} {'tau2 [s]':10} {'r0 [Ω]':10} {'r1 [Ω]':10} {'r2 [Ω]':10} {'c1 [F]':10} {'c2 [F]':10}")
for s, r in zip(soc, rctau):
print(f'{s:<10} {r[0]:<10.2f} {r[1]:<10.2f} {r[2]:<10.4f} {r[3]:<10.4f} {r[4]:<10.4f} {r[5]:<10.1f} {r[6]:<10.1f}')
print('')
| 0 | 0 | 0 |
48a3fdc98ec582b021ad0adf80c3d431daa480b2 | 243 | py | Python | dynamic_programming/nth_tribonacci_number.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | dynamic_programming/nth_tribonacci_number.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | dynamic_programming/nth_tribonacci_number.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null |
print(tribonacci(3))
def tribonacci(n):
    """Return the n-th Tribonacci number (T0 = 0, T1 = T2 = 1)."""
    if n in (1, 2):
        return 1
    a, b, c = 0, 1, 1
    total = 0
    # For n <= 0 the loop never runs and 0 is returned, as before.
    for _ in range(3, n + 1):
        total = a + b + c
        a, b, c = b, c, total
    return total
print(tribonacci(3))
| 198 | 0 | 22 |
8f29fd45da7343f135031788062e5048b62352c0 | 469 | py | Python | basxconnect/core/context_processors.py | basxsoftwareassociation/basxconnect | f7edf62ab3f61419ceb8b2b81314648bd9ef01fd | [
"BSD-3-Clause"
] | 10 | 2021-04-20T13:30:33.000Z | 2022-03-10T17:58:06.000Z | basxconnect/core/context_processors.py | basxsoftwareassociation/basxconnect | f7edf62ab3f61419ceb8b2b81314648bd9ef01fd | [
"BSD-3-Clause"
] | 146 | 2020-12-20T13:52:37.000Z | 2022-03-21T06:48:08.000Z | basxconnect/core/context_processors.py | basxsoftwareassociation/basxconnect | f7edf62ab3f61419ceb8b2b81314648bd9ef01fd | [
"BSD-3-Clause"
] | 6 | 2021-03-16T04:45:15.000Z | 2022-02-26T22:06:34.000Z | from django.utils.html import mark_safe
from dynamic_preferences.registries import global_preferences_registry
from basxconnect.core.views.person.search_person_view import searchbar
| 31.266667 | 88 | 0.727079 | from django.utils.html import mark_safe
from dynamic_preferences.registries import global_preferences_registry
from basxconnect.core.views.person.search_person_view import searchbar
def basxconnect_core(request):
    """Template context processor: basxConnect branding plus the person search bar."""
    preferences = global_preferences_registry.manager()
    return {
        "PLATFORMNAME": mark_safe('basx <span style="font-weight: 600">Connect</span>'),
        "COMPANYNAME": preferences["general__organizationname"],
        "SEARCHBAR": searchbar,
    }
| 262 | 0 | 23 |
4db81e3c6d1571cacdb07378ece06d15cbd81a75 | 1,284 | py | Python | cookbook/c09/p18_define_classes.py | itpubs/python3-cookbook | 140f5e4cc0416b9674edca7f4c901b1f58fc1415 | [
"Apache-2.0"
] | 3 | 2018-09-19T06:44:13.000Z | 2019-03-24T10:07:07.000Z | cookbook/c09/p18_define_classes.py | itpubs/python3-cookbook | 140f5e4cc0416b9674edca7f4c901b1f58fc1415 | [
"Apache-2.0"
] | 2 | 2020-09-19T17:10:23.000Z | 2020-10-17T16:43:52.000Z | cookbook/c09/p18_define_classes.py | itpubs/python3-cookbook | 140f5e4cc0416b9674edca7f4c901b1f58fc1415 | [
"Apache-2.0"
] | 1 | 2020-12-22T06:33:18.000Z | 2020-12-22T06:33:18.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 以编程方式定义类
Desc :
"""
# stock.py
# Example of making a class manually from parts
# Methods
cls_dict = {
'__init__': __init__,
'cost': cost,
}
# Make a class
import types
Stock = types.new_class('Stock', (), {}, lambda ns: ns.update(cls_dict))
Stock.__module__ = __name__
import operator
import types
import sys
| 22.137931 | 76 | 0.646417 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 以编程方式定义类
Desc :
"""
# stock.py
# Example of making a class manually from parts
# Methods
# Initializer written as a plain function; it becomes Stock.__init__ when
# attached through cls_dict below.
def __init__(self, name, shares, price):
self.name = name
self.shares = shares
self.price = price
# Total position value (shares * price); attached to Stock via cls_dict below.
def cost(self):
return self.shares * self.price
# Method table for the class assembled below.
cls_dict = {
'__init__': __init__,
'cost': cost,
}
# Make a class
import types
# types.new_class builds 'Stock' at runtime; the callback fills its namespace.
Stock = types.new_class('Stock', (), {}, lambda ns: ns.update(cls_dict))
Stock.__module__ = __name__
import operator
import types
import sys
def named_tuple(classname, fieldnames):
    """Programmatically build a tuple subclass with named, read-only field accessors.

    Each name in *fieldnames* becomes a property backed by its tuple position.
    """
    members = {
        field: property(operator.itemgetter(position))
        for position, field in enumerate(fieldnames)
    }

    def __new__(cls, *args):
        # Enforce exact positional arity, like a real namedtuple.
        if len(args) != len(fieldnames):
            raise TypeError('Expected {} arguments'.format(len(fieldnames)))
        return tuple.__new__(cls, args)

    members['__new__'] = __new__

    new_cls = types.new_class(classname, (tuple,), {},
                              lambda ns: ns.update(members))
    # Attribute the class to the caller's module rather than this one.
    new_cls.__module__ = sys._getframe(1).f_globals['__name__']
    return new_cls
| 830 | 0 | 68 |
591c127b1cffb54d8ffbf28a50de3b5af18a317c | 3,478 | py | Python | ai_transformersx/examples/tasks/news_classification/general_models.py | aicanhelp/ai-transformers | fa30031fa7360ee6d4fd3d016a3c81a23cfe8af1 | [
"MIT"
] | 1 | 2020-08-03T12:59:20.000Z | 2020-08-03T12:59:20.000Z | transformersx/examples/tasks/news_classification/general_models.py | aicanhelp/ai-transformers | fa30031fa7360ee6d4fd3d016a3c81a23cfe8af1 | [
"MIT"
] | null | null | null | transformersx/examples/tasks/news_classification/general_models.py | aicanhelp/ai-transformers | fa30031fa7360ee6d4fd3d016a3c81a23cfe8af1 | [
"MIT"
] | null | null | null | from torch import nn
import torch
import torch.nn.functional as F
| 39.522727 | 104 | 0.648361 | from torch import nn
import torch
import torch.nn.functional as F
class NewsClassification_WordAVGModel(nn.Module):
    """Bag-of-words classifier: average the word embeddings, then one linear layer."""

    def __init__(self, vocab_size, embedding_dim, output_dim, dropout=0.2, pad_idx=0):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        self.fc = nn.Linear(embedding_dim, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # text: (batch, seq) token ids -> (batch, seq, embed)
        embedded = self.dropout(self.embedding(text))
        # Average over the sequence dimension, collapsing it to length 1,
        # then squeeze to (batch, embed).
        pooled = F.avg_pool2d(embedded, (embedded.shape[1], 1)).squeeze(1)
        # (batch, output_dim)
        return self.fc(pooled)
class NewsClassfiction_CNN(nn.Module):
    """TextCNN classifier: parallel 2-D convolutions over the embedding matrix,
    max-pooled per filter size and concatenated before the linear layer.
    """

    def __init__(self, vocab_size, embedding_dim, num_filter,
                 filter_sizes, output_dim, dropout=0.2, pad_idx=0):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        # One Conv2d per n-gram size; in_channels is 1 because text has a
        # single "channel", out_channels is the number of filters per size.
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channels=1, out_channels=num_filter,
                      kernel_size=(fs, embedding_dim))
            for fs in filter_sizes
        ])
        # All pooled filter outputs are concatenated before classification.
        self.fc = nn.Linear(len(filter_sizes) * num_filter, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # (batch, seq, embed) -> (batch, 1, seq, embed) to match Conv2d input.
        embedded = self.dropout(self.embedding(text)).unsqueeze(1)
        # Each element: (batch, num_filter, seq - fs + 1)
        feature_maps = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]
        # Max-pool over the remaining sequence dimension: (batch, num_filter)
        pooled = [F.max_pool1d(fm, fm.shape[2]).squeeze(2) for fm in feature_maps]
        # (batch, num_filter * len(filter_sizes))
        combined = self.dropout(torch.cat(pooled, dim=1))
        return self.fc(combined)
class NewsClassification_RNN(nn.Module):
    """Bidirectional-LSTM classifier: concatenate the final forward and
    backward hidden states and feed them through a linear layer.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim,
                 n_layers=2, bidirectional=True, dropout=0.2, pad_idx=0):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers, batch_first=True,
                           bidirectional=bidirectional)
        # hidden_dim * 2 because both directions are concatenated;
        # independent of n_layers.
        self.fc = nn.Linear(hidden_dim * 2, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # (batch, seq) -> (batch, seq, embed)
        embedded = self.dropout(self.embedding(text))
        # hidden/cell: (num_directions * n_layers, batch, hidden)
        output, (hidden, cell) = self.rnn(embedded)
        # Concatenate the final forward (hidden[-2]) and backward (hidden[-1]) states.
        final = self.dropout(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1))
        return self.fc(final.squeeze(0))
| 3,407 | 64 | 228 |
4089c1ab0ac1c81610c3364af753f1b0b1367400 | 2,015 | py | Python | bfebench/protocols/strategy.py | MatthiasLohr/bfebench | baca2e18a9c24282ecda99dccfd134fab4c223b3 | [
"Apache-2.0"
] | null | null | null | bfebench/protocols/strategy.py | MatthiasLohr/bfebench | baca2e18a9c24282ecda99dccfd134fab4c223b3 | [
"Apache-2.0"
] | null | null | null | bfebench/protocols/strategy.py | MatthiasLohr/bfebench | baca2e18a9c24282ecda99dccfd134fab4c223b3 | [
"Apache-2.0"
] | null | null | null | # This file is part of the Blockchain-based Fair Exchange Benchmark Tool
# https://gitlab.com/MatthiasLohr/bfebench
#
# Copyright 2021 Matthias Lohr <mail@mlohr.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Generic, TypeVar
from eth_typing.evm import ChecksumAddress
from ..environment import Environment
from ..utils.json_stream import JsonObjectSocketStream
from .protocol import Protocol
T = TypeVar("T", bound=Protocol)
| 28.785714 | 108 | 0.704715 | # This file is part of the Blockchain-based Fair Exchange Benchmark Tool
# https://gitlab.com/MatthiasLohr/bfebench
#
# Copyright 2021 Matthias Lohr <mail@mlohr.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Generic, TypeVar
from eth_typing.evm import ChecksumAddress
from ..environment import Environment
from ..utils.json_stream import JsonObjectSocketStream
from .protocol import Protocol
T = TypeVar("T", bound=Protocol)
class Strategy(Generic[T]):
    """Base class for one party's behaviour in a fair-exchange protocol run."""

    def __init__(self, protocol: T) -> None:
        self._protocol = protocol
        # Logger named after the concrete subclass, e.g. "pkg.module.MyStrategy".
        self._logger = logging.getLogger("%s.%s" % (self.__class__.__module__, self.__class__.__qualname__))

    @property
    def protocol(self) -> T:
        """The protocol instance this strategy acts for."""
        return self._protocol

    @property
    def logger(self) -> logging.Logger:
        """Per-subclass logger."""
        return self._logger

    def run(
        self,
        environment: Environment,
        p2p_stream: JsonObjectSocketStream,
        opposite_address: ChecksumAddress,
    ) -> None:
        """Execute the strategy; concrete subclasses must override."""
        raise NotImplementedError()
class SellerStrategy(Strategy[T]):
    """Seller-side strategy; concrete protocol implementations provide run()."""

    def run(
        self,
        environment: Environment,
        p2p_stream: JsonObjectSocketStream,
        opposite_address: ChecksumAddress,
    ) -> None:
        """Execute the seller's part of the exchange; must be overridden."""
        raise NotImplementedError()
class BuyerStrategy(Strategy[T]):
    """Buyer-side strategy; concrete protocol implementations provide run()."""

    def run(
        self,
        environment: Environment,
        p2p_stream: JsonObjectSocketStream,
        opposite_address: ChecksumAddress,
    ) -> None:
        """Execute the buyer's part of the exchange; must be overridden."""
        raise NotImplementedError()
| 756 | 166 | 121 |
6b8b15fda65791c8df661e8e899dda386fbb3084 | 7,020 | py | Python | iris_mt_scratch/sandbox/transfer_function/TSTFTarray.py | simpeg-research/iris-mt-scratch | ea458f253071db513fd0731118a2a7452a725944 | [
"MIT"
] | null | null | null | iris_mt_scratch/sandbox/transfer_function/TSTFTarray.py | simpeg-research/iris-mt-scratch | ea458f253071db513fd0731118a2a7452a725944 | [
"MIT"
] | 19 | 2020-12-23T17:55:48.000Z | 2021-06-24T21:01:05.000Z | iris_mt_scratch/sandbox/transfer_function/TSTFTarray.py | simpeg-research/iris-mt-scratch | ea458f253071db513fd0731118a2a7452a725944 | [
"MIT"
] | null | null | null | """
based on Gary's
TSTFTarray.m in
iris_mt_scratch/egbert_codes-20210121T193218Z-001/egbert_codes/matlabPrototype_10-13-20/TF/classes
"""
class TSTFTArray(object):
"""
class to support creating FC arrays from STFT objects stored as mat files
% class to support creating FC arrays from STFT objects stored as mat files
% simplified -- not a subclass of TFC -- cannot be used for array
% processing without additional features/changes. Just intended
% for SS and RR processing
"""
def __init__(self):
"""
ivar: ArrayInfo % cell array containing list of STFT file names (
created from tranmt.cfg + band setup file, array name
ivar: array: % cell array containing all STFT objects
?STFTCollection()?
ivar: FCdir = root path for STFT files --
EstimationBands % array of dimension (nBands, 3) giving decimation
levels and band limits, as returned by function ReadBScfg.
ivar: Header % TArrayHeader object -- mostly just an array of site
headers
ivar: iBand % current band number
ivar: OneBand % data for one band -- data for one band -- a TFC1Dec
object, containing all FCs for all sites / runs for band iBand,
merged and aligned
ivar: T % period for center of current band--could be dependent
"""
self.array_info = None
self.array = None
self.FCdir = None
self.estimation_bands = None
self.header = None
self.T
@property
def load_stft_arrays(self):
"""
initialize and load all STFT objects - - for now no checks on consistency
Note that the selection of what "runs" or FCFiles are going to be used
is actually controlled by the
Returns
-------
"""
self.array = cell(length(obj.ArrayInfo.Files), 1)
#read in estimation bands
self.EstimationBands = ReadBScfg(self.ArrayInfo.bandFile);
#load all STFT files for all sites / runs
SiteHeaders[self.number_of_sites] = TSiteHeader();
for j in range(self.number_of_sites):
nFCfiles = length(self.array_info.Files{j}.FCfiles);
#this just creates an array of empty TSTFT objects of length
# nFCfiles - - one for each run
self.Array{j}(nFCfiles) = TSTFT();
for k in range(nFCfiles):
#full pathname of file to load
cfile = [self.FCdir obj.ArrayInfo.Files{j}.FCfiles{k}];
load(cfile, '-mat', 'FTobj')
self.Array{j}(k) = FTobj;
if k == 1:
SiteHeaders[j] = self.Array{j}(k).Header;
else:
header_ok = consistent_headers(SiteHeaders[j],
self.Array{j}(k).Header)
if not header_ok:
print('Headers for two runs are not consistent')
self.Header = TArrayHeader(self.ArrayInfo.ArrayName, SiteHeaders);
# probably should carry a Header for this object;
#Also should compare headers to make sure that runs for a given site are
#consistent, and that sites are consistent
#(use same Windows, start times, and also overlap in time?)
def extractFCband(self, i_band, AllSites=None):
"""
Usage: T = extractFCband(obj, ib);
loads FCs for full array for frequency band ib into TSTFTarray object,
storing in OneBand.
Parameters
----------
self
i_band
Returns: T - 1 / f_center where f_center is center frequency of band
-------
"""
self.iBand = ib; # could add some error checking
band = self.estimation_bands[ib,:];
AllSites = self.number_of_sites * [TFC1Dec()]
for j in range(self.number_of_sites):
#first extract TFC1Dec objects defined by band for one site
nFCfiles = length(self.array[j]);
AllRuns = nFCfiles * [TFC1Dec()]
for k = range(nFCfiles):
AllRuns[k] = self.array[j][k].FC(band[0]).extractBand(band[1:2])
# make sure all objects have ordered segments,
# complete block
AllRuns(k).timeSort;
AllRuns(k).reblock;
# merge all runsfor site j
AllSites[j] = AllRuns.mergeRuns;
#merge all sites into a single TFC1Dec object
self.OneBand = AllSites.mergeSites;
# nominal period for estimation band: 1 / f_center
T = 1. / mean(self.OneBand.freqs);
return T
def get_mt_tf_data(self, transfer_function_header):
"""
Usage: [H,E] = obj.getMTTFdata(TFHD);
[H,E,R] = obj.getMTTF(TFHD);
extracts arrays needed for estimation of MT transfer functions:
H(NSeg,2) == magnetic field FCs
E(NSeg,Nch) = electric field (and optionally vertical magnetic) field
FCs; E(:,1) is Hz if this is returned;
R(NSeg,2) = reference fields for RR estimation (optional)
TFHD is TFHeader object, whioch defines local and (optionally) remote
sites, and channels at these sites that will be used for processing.
TFHeader.ArrayHeader2TFHeader creates this header from TArrayHeader,
using default assumptions about channels (i.e., use horizontal mags
at local as input channels, at remote for reference, etc.
Parameters
----------
tfhd
Returns
-------
"""
# find local site numbrt
LocalInd = find(strcmp(transfer_function_header.LocalSite.SiteID,
self.Header.SiteIDs));
Hind = transfer_function_header.ChIn + self.Header.ih(LocalInd)-1;
Eind = transfer_function_header.ChOut + self.Header.ih(LocalInd)-1;
H = self.OneBand.FC(Hind,:,:);
[nch, nfc, nseg] = size(H);
H = reshape(H, nch, nfc * nseg).';
E = obj.OneBand.FC(Eind,:,:);
[nch, nfc, nseg] = size(E);
E = reshape(E, nch, nfc * nseg).';
if strcmp(TFHD.Processing, 'RR'):
#find reference site number if a character string is provide
RemoteInd = find(strcmp(TFHD.RemoteSite.SiteID,
self.Header.SiteIDs));
Rind = transfer_function_header.ChRef + self.Header.ih(RemoteInd) -1
R = self.OneBand.FC(Rind,:);
[nch, nfc, nseg] = size(R);
R = reshape(R, nch, nfc * nseg).';
return H,E,R
| 39.886364 | 98 | 0.595014 | """
based on Gary's
TSTFTarray.m in
iris_mt_scratch/egbert_codes-20210121T193218Z-001/egbert_codes/matlabPrototype_10-13-20/TF/classes
"""
def consistent_headers(header1, header2):
    """Check that two run headers are mutually consistent.

    NOTE(review): placeholder implementation — always returns ``True``;
    no field of *header1* or *header2* is actually compared yet.
    """
    #do some checking
    return True
class TSTFTArray(object):
    """
    class to support creating FC arrays from STFT objects stored as mat files
    % class to support creating FC arrays from STFT objects stored as mat files
    % simplified -- not a subclass of TFC -- cannot be used for array
    % processing without additional features/changes. Just intended
    % for SS and RR processing

    NOTE(review): the method bodies below are an incomplete MATLAB-to-Python
    port; several statements still use MATLAB syntax (cell/brace indexing,
    trailing semicolons, ``length``/``strcmp``/``find`` builtins) and will
    not run as Python.  Notes mark the spots that still need porting.
    """
    def __init__(self):
        """
        ivar: ArrayInfo % cell array containing list of STFT file names (
        created from tranmt.cfg + band setup file, array name
        ivar: array: % cell array containing all STFT objects
        ?STFTCollection()?
        ivar: FCdir = root path for STFT files --
        EstimationBands % array of dimension (nBands, 3) giving decimation
        levels and band limits, as returned by function ReadBScfg.
        ivar: Header % TArrayHeader object -- mostly just an array of site
        headers
        ivar: iBand % current band number
        ivar: OneBand % data for one band -- data for one band -- a TFC1Dec
        object, containing all FCs for all sites / runs for band iBand,
        merged and aligned
        ivar: T % period for center of current band--could be dependent
        """
        self.array_info = None
        self.array = None
        self.FCdir = None
        self.estimation_bands = None
        self.header = None
        # NOTE(review): bare attribute access -- raises AttributeError at
        # construction time; presumably meant ``self.T = None``.
        self.T

    @property
    def number_of_sites(self):
        # Number of sites equals the number of per-site STFT collections.
        return len(self.array)

    def number_of_bands(self):
        # NOTE(review): not decorated @property, unlike number_of_sites --
        # confirm whether the asymmetry is intended.
        return len(self.estimation_bands)

    def initialize_from_config(self, tranmt_config_file, FCdir=None):
        # Read the tranmt configuration; an explicit FCdir overrides any
        # previously stored root path for the STFT files.
        if FCdir is not None:
            self.FCdir = FCdir
        self.array_info = ReadTranMTcfg(tranmt_config_file)

    def load_stft_arrays(self):
        """
        initialize and load all STFT objects - - for now no checks on consistency
        Note that the selection of what "runs" or FCFiles are going to be used
        is actually controlled by the

        Returns
        -------

        NOTE(review): body is untranslated MATLAB (cell(), brace indexing,
        load(), mixed obj/self references) -- needs a full Python port.
        """
        self.array = cell(length(obj.ArrayInfo.Files), 1)
        #read in estimation bands
        self.EstimationBands = ReadBScfg(self.ArrayInfo.bandFile);
        #load all STFT files for all sites / runs
        SiteHeaders[self.number_of_sites] = TSiteHeader();
        for j in range(self.number_of_sites):
            nFCfiles = length(self.array_info.Files{j}.FCfiles);
            #this just creates an array of empty TSTFT objects of length
            # nFCfiles - - one for each run
            self.Array{j}(nFCfiles) = TSTFT();
            for k in range(nFCfiles):
                #full pathname of file to load
                cfile = [self.FCdir obj.ArrayInfo.Files{j}.FCfiles{k}];
                load(cfile, '-mat', 'FTobj')
                self.Array{j}(k) = FTobj;
                # NOTE(review): Python range() starts at 0, so the MATLAB
                # ``k == 1`` first-iteration test is off by one here.
                if k == 1:
                    SiteHeaders[j] = self.Array{j}(k).Header;
                else:
                    header_ok = consistent_headers(SiteHeaders[j],
                                                   self.Array{j}(k).Header)
                    if not header_ok:
                        print('Headers for two runs are not consistent')
        self.Header = TArrayHeader(self.ArrayInfo.ArrayName, SiteHeaders);
        # probably should carry a Header for this object;
        #Also should compare headers to make sure that runs for a given site are
        #consistent, and that sites are consistent
        #(use same Windows, start times, and also overlap in time?)

    def extractFCband(self, i_band, AllSites=None):
        """
        Usage: T = extractFCband(obj, ib);
        loads FCs for full array for frequency band ib into TSTFTarray object,
        storing in OneBand.

        Parameters
        ----------
        self
        i_band

        Returns: T - 1 / f_center where f_center is center frequency of band
        -------

        NOTE(review): the body refers to ``ib`` but the parameter is named
        ``i_band``; the AllSites parameter is immediately overwritten; and
        ``N * [TFC1Dec()]`` replicates ONE shared instance N times -- all
        need fixing when this is ported.
        """
        self.iBand = ib; # could add some error checking
        band = self.estimation_bands[ib,:];
        AllSites = self.number_of_sites * [TFC1Dec()]
        for j in range(self.number_of_sites):
            #first extract TFC1Dec objects defined by band for one site
            nFCfiles = length(self.array[j]);
            AllRuns = nFCfiles * [TFC1Dec()]
            # NOTE(review): MATLAB loop syntax -- should be ``for k in range(...)``.
            for k = range(nFCfiles):
                AllRuns[k] = self.array[j][k].FC(band[0]).extractBand(band[1:2])
                # make sure all objects have ordered segments,
                # complete block
                AllRuns(k).timeSort;
                AllRuns(k).reblock;
            # merge all runsfor site j
            AllSites[j] = AllRuns.mergeRuns;
        #merge all sites into a single TFC1Dec object
        self.OneBand = AllSites.mergeSites;
        # nominal period for estimation band: 1 / f_center
        T = 1. / mean(self.OneBand.freqs);
        return T

    def get_mt_tf_data(self, transfer_function_header):
        """
        Usage: [H,E] = obj.getMTTFdata(TFHD);
        [H,E,R] = obj.getMTTF(TFHD);
        extracts arrays needed for estimation of MT transfer functions:
        H(NSeg,2) == magnetic field FCs
        E(NSeg,Nch) = electric field (and optionally vertical magnetic) field
        FCs; E(:,1) is Hz if this is returned;
        R(NSeg,2) = reference fields for RR estimation (optional)
        TFHD is TFHeader object, whioch defines local and (optionally) remote
        sites, and channels at these sites that will be used for processing.
        TFHeader.ArrayHeader2TFHeader creates this header from TArrayHeader,
        using default assumptions about channels (i.e., use horizontal mags
        at local as input channels, at remote for reference, etc.

        Parameters
        ----------
        tfhd

        Returns
        -------

        NOTE(review): untranslated MATLAB throughout (strcmp/find/size/
        reshape, ``obj``/``TFHD`` left over from the renamed parameters);
        also ``R`` is only bound inside the RR branch but is always
        returned, which would raise for single-site processing.
        """
        # find local site numbrt
        LocalInd = find(strcmp(transfer_function_header.LocalSite.SiteID,
                               self.Header.SiteIDs));
        Hind = transfer_function_header.ChIn + self.Header.ih(LocalInd)-1;
        Eind = transfer_function_header.ChOut + self.Header.ih(LocalInd)-1;
        H = self.OneBand.FC(Hind,:,:);
        [nch, nfc, nseg] = size(H);
        H = reshape(H, nch, nfc * nseg).';
        E = obj.OneBand.FC(Eind,:,:);
        [nch, nfc, nseg] = size(E);
        E = reshape(E, nch, nfc * nseg).';
        if strcmp(TFHD.Processing, 'RR'):
            #find reference site number if a character string is provide
            RemoteInd = find(strcmp(TFHD.RemoteSite.SiteID,
                                    self.Header.SiteIDs));
            Rind = transfer_function_header.ChRef + self.Header.ih(RemoteInd) -1
            R = self.OneBand.FC(Rind,:);
            [nch, nfc, nseg] = size(R);
            R = reshape(R, nch, nfc * nseg).';
        return H,E,R
| 306 | 0 | 102 |
88461a861fd799b887fa5ff02063ea8ed44aaab3 | 1,234 | py | Python | cogs/image.py | Krepy/KrepyBot | ee726735c7265b8f46d1be077e88607402fa89fc | [
"MIT"
] | 1 | 2021-02-17T21:18:38.000Z | 2021-02-17T21:18:38.000Z | cogs/image.py | Krepy/KrepyBot | ee726735c7265b8f46d1be077e88607402fa89fc | [
"MIT"
] | null | null | null | cogs/image.py | Krepy/KrepyBot | ee726735c7265b8f46d1be077e88607402fa89fc | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import random
import httpx
from lxml import html
import os
| 26.255319 | 72 | 0.662075 | import discord
from discord.ext import commands
import random
import httpx
from lxml import html
import os
class ImageModule(commands.Cog):
    """Cog providing random bird-image commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.guild_only()
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name='birb')
    async def birb(self, ctx):
        """Fetch a random bird picture from some-random-api and embed it.

        Fixed: the old code round-tripped the JSON payload through lxml and
        sliced the serialized bytes (``checkPage2[12:-6]``) to recover the
        URL -- any change in payload shape broke it.  The endpoint returns
        JSON of the form ``{"link": "<image url>"}``, so parse it directly.
        """
        response = httpx.get("https://some-random-api.ml/img/birb")
        imgURL = response.json()["link"]
        embed = discord.Embed(title="Birb", url=imgURL, colour=0xE65858)
        embed.set_image(url=imgURL)
        await ctx.channel.send(embed=embed)

    @commands.guild_only()
    #@commands.cooldown(2, 5, commands.BucketType.user)
    @commands.command(name='bird')
    async def bird(self, ctx):
        """Send a random image file from the local ``Bird/`` directory."""
        randomFile = random.choice(os.listdir("Bird/"))
        f = discord.File(f"Bird/{randomFile}", filename=randomFile)
        e = discord.Embed(title="Bird", colour=0x64C6E9)
        e.set_image(url=f"attachment://{randomFile}")
        await ctx.channel.send(embed=e, file=f)
def setup(bot):
    """Extension entry point: discord.py calls this on load_extension."""
    bot.add_cog(ImageModule(bot))
| 754 | 326 | 46 |
c1032138496fda4fcc37963907d13d4359b251b0 | 592 | py | Python | interview/leet/761_Special_Binary_String_v3.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2020-10-12T13:33:29.000Z | 2020-10-12T13:33:29.000Z | interview/leet/761_Special_Binary_String_v3.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | null | null | null | interview/leet/761_Special_Binary_String_v3.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2016-11-09T07:28:45.000Z | 2016-11-09T07:28:45.000Z | #!/usr/bin/env python3
S = "11100010"
S = "111000111100001100"
S = '1100'
S = '1010'
S = '10'
S = "11011000"
S = ''
sol = Solution()
print(sol.makeLargestSpecial(S))
| 22.769231 | 56 | 0.472973 | #!/usr/bin/env python3
class Solution:
    def makeLargestSpecial(self, S: str) -> str:
        """Return the lexicographically largest equivalent special string.

        A *special* binary string has equal counts of '1' and '0' and every
        prefix has at least as many '1's as '0's (LeetCode 761).  Each
        top-level component ``1M0`` is rewritten recursively and the sibling
        components are concatenated in descending order.

        Improvement: the original kept its parse cursor in ``self.ix``,
        mutating instance state and making the method non-reentrant; the
        cursor now lives in closure-local state, so repeated or concurrent
        calls on one Solution instance cannot interfere.
        """
        n = len(S)
        pos = 0  # shared cursor into S, advanced by dfs

        def dfs() -> str:
            nonlocal pos
            tokens = []
            while pos < n:
                c = S[pos]
                pos += 1
                if c != '1':
                    break  # matching '0' closes the current component
                tokens.append(f'1{dfs()}0')
            return ''.join(sorted(tokens, reverse=True))

        return dfs()
# Ad-hoc driver: each assignment below overwrites the previous one, so only
# the final value (the empty string) is actually passed to the solver.
S = "11100010"
S = "111000111100001100"
S = '1100'
S = '1010'
S = '10'
S = "11011000"
S = ''
sol = Solution()
print(sol.makeLargestSpecial(S))
| 382 | -6 | 49 |
723d25090796ff946b4dfe2d3bb084c00e10553b | 35,816 | py | Python | src/ase2sprkkr/common/grammar_types.py | ase2sprkkr/ase2sprkkr | 5e04f54365e4ab65d97bd11d573b078674548a59 | [
"MIT"
] | 1 | 2022-03-14T22:56:11.000Z | 2022-03-14T22:56:11.000Z | src/ase2sprkkr/common/grammar_types.py | ase2sprkkr/ase2sprkkr | 5e04f54365e4ab65d97bd11d573b078674548a59 | [
"MIT"
] | 1 | 2022-03-10T09:08:50.000Z | 2022-03-10T09:08:50.000Z | src/ase2sprkkr/common/grammar_types.py | ase2sprkkr/ase2sprkkr | 5e04f54365e4ab65d97bd11d573b078674548a59 | [
"MIT"
] | null | null | null | """
Classes, that represents various value types that can appear in the configuration and problem definitionfiles.
Each grammar type can both parse string containing a value of a given type, and to create the string containing a given value.
"""
import pyparsing as pp
import io
import inspect
from pyparsing import Word, Suppress
import itertools
import numpy as np
from collections import namedtuple
from collections.abc import Hashable
from .misc import OrderedDict, cached_property, cache
ppc = pp.pyparsing_common
from .grammar import generate_grammar, separator as separator_grammar, \
delimitedList, line_end, optional_quote,\
replace_whitechars, White
from ase.units import Rydberg
import copy
import datetime
from typing import Union, Any, Callable
context = generate_grammar()
context.__enter__()
#it ensures that the generated grammar will have the correct whitespaces
class BaseType:
""" Base class for definition of configuration option types
A type without value (e.g. Separator) are just syntactical
elements in the potentials file, that do not carry an information.
Such elements do not yields (name, value) pair during parsing the file.
Do not confuse this with BaseType.missing_value functionality.
Missing_value is just the opposite: missing_value can be ommited in the file
(or even the absence of the name in the file carry the information, that
the Flag is False), but the name-value tuple of such Type is present
in the parse result. On the other hand, has_value = False is in the file, but
not in the result.
"""
has_value = True
""" Default value for BaseValueDefinition.name_in_grammar.
Some types (e.g. Tables) commonly have no name (are identified
by its position in the potential file.
"""
name_in_grammar = True
""" Default value for the given type. It can be overriden in the constructor (or just by setting
the instantiated object attribute) """
default_value = None
""" Deafault type for creating numpy arrays (e.g. by Table) is object - to be redefined
in the descendatns
"""
numpy_type = object
def __init__(self, prefix:Union[str,None]=None, postfix:Union[str,None]=None,
format:str='', default_value:Any=None,
condition:Union[Callable[[Any], Union[bool,str]],None]=None,
after_convert:Union[Callable[[Any], Any],None]=None):
"""
Create the object.
Parameters
----------
prefix
The string, that will be printed before the value
postfix
The string, that will be printed after the value
format
The (python) format string, that will be used for printing the value.
The format is passed as format argument to ``str.format`` routine.
default_value
The default value of the options of this type. ``None`` means no default value.
condition
Function, that check the validity of the value. It should return ``True`` for a valid
value, and ``False`` or string for invalid. The string is interpreted as an error message
that explains the invalidity of the value.
after_convert
Function, that - if it is given - is applied to the (entered or parsed) value. The function
is applied on the result of the
:meth:`convert<ase2sprkkr.common.grammar_types.BaseType.convert>` method
"""
self.prefix = prefix
""" The string, that will be printed before the value """
self.postfix = postfix
""" The string, that will be printed after the value """
self.format = format
""" The (python) format string, that will be used for printing the value.
The format is passed as format argument to ``str.format`` routine. """
self.condition = condition
if after_convert is not None:
self.convert = lambda v: \
after_convert(self, self.__class__.convert(self, v))
""" Some subclasses has default_value defined via read-only property. """
if default_value is not None:
self.default_value = self.convert(default_value)
@cache
def grammar(self, param_name=False):
""" Return a pyparsing grammar for the type """
grammar = self._grammar
if not isinstance(grammar, pp.ParserElement):
grammar = grammar(param_name)
if self.prefix or self.postfix:
with generate_grammar():
if self.prefix:
grammar = pp.Literal(self.prefix).suppress().setName(self.prefix) + grammar
if self.postfix:
grammar += pp.Literal(self.postfix).suppress().setName(self.postfix)
grammar = self.transform_grammar(grammar, param_name)
if self.has_value:
grammar.addParseAction(validate)
grammar.grammar_type = self
return grammar
def grammar_name(self):
""" Human readable expression of the grammar. By default,
this is what is set by grammar.setName, however, sometimes
is desirable to set even shorter string """
return str(self.grammar)
def transform_grammar(self, grammar, param_name=False):
""" The chance for the resulting class to alter the resulting prefixed grammar """
return grammar
def missing_value(self):
""" Is the configuraion value a flag? I.e. can be =<value> ommited
in the configuration
Return
------
can_be_ommited : bool
Is an ommision of the value possible, e.g. the option is given as Flag (only by name of the option)
default_value
The value used if the value is ommitted
do_not_output_value
The value, with which the variable should not be outputed at all (e.g. False for a flag)
"""
return False, None, None
def validate(self, value, param_name='<Unknown>', parse_check=False):
""" Validate either the pyparsing result or a user given value
Parameters
---------
value : mixed
Value to be validated
param_name : str or callable
Parameter name to be used in possible throwed exception (Optional)
If it is callable, it should be a function that returns the param_name
"""
try:
err = self._validate(value, parse_check)
except ValueError as err:
self._valueError(value, err, param_name)
if err is not True:
self._valueError(value, err, param_name)
if self.condition:
err = self.condition(value)
if err is not True:
self._valueError(value, err, param_name)
return True
def _validate(self, value, parse_check=False):
""" Return error message if the value is not valid """
return True
def read(self, token, parameterName='<Unknown>'):
""" Transform pyparsing token to a validated value """
self.validate(val)
return val
def convert(self, value):
""" Convert a value from user to a "cannonical form" """
return value
def enrich(self, option):
""" Some types can add properties to the options that have
the type, e.g. see Sequence.enrich, which adds the ability to
access the items of the sequence using [] """
pass
class Unsigned(BaseType):
""" Unsigned integer (zero is possible) """
_grammar = replace_whitechars(ppc.integer).setParseAction(lambda x:int(x[0]))
numpy_type = int
Unsigned.I = Unsigned()
class Integer(BaseType):
""" Signed integer """
_grammar = replace_whitechars(ppc.signed_integer).setParseAction(lambda x:int(x[0]))
numpy_type = int
Integer.I = Integer()
class Bool(BaseType):
""" A bool type, whose value is represented by a letter (T or F) """
_grammar = (pp.CaselessKeyword('T') | pp.CaselessKeyword('F')).setParseAction( lambda x: x[0] == 'T' )
numpy_type = bool
Bool.I = Bool()
class Real(BaseType):
""" A real value """
_grammar = replace_whitechars(ppc.fnumber).setParseAction(lambda x: float(x[0]))
numpy_type = float
Real.I = Real()
class Date(BaseType):
    """ A date value of the form 'DD.MM.YYYY' """
    # Fixed: the field separators were unescaped dots, which in a regex match
    # ANY character (so e.g. '12x01y2020' parsed as a date).  ``\.`` now
    # requires the literal dot promised by the docstring.
    _grammar = pp.Regex(r'(?P<d>\d{2})\.(?P<m>\d{2})\.(?P<y>\d{4})').setParseAction(lambda x: datetime.date(int(x['y']), int(x['m']), int(x['d'])))
Date.I = Date()
class BaseRealWithUnits(BaseType):
""" The base class for float value, which can have units append.
The value is converted automatically to the base units.
"""
grammar_cache = {}
""" A grammar for units is cached """
numpy_type = float
class RealWithUnits(BaseRealWithUnits):
""" A float value with user-defined units """
class Energy(BaseRealWithUnits):
""" The grammar type for energy. The default units are Rydberg, one can specify eV. """
units = {
'Ry' : 1.,
'eV' : 1. / Rydberg,
None : 1.,
}
""" The allowed units and their conversion factors """
Energy.I = Energy()
class BaseString(BaseType):
""" Base type for string grammar types """
class String(BaseString):
""" Just a string (without whitespaces and few special chars) """
_grammar = Word(pp.printables,excludeChars=",;{}").setParseAction(lambda x:x[0])
String.I = String()
class QString(BaseString):
    """ Either a quoted string, or just a word (without whitespaces or special chars) """
    # Fixed two defects:
    # 1. The alternatives were combined with python ``or``, which simply
    #    returns the first (truthy) ParserElement -- the QuotedString branch
    #    was dead code.  Pyparsing alternation needs ``|`` (MatchFirst).
    # 2. QuotedString must be tried first: Word(printables) would otherwise
    #    consume a leading quote as an ordinary character and never let the
    #    quoted form (which may contain spaces) match.
    _grammar = (pp.QuotedString("'") | pp.Word(pp.printables, excludeChars=",;{}")).setParseAction(lambda x:x[0])
QString.I = QString()
class LineString(BaseString):
""" A string, that takes all up to the end of the line """
_grammar = pp.SkipTo(pp.LineEnd() | pp.StringEnd())
LineString.I = LineString()
class Keyword(BaseType):
"""
A value, that can take values from the predefined set of strings.
"""
def DefKeyword(default, *others, **kwargs):
"""
A value, that can take values from the predefined set of strings, the first one is the default value.
"""
return Keyword(default, *others, default_value=default, **kwargs)
class Flag(BaseType):
"""
A boolean value, which is True, if a name of the value appears in the input file.
"""
_grammar = pp.Empty().setParseAction(lambda x: True)
Flag.I = Flag()
normalize_type_map = {
    np.int64: int,
    np.float64: float,
    np.bool_: bool,
}
""" Mapping of alternative types to the 'canonical ones'. """

def normalize_type(type):
    """ Map a type to its canonical representative.

    Numpy scalar types are translated to the corresponding python builtins;
    any type without an entry in ``normalize_type_map`` is returned as-is.

    doctest:

    >>> normalize_type(np.int64)
    <class 'int'>
    """
    if type in normalize_type_map:
        return normalize_type_map[type]
    return type
type_from_type_map = OrderedDict([
(float, Real.I),
(int , Integer.I),
(bool, Bool.I),
(str , String.I)]
)
""" The standard grammar_types for python types.
The value type can be given by a standard python type, this map maps the
python type for the appropriate grammar_type class.
"""
def format_for_type(format, type):
    """ Pick the output format appropriate for *type*.

    Parameters
    ----------
    format: str or dict
        A plain string is returned unchanged.  A dict maps python types to
        their formats; the entry stored under the key ``None`` serves as the
        fallback for types without their own entry.
    """
    if not isinstance(format, dict):
        return format
    return format[type] if type in format else format[None]
def type_from_type(type, format='', format_all=False):
    """ Guess and return the grammar element (BaseType class descendant) from a python type. E.g. int => Integer.
    The given format can be optionally set to the returned grammar element.

    Parameters
    ----------
    type: A python type or BaseType
      A type to be converted to a grammar type (BaseType class descendant)

    format: str or dict
      The format to be applied to the resulting class. If dict is given, see 'format_for_type'
      for the way how the format is determined

    format_all: boolean
      If False (default), the format is not applied, if instance of BaseType is given as
      the type parameter. Otherwise, a copy of the input type with the applied format is returned
    """
    if isinstance(type, Hashable) and type in type_from_type_map:
        # Canonicalize (e.g. numpy scalar -> builtin) before the lookup.
        type = normalize_type(type)
        format = format_for_type(format, type)
        type = type_from_type_map[type]
        if format:
            # Never mutate the shared singleton instance: format a copy.
            type = type.copy()
            type.format = format
        return type
    elif format_all:
        type = type.copy()
        type.format = format_for_type(format, normalize_type(type.numpy_type))
        return type
    # NOTE(review): when *type* is neither a known python type nor format_all
    # is set, control falls off the end and None is returned -- yet callers
    # (e.g. type_from_value) compare the result with the input, suggesting a
    # trailing ``return type`` may have been lost.  Confirm against upstream.
class Array(BaseType):
""" A (numpy) array of values of one type """
delimiter=White(' \t').suppress()
delimiter_str = ' '
class SetOf(Array):
""" Set of values of the same type. E.g. {1,2,3} """
delimiter = pp.Suppress(pp.Literal(',') | pp.Literal(';') | White(' \t')).setName('[,; ]')
delimiter_str = ','
type_from_set_map = OrderedDict([
(float, SetOf(float)),
(int , SetOf(int)),
])
""" Map the python type of a collection member to a grammar type of the collection.
Only canonical types are expected, see :meth:`ase2sprkkr.common.grammar_types.normalize_type`
"""
def type_from_value(value):
    """ Guess the grammar type from a python value.

    ..doctest::

    >>> type_from_value(2)
    <Integer>

    >>> type_from_value(2.0)
    <Real>
    """
    # Collections map according to their first member's canonical type;
    # an empty collection defaults to a set of integers.
    if isinstance(value, (list, np.ndarray)):
        return type_from_set_map[normalize_type(value[0].__class__)] if len(value) else Integer.I
    # Strings that the plain String grammar cannot parse in full (e.g.
    # containing whitespace) fall back to the quoted-string type.
    if isinstance(value, str):
        try:
            String._grammar.parseString(value, True)
            return String.I
        except Exception:
            return QString.I
    type = type_from_type(value.__class__)
    if type is value.__class__:
        # Fixed: the message lacked its f-prefix, so the literal text
        # '{value}' was raised instead of the offending value.
        raise ValueError(f'Cannot determine grammar type from value {value}')
    return type.__class__(default_value = value)
def type_from_default_value(value, format='', format_all=False):
    """ Guess the grammar type from a value, that will become the default value of the grammar type.

    It has to create a new object instance, as it has to set the default
    value property of the returned object. An (output) format can be applied to the
    resulting grammar type

    Grammar types passed as types are left as is, unless format_all flag is set.
    """
    # Classes and ready-made grammar types are delegated unchanged.
    if inspect.isclass(value) or isinstance(value, BaseType):
        return type_from_type(value, format=format, format_all=format_all)
    # Otherwise build a fresh grammar-type instance that carries *value* as
    # its default; the format is chosen by the value's canonical python type.
    ptype = normalize_type(value.__class__)
    gtype = type_from_type(value.__class__).__class__
    return gtype(default_value = value, format=format_for_type(format, ptype))
class BaseMixed(BaseType):
"""
A variant type - it can hold "anything".
"""
type = None
""" The types, that the value can hold. To be redefined in the descendants. """
string_type = None
""" Type of string grammar_type to be used. To be redefined in the descendants. """
@classmethod
def get_type(self, value):
""" Return the type of the value """
return self.string_type if isinstance(value, str) else type_from_value(value)
class Mixed(BaseMixed):
""" A variant value to be used in input files (in unknown - custom - options) """
string_type = QString.I
""" Input files use quoted strings. """
types = [
Energy.I,
Real.I,
Integer.I,
type_from_set_map[int],
type_from_set_map[float],
QString.I,
Flag.I,
]
Mixed.I = Mixed()
class PotMixed(BaseMixed):
""" A variant value to be used in potential files (in unknown - custom - options) """
string_type = LineString.I
""" Potential files use line strings. """
types = [
Energy.I,
Real.I,
Integer.I,
Bool.I,
type_from_set_map[int],
type_from_set_map[float],
LineString.I,
]
PotMixed.I = PotMixed()
class Separator(BaseType):
""" Special class for ``****`` separator inside a section """
_grammar = separator_grammar.copy().setParseAction(lambda x: [None])
has_value = False
Separator.I = Separator()
class Sequence(BaseType):
""" A sequence of values of given types """
class Table(BaseType):
"""
Table, optionaly with named columns, e.g.
::text
IQ IREFQ IMQ NOQ ITOQ CONC
1 1 1 1 1 1.000
2 2 2 1 2 1.000
"""
name_in_grammar = False
@cached_property
def zero_data(self, length):
""" Return array of zeros with the given number of rows and
with the dtype of the table
"""
dtype = self.numpy_type
if isinstance(dtype, list):
return np.zeros(length, dtype)
else:
return np.zeros((length, self.number_of_collumns()), dtype)
integer = Integer.I
""" A standard signed integer grammar type instance """
unsigned = Unsigned.I
""" A standard unsigned integer grammar type instance """
boolean = Bool.I
""" A standard bool grammar type instance (for potential files) """
flag = Flag.I
""" A standard bool grammar type instance (for input files) """
real = Real.I
""" A standard real grammar type instance """
string = String.I
""" A standard string grammar type instance """
qstring = QString.I
""" A standard quoted string grammar type instance (for input files) """
line_string = LineString.I
""" A standard line string grammar type instance (for potential files) """
mixed = Mixed.I
""" A standard variant grammar type instance (for input files) """
pot_mixed = PotMixed.I
""" A standard variant grammar type instance (for potential files) """
separator = Separator.I
""" A standard separator line grammar type instance (for potential files) """
energy = Energy.I
""" A standard energy float value type instance (for potential files) """
context.__exit__(None, None, None)
del context
| 32.888889 | 156 | 0.646666 | """
Classes, that represents various value types that can appear in the configuration and problem definitionfiles.
Each grammar type can both parse string containing a value of a given type, and to create the string containing a given value.
"""
import pyparsing as pp
import io
import inspect
from pyparsing import Word, Suppress
import itertools
import numpy as np
from collections import namedtuple
from collections.abc import Hashable
from .misc import OrderedDict, cached_property, cache
ppc = pp.pyparsing_common
from .grammar import generate_grammar, separator as separator_grammar, \
delimitedList, line_end, optional_quote,\
replace_whitechars, White
from ase.units import Rydberg
import copy
import datetime
from typing import Union, Any, Callable
context = generate_grammar()
context.__enter__()
#it ensures that the generated grammar will have the correct whitespaces
class BaseType:
""" Base class for definition of configuration option types
A type without value (e.g. Separator) are just syntactical
elements in the potentials file, that do not carry an information.
Such elements do not yields (name, value) pair during parsing the file.
Do not confuse this with BaseType.missing_value functionality.
Missing_value is just the opposite: missing_value can be ommited in the file
(or even the absence of the name in the file carry the information, that
the Flag is False), but the name-value tuple of such Type is present
in the parse result. On the other hand, has_value = False is in the file, but
not in the result.
"""
has_value = True
""" Default value for BaseValueDefinition.name_in_grammar.
Some types (e.g. Tables) commonly have no name (are identified
by its position in the potential file.
"""
name_in_grammar = True
""" Default value for the given type. It can be overriden in the constructor (or just by setting
the instantiated object attribute) """
default_value = None
""" Deafault type for creating numpy arrays (e.g. by Table) is object - to be redefined
in the descendatns
"""
numpy_type = object
def __init__(self, prefix:Union[str,None]=None, postfix:Union[str,None]=None,
format:str='', default_value:Any=None,
condition:Union[Callable[[Any], Union[bool,str]],None]=None,
after_convert:Union[Callable[[Any], Any],None]=None):
"""
Create the object.
Parameters
----------
prefix
The string, that will be printed before the value
postfix
The string, that will be printed after the value
format
The (python) format string, that will be used for printing the value.
The format is passed as format argument to ``str.format`` routine.
default_value
The default value of the options of this type. ``None`` means no default value.
condition
Function, that check the validity of the value. It should return ``True`` for a valid
value, and ``False`` or string for invalid. The string is interpreted as an error message
that explains the invalidity of the value.
after_convert
Function, that - if it is given - is applied to the (entered or parsed) value. The function
is applied on the result of the
:meth:`convert<ase2sprkkr.common.grammar_types.BaseType.convert>` method
"""
self.prefix = prefix
""" The string, that will be printed before the value """
self.postfix = postfix
""" The string, that will be printed after the value """
self.format = format
""" The (python) format string, that will be used for printing the value.
The format is passed as format argument to ``str.format`` routine. """
self.condition = condition
if after_convert is not None:
self.convert = lambda v: \
after_convert(self, self.__class__.convert(self, v))
""" Some subclasses has default_value defined via read-only property. """
if default_value is not None:
self.default_value = self.convert(default_value)
def __str__(self):
return self.__class__.__name__
@cache
def grammar(self, param_name=False):
""" Return a pyparsing grammar for the type """
grammar = self._grammar
if not isinstance(grammar, pp.ParserElement):
grammar = grammar(param_name)
if self.prefix or self.postfix:
with generate_grammar():
if self.prefix:
grammar = pp.Literal(self.prefix).suppress().setName(self.prefix) + grammar
if self.postfix:
grammar += pp.Literal(self.postfix).suppress().setName(self.postfix)
grammar = self.transform_grammar(grammar, param_name)
if self.has_value:
def validate(s, loc, x):
try:
out = self.validate(x[0], parse_check=True, param_name=param_name)
except ValueError as e:
raise pp.ParseException(s, loc, str(e) + '\nValidating of the parsed value failed') from e
return x
grammar.addParseAction(validate)
grammar.grammar_type = self
return grammar
def parse(self, str, whole_string=True):
return self.grammar().parseString(str, whole_string)[0]
async def parse_from_stream(self, stream, up_to, start=None, whole_string=True):
result = await stream.readuntil(up_to)
result = result[:-len(up_to)].decode('utf8')
if start:
result = start + result
return self.parse(result, whole_string)
def grammar_name(self):
""" Human readable expression of the grammar. By default,
this is what is set by grammar.setName, however, sometimes
is desirable to set even shorter string """
return str(self.grammar)
def transform_grammar(self, grammar, param_name=False):
""" The chance for the resulting class to alter the resulting prefixed grammar """
return grammar
def missing_value(self):
""" Is the configuraion value a flag? I.e. can be =<value> ommited
in the configuration
Return
------
can_be_ommited : bool
Is an ommision of the value possible, e.g. the option is given as Flag (only by name of the option)
default_value
The value used if the value is ommitted
do_not_output_value
The value, with which the variable should not be outputed at all (e.g. False for a flag)
"""
return False, None, None
def validate(self, value, param_name='<Unknown>', parse_check=False):
    """Validate either the pyparsing result or a user-given value.

    Runs ``self._validate`` and the optional user ``condition``; any
    failure is turned into a descriptive ``ValueError`` via
    ``self._valueError``.

    Parameters
    ----------
    value : mixed
        Value to be validated.
    param_name : str or callable
        Parameter name to be used in a possibly thrown exception.
        If it is callable, it should be a function that returns the name
        (evaluated lazily, only when an error is actually reported).
    parse_check : bool
        Passed through to ``_validate``; True when called from the
        parse action of the grammar.

    Returns
    -------
    bool
        True on success; otherwise a ValueError is raised.
    """
    try:
        # _validate returns True on success, or an error-message string.
        err = self._validate(value, parse_check)
    except ValueError as err:
        # Re-wrap the exception with context; _valueError always raises,
        # so control never falls through from this branch (the shadowed
        # ``err`` binding is deleted after the handler, which is safe here).
        self._valueError(value, err, param_name)
    if err is not True:
        self._valueError(value, err, param_name)
    if self.condition:
        # Optional user-supplied extra condition (True or error message).
        err = self.condition(value)
        if err is not True:
            self._valueError(value, err, param_name)
    return True
def _validate(self, value, parse_check=False):
""" Return error message if the value is not valid """
return True
def _valueError(self, value, error_message=False, param_name=False):
if callable(param_name):
param_name = param_name()
if param_name:
param = f'for paramater {param_name} of type {self}'
else:
param = f'for type {self}'
if error_message is False:
error_message = 'invalid value'
if isinstance(error_message, Exception):
raise ValueError("Value '{}' {} is not valid: {}".format(value, param, error_message)) from error_message
else:
raise ValueError("Value '{}' {} is not valid: {}".format(value, param, error_message))
def read(self, token, parameterName='<Unknown>'):
    """Validate a parsed pyparsing token and return it.

    Parameters
    ----------
    token
        The value produced by the pyparsing grammar.
    parameterName : str
        Name reported in the exception if validation fails.

    Returns
    -------
    token
        The (unchanged) validated token.
    """
    # Bug fix: the original body referenced an undefined name ``val``,
    # raising NameError on every call; it has to operate on ``token``.
    # The parameter name is now also forwarded so errors identify the
    # offending option.
    self.validate(token, param_name=parameterName)
    return token
def convert(self, value):
""" Convert a value from user to a "cannonical form" """
return value
def _string(self, val):
return val
def string(self, val):
    """Return the textual representation of *val*, decorated with the
    configured prefix/postfix and rendered through the output format."""
    text = self._string(val)
    if self.prefix:
        text = f"{self.prefix}{text}"
    if self.postfix:
        text = f"{text}{self.postfix}"
    if self.format:
        # The stored format is used as the format-spec for the value.
        text = "{:{}}".format(text, self.format)
    return str(text)
def write(self, f, val):
f.write(self.string(val))
def print(self, val):
print(self.string(val))
def copy(self):
return copy.copy(self)
def enrich(self, option):
""" Some types can add properties to the options that have
the type, e.g. see Sequence.enrich, which adds the ability to
access the items of the sequence using [] """
pass
def __repr__(self):
return "<{}>".format(self.__class__.__name__)
class Unsigned(BaseType):
    """ Unsigned integer (zero is possible) """

    # Grammar: a plain integer literal, converted to int by the parse action.
    _grammar = replace_whitechars(ppc.integer).setParseAction(lambda x:int(x[0]))

    def _validate(self, value, parse_check=False):
        # Returns True on success, otherwise an error-message string
        # (the convention expected by BaseType.validate).
        if not isinstance(value, int): return "Integer value required"
        return value >= 0 or "Positive value required"

    def grammar_name(self):
        return '<+int>'

    numpy_type = int

# Shared singleton instance of the type.
Unsigned.I = Unsigned()
class Integer(BaseType):
""" Signed integer """
_grammar = replace_whitechars(ppc.signed_integer).setParseAction(lambda x:int(x[0]))
def _validate(self, value, parse_check=False):
return isinstance(value, (int, np.int64) ) or f'Integer value required ({value.__class__} was given)'
def grammar_name(self):
return '<int>'
numpy_type = int
Integer.I = Integer()
class Bool(BaseType):
    """A bool type, whose value is represented by a letter (T or F)."""

    # 'T' / 'F' (case-insensitive) parsed to True / False.
    _grammar = (pp.CaselessKeyword('T') | pp.CaselessKeyword('F')).setParseAction( lambda x: x[0] == 'T' )

    def _validate(self, value, parse_check=False):
        # Bug fix: corrected the typo "rquired" in the error message.
        return isinstance(value, bool) or "Bool value required"

    def grammar_name(self):
        return '<T|F>'

    def _string(self, val):
        # Serialize back to the one-letter representation.
        return 'T' if val else 'F'

    numpy_type = bool

# Shared singleton instance of the type.
Bool.I = Bool()
class Real(BaseType):
""" A real value """
_grammar = replace_whitechars(ppc.fnumber).setParseAction(lambda x: float(x[0]))
def _validate(self, value, parse_check=False):
return isinstance(value, float) or "Float value required"
def grammar_name(self):
return '<float>'
numpy_type = float
Real.I = Real()
class Date(BaseType):
    """A date value of the form 'DD.MM.YYYY'."""

    # Bug fix: the separator dots were written as unescaped '.', which in a
    # regex matches ANY character (so '01x02x2020' would parse).  They must
    # be escaped as '\.' to accept only a literal dot.
    _grammar = pp.Regex(r'(?P<d>\d{2})\.(?P<m>\d{2})\.(?P<y>\d{4})').setParseAction(lambda x: datetime.date(int(x['y']), int(x['m']), int(x['d'])))

    def _validate(self, value, parse_check=False):
        return isinstance(value, datetime.date) or "Date (datetime.date) value required"

    def grammar_name(self):
        return '<dd.mm.yyyy>'

    def _string(self, val):
        # Serialize back to the DD.MM.YYYY representation.
        return val.strftime("%d.%m.%Y")

# Shared singleton instance of the type.
Date.I = Date()
class BaseRealWithUnits(BaseType):
""" The base class for float value, which can have units append.
The value is converted automatically to the base units.
"""
grammar_cache = {}
""" A grammar for units is cached """
def _grammar_units(self, units):
i = id(units)
if not i in self.grammar_cache:
units = pp.Or(
(pp.Empty() if v is None else pp.CaselessKeyword(v))
.setParseAction(lambda x,*args, u=u: u) for v,u in units.items()
)
out = Real.I.grammar() + pp.Or(units)
out.setParseAction(lambda x: x[0]*x[1])
self.grammar_cache[i] = out
return out
return self.grammar_cache[i]
def _grammar(self, param_name):
return self._grammar_units(self.units)
def _validate(self, value, parse_check=False):
return isinstance(value, float) or "Float value required"
def grammar_name(self):
return '<float>[{}]'.format("|".join(('' if i is None else i for i in self.units)))
numpy_type = float
class RealWithUnits(BaseRealWithUnits):
""" A float value with user-defined units """
def __init__(self, *args, units, **kwargs):
self.units = units
super().__init__(*args, **kwargs)
class Energy(BaseRealWithUnits):
""" The grammar type for energy. The default units are Rydberg, one can specify eV. """
units = {
'Ry' : 1.,
'eV' : 1. / Rydberg,
None : 1.,
}
""" The allowed units and their conversion factors """
Energy.I = Energy()
class BaseString(BaseType):
""" Base type for string grammar types """
def _validate(self, value, parse_check=False):
if not isinstance(value, str): return "String value required"
if not parse_check:
try:
self._grammar.parseString(value, True)
except pp.ParseException as e:
return f"Forbidden character '{e.line[e.col]}' in the string"
return True
class String(BaseString):
""" Just a string (without whitespaces and few special chars) """
_grammar = Word(pp.printables,excludeChars=",;{}").setParseAction(lambda x:x[0])
def grammar_name(self):
return '<str>'
String.I = String()
class QString(BaseString):
    """Either a quoted string, or just a word (without whitespaces or special chars)."""

    # Bug fix: the alternatives were combined with the Python ``or``
    # operator, which simply evaluates to the first (truthy) parser element,
    # leaving the quoted-string branch unreachable.  Pyparsing alternatives
    # must be combined with ``|`` (MatchFirst); the quoted form is tried
    # first so quotes are recognized and stripped.
    _grammar = (pp.QuotedString("'") | pp.Word(pp.printables, excludeChars=",;{}")).setParseAction(lambda x:x[0])

    def grammar_name(self):
        return "'<str>'"

# Bug fix: the singleton was instantiated from String instead of QString,
# so QString.I silently behaved like the unquoted String type.
QString.I = QString()
class LineString(BaseString):
""" A string, that takes all up to the end of the line """
_grammar = pp.SkipTo(pp.LineEnd() | pp.StringEnd())
def grammar_name(self):
return "'<str....>\n'"
LineString.I = LineString()
class Keyword(BaseType):
"""
A value, that can take values from the predefined set of strings.
"""
def __init__(self, *keywords, **kwargs):
super().__init__(**kwargs)
self.keywords = [ i.upper() for i in keywords ]
with generate_grammar():
self._grammar = optional_quote + pp.MatchFirst((pp.CaselessKeyword(i) for i in self.keywords)).setParseAction(lambda x: x[0].upper()) + optional_quote
def _validate(self, value, parse_check=False):
return value in self.keywords or "Required one of [" + "|".join(self.keywords) + "]"
def grammar_name(self):
return '|'.join(('"'+i+'"' for i in self.keywords ))
def __str__(self):
return self.grammar_name()
def convert(self, value):
return value.upper()
def DefKeyword(default, *others, **kwargs):
"""
A value, that can take values from the predefined set of strings, the first one is the default value.
"""
return Keyword(default, *others, default_value=default, **kwargs)
class Flag(BaseType):
    """
    A boolean value, which is True, if a name of the value appears in the input file.
    """
    def grammar_name(self):
        # A flag has no value representation of its own.
        return None

    def str(self):
        # NOTE(review): this looks like it was meant to override __str__
        # (as the base class and other types do); as written it is a plain
        # method named ``str``.  Confirm no caller relies on .str() before
        # renaming.
        return "(Flag)"

    def missing_value(self):
        # (value can be omitted, value used when omitted,
        #  value for which the option is not printed at all)
        return (True, True, False)

    def _validate(self, value, parse_check=False):
        return value is True or value is False or value is None or "This is Flag with no value, please set to True to be present or to False/None to not"

    # The flag grammar matches nothing and yields True (presence == True).
    _grammar = pp.Empty().setParseAction(lambda x: True)

# Shared singleton instance of the type.
Flag.I = Flag()
normalize_type_map = {
np.int64 : int,
np.float64: float,
np.bool_: bool
}
""" Mapping of alternative types to the 'canonical ones'. """
def normalize_type(type):
    """Return the 'canonical type' for a given type.

    Maps numpy scalar types to the standard python ones; any type without
    a mapping is returned unchanged.

    doctest:

    >>> normalize_type(np.int64)
    <class 'int'>
    """
    try:
        return normalize_type_map[type]
    except KeyError:
        return type
type_from_type_map = OrderedDict([
(float, Real.I),
(int , Integer.I),
(bool, Bool.I),
(str , String.I)]
)
""" The standard grammar_types for python types.
The value type can be given by a standard python type, this map maps the
python type for the appropriate grammar_type class.
"""
def format_for_type(format, type):
    """Resolve the output format appropriate for the given type.

    Parameters
    ----------
    format: str or dict
        A plain format string is returned unchanged.  A dict maps python
        types to format strings and must contain a ``None`` entry, used as
        the default for types not listed.
    type
        The python type the format is looked up for.
    """
    if not isinstance(format, dict):
        return format
    return format[type] if type in format else format[None]
def type_from_type(type, format='', format_all=False):
""" Guess and return the grammar element (BaseType class descendatnt) from a python type. E.g. int => Integer.
The given format can be optionally set to the returned grammar element.
Parameters
----------
type: A python type or BaseType
A type to be converted to a grammar type (BaseType class descendant)
format: str or dict
The format to be applied to the resulting class. If dict is given, see 'format_for_type'
for the way how the format is determined
format_all: boolean
If False (default), the format is not applied, if instance of BaseType is given as
the type parameter. Otherwise, a copy of the input type with the applied format is returned
"""
if isinstance(type, Hashable) and type in type_from_type_map:
type = normalize_type(type)
format = format_for_type(format, type)
type = type_from_type_map[type]
if format:
type = type.copy()
type.format = format
return type
elif format_all:
type = type.copy()
type.format = format_for_type(format, normalize_type(type.numpy_type))
return type
class Array(BaseType):
    """A (numpy) array of values of one type."""

    # Items are separated by horizontal whitespace.
    delimiter=White(' \t').suppress()
    delimiter_str = ' '

    def __init__(self, type, default_value=None,
                 length=None, max_length=None, min_length=None,
                 as_list=False,
                 **kwargs):
        """
        Parameters
        ----------
        type
            Grammar type (or python type, or an example list whose first
            item determines the type) of the array items.
        default_value
            Default value of the option.
        length
            Shorthand setting both min_length and max_length.
        max_length, min_length
            Allowed number of items.
        as_list
            If True, keep the parsed values as a python list instead of a
            numpy array; if a callable is given, it is used as the
            container constructor.
        """
        if isinstance(type, (list, np.ndarray)):
            if default_value is not None:
                # Bug fix: this raised the undefined name ``ValueException``
                # (NameError); ValueError is the intended exception.
                raise ValueError("It is not possible for an Array to provide default_value both in 'default_value' and in 'type' argument")
            default_value = type
            type = type[0].__class__
        self.type = type_from_type(type)
        self.as_list = as_list
        super().__init__(default_value=default_value, **kwargs)
        self.min_length = min_length or length
        self.max_length = max_length or length
        with generate_grammar():
            grammar = self.type.grammar()
            grammar = delimitedList(grammar, self.delimiter)
            if self.as_list:
                if callable(self.as_list):
                    grammar = grammar.setParseAction(lambda x: self.as_list(x.asList()))
                else:
                    # Wrap in an extra list so pyparsing returns one token.
                    grammar = grammar.setParseAction(lambda x: [x.asList()])
            else:
                grammar.setParseAction(lambda x: self.convert(x.asList()))
            grammar.setName(self.grammar_name())
            self._grammar = grammar

    def __str__(self):
        return "Array({})".format(str(self.type))

    def grammar_name(self):
        gn = self.type.grammar_name()
        if self.min_length is not None and self.min_length == self.max_length:
            return f'{self.min_length}*{gn}'
        return f'{gn}{self.delimiter_str}{gn}{self.delimiter_str}...'

    def _string(self, val):
        # Robustness fix: an empty array no longer raises StopIteration.
        it = iter(val)
        try:
            first = next(it)
        except StopIteration:
            return ''
        out = self.type.string(first)
        for item in it:
            out += self.delimiter_str
            out += self.type.string(item)
        return out

    def _validate(self, value, parse_check=False):
        if callable(self.as_list):
            cls = self.as_list
        elif self.as_list:
            cls = list
        else:
            cls = np.ndarray
        if not isinstance(value, cls):
            return f'{cls} type required, {value.__class__} is given'
        for i,v in enumerate(value):
            try:
                self.type.validate(v, parse_check=False)
            except ValueError as e:
                raise ValueError("Value {} in the set is incorrect: {}".format(i, str(e))) from e
        if self.min_length is not None and len(value) < self.min_length:
            return f"The array should be at least {self.min_length} items long, it has {len(value)} items"
        # Bug fix: the upper bound was compared against self.min_length,
        # so max_length alone was never enforced correctly.
        if self.max_length is not None and len(value) > self.max_length:
            return f"The array can not have more than {self.max_length} items, it has {len(value)} items"
        return True

    def convert(self, value):
        if self.as_list:
            if callable(self.as_list):
                return value if isinstance(value, self.as_list) else self.as_list(value)
            return list(value) if isinstance(value, tuple) else value
        if not isinstance(value, np.ndarray):
            if self.type.numpy_type == object:
                # Force creation of a 1D object array; see
                # https://stackoverflow.com/questions/60939396/forcing-a-creation-of-1d-numpy-array-from-a-list-array-of-possibly-iterable-obje
                out = np.empty(len(value), object)
                out[:] = value
                return out
            else:
                return np.atleast_1d(value)
        return value
class SetOf(Array):
""" Set of values of the same type. E.g. {1,2,3} """
delimiter = pp.Suppress(pp.Literal(',') | pp.Literal(';') | White(' \t')).setName('[,; ]')
delimiter_str = ','
def __init__(self, type, **kwargs):
kwargs.setdefault('prefix', '{')
kwargs.setdefault('postfix', '}')
super().__init__(type, **kwargs)
def transform_grammar(self, grammar, param_name=False):
return grammar | self.type.grammar(param_name).copy().addParseAction(lambda x: np.atleast_1d(x.asList()))
def __str__(self):
return "SetOf({})".format(str(self.type))
type_from_set_map = OrderedDict([
(float, SetOf(float)),
(int , SetOf(int)),
])
""" Map the python type of a collection member to a grammar type of the collection.
Only canonical types are expected, see :meth:`ase2sprkkr.common.grammar_types.normalize_type`
"""
def type_from_value(value):
    """Guess the grammar type from a python value.

    ..doctest::

    >>> type_from_value(2)
    <Integer>
    >>> type_from_value(2.0)
    <Real>
    """
    if isinstance(value, (list, np.ndarray)):
        # Empty collections default to an integer set.
        return type_from_set_map[normalize_type(value[0].__class__)] if len(value) else Integer.I
    if isinstance(value, str):
        # Prefer the plain String type; fall back to the quoted variant
        # when the value contains characters String cannot represent.
        try:
            String._grammar.parseString(value, True)
            return String.I
        except Exception:
            return QString.I
    type = type_from_type(value.__class__)
    if type is value.__class__:
        # Bug fix: the message was a plain string literal, so the {value}
        # placeholder was never interpolated; it must be an f-string.
        raise ValueError(f'Cannot determine grammar type from value {value}')
    return type.__class__(default_value = value)
def type_from_default_value(value, format='', format_all=False):
    """ Guess the grammar type from a value, that will become the default value of the grammar type.

    It has to create a new object instance, as it has to set the default
    value property of the returned object. An (output) format can be applied to the
    resulting grammar type.

    Grammar types passed as types are left as is, unless format_all flag is set.
    """
    if inspect.isclass(value) or isinstance(value, BaseType):
        # A type (not a value) was passed: delegate without setting a default.
        return type_from_type(value, format=format, format_all=format_all)
    # Canonical python type of the value, used to select the output format.
    ptype = normalize_type(value.__class__)
    # Grammar-type class corresponding to the value's python type.
    gtype = type_from_type(value.__class__).__class__
    return gtype(default_value = value, format=format_for_type(format, ptype))
class BaseMixed(BaseType):
"""
A variant type - it can hold "anything".
"""
type = None
""" The types, that the value can hold. To be redefined in the descendants. """
string_type = None
""" Type of string grammar_type to be used. To be redefined in the descendants. """
@classmethod
def _grammar(cls, param_name=False):
return pp.MatchFirst((
i.grammar(param_name) for i in cls.types
))
def get_type(self, value):
""" Return the type of the value """
return self.string_type if isinstance(value, str) else type_from_value(value)
def _validate(self, value, parse_check=False):
type = self.get_type(value)
if type is value:
return 'Can not determine the type of value {}'.format(value)
return type.validate(value, parse_check)
def grammar_name(self):
return '<mixed>'
class Mixed(BaseMixed):
""" A variant value to be used in input files (in unknown - custom - options) """
string_type = QString.I
""" Input files use quoted strings. """
types = [
Energy.I,
Real.I,
Integer.I,
type_from_set_map[int],
type_from_set_map[float],
QString.I,
Flag.I,
]
def missing_value(self):
return True, True, False
Mixed.I = Mixed()
class PotMixed(BaseMixed):
""" A variant value to be used in potential files (in unknown - custom - options) """
string_type = LineString.I
""" Potential files use line strings. """
types = [
Energy.I,
Real.I,
Integer.I,
Bool.I,
type_from_set_map[int],
type_from_set_map[float],
LineString.I,
]
def _string(self, val):
if isinstance(val, bool):
return Bool._string(self, val)
else:
return super()._string(val)
PotMixed.I = PotMixed()
class Separator(BaseType):
    """ Special class for ``****`` separator inside a section """

    # Parses the separator line and yields a single None token.
    _grammar = separator_grammar.copy().setParseAction(lambda x: [None])
    # Separators carry no user-settable value.
    has_value = False

    def _validate(self, value, parse_check=False):
        # Always fails: a separator cannot hold a value.
        return 'Can not set a value to a separator'

    def _grammar_name(self):
        # NOTE(review): the base-class hook is ``grammar_name`` (no leading
        # underscore); this method appears to be dead code — confirm before
        # renaming.
        return '****...****\n'

    def _string(self, val=None):
        # The canonical separator line: 79 asterisks.
        return '*'*79

# Shared singleton instance of the type.
Separator.I = Separator()
class Sequence(BaseType):
    """ A sequence of values of given types """

    def __init__(self, *types, format='', format_all=False, allowed_values=None,
                 default_values=False, names=None, **kwargs):
        """
        Parameters
        ----------
        *types
            The member types (python types, grammar types or default values).
        format : str or dict
            Output format, applied per member via type_from_default_value.
        format_all : bool
            Apply the format even to members given as grammar-type instances.
        allowed_values
            Optional whitelist for the parsed tuple's first element.
        default_values : bool
            Allow members to be omitted in the input, falling back to their
            default values.
        names
            Optional member names; enables namedtuple results and named
            item access (see enrich).
        """
        super().__init__(**kwargs)
        if names:
            # Accept either a name->index dict or an iterable of names.
            self.names = names if isinstance(names, dict) else {name:i for i,name in enumerate(names)}
            self.value_type = namedtuple("_".join(names), names)
            self.value_constructor = self.value_type
        else:
            self.names = None
            self.value_type = tuple
            self.value_constructor = lambda *x: tuple(x)
        if isinstance(format, (str, dict)):
            # A single format spec is repeated for every member.
            format = itertools.repeat(format)
        self.types = [ type_from_default_value(i, dfs, format_all=format_all) for i,dfs in zip(types, format) ]
        if allowed_values and not isinstance(allowed_values, set):
            allowed_values = set(allowed_values)
        self.allowed_values=allowed_values
        self.default_values=default_values

    def _grammar(self, param_name = False):
        def grm(type):
            # Member grammar; optionally allow omission via default value.
            g = type.grammar(param_name)
            if self.default_values and type.default_value is not None:
                g = g | pp.Empty().setParseAction(lambda x: type.default_value)
            return g
        grammars = [grm(i) for i in self.types]
        grammar = pp.And(grammars).setParseAction(lambda x: self.value_constructor(*x))
        if self.allowed_values is not None:
            grammar.addConditionEx(lambda x: x[0] in self.allowed_values, lambda x: f'{x[0]} is not in the list of allowed values')
        return grammar

    def _validate(self, value, parse_check=False):
        if not isinstance(value, (self.value_type)) or len(value) != len(self.types):
            return f'A tuple of {len(self.types)} values is required'
        for i,j in zip(self.types, value):
            # Member validation raises on failure; the return value is unused.
            out = i.validate(j, parse_check=parse_check)
        return True

    def convert(self, value):
        # NOTE(review): indentation of the original was ambiguous here; the
        # second construction attempt looks redundant when the value already
        # has the right type — confirm against upstream before changing.
        if not isinstance(value, self.value_type):
            return self.value_constructor(*value)
        try:
            return self.value_constructor(*value)
        except TypeError:
            pass
        return value

    def grammar_name(self):
        return " ".join( (f'{j.grammar_name()}' for j in self.types) )

    def _string(self, val):
        out = []
        for i,v in zip(self.types, val):
            out.append(' ')
            out.append(i.string(v))
        return ''.join(out)

    def enrich(self, option):
        # Subclass the option on the fly to support item access by
        # index or (if names are defined) by member name.
        class cls(option.__class__):
            def _get_index(sulf, name):
                # NOTE(review): the first parameter is spelled ``sulf``
                # (the option instance) and is unused; ``self`` below is the
                # enclosing Sequence captured by closure — intentional but
                # fragile, confirm before renaming.
                if self.names and isinstance(name, str):
                    return self.names[name]
                return name
            def __getitem__(self, key):
                key = self._get_index(key)
                return self()[key]
            def __setitem__(self, key, value):
                key = self._get_index(key)
                v = list(self())
                v[key] = value
                self.set(v)
        if self.names:
            # Expose each named member as a property on the new class;
            # the immediately-invoked lambda binds the loop variable i.
            for n,i in self.names.items():
                (lambda i: setattr(cls, n, property(
                    lambda self: self[i],
                    lambda self, v: self.__setitem__(i, v)
                )))(i)
        option.__class__ = cls
class Table(BaseType):
"""
Table, optionaly with named columns, e.g.
::text
IQ IREFQ IMQ NOQ ITOQ CONC
1 1 1 1 1 1.000
2 2 2 1 2 1.000
"""
name_in_grammar = False
def __init__(self, columns=None,
header=None, free_header=False,
format = {float: '>21.17', None: '>16'}, format_all=True,
numbering=None, numbering_label=None, numbering_format=True,
prefix=None, postfix=None, length=None,
row_condition=None,
default_values=False,
named_result = False, **kwargs):
super().__init__(prefix=None, postfix=None)
if columns is None:
columns = kwargs
if isinstance(columns, dict):
self.names = list(columns.keys())
columns = columns.values()
else:
self.names = None
if header is None:
header = self.names
self.sequence = Sequence( *columns, format=format, format_all=format_all, condition = row_condition, default_values=default_values )
self.header = header
self.free_header = free_header
if numbering.__class__ is str:
numbering_label=numbering
numbering=True
self.numbering = Unsigned.I if numbering is True else numbering
if self.numbering and numbering_format and not (numbering_format is True and self.numbering.format):
if numbering_format is True:
numbering_format = '<4'
self.numbering = self.numbering.copy()
self.numbering.format = numbering_format
self.numbering_label = numbering_label
self.named_result = named_result
self.length = length
def _grammar(self, param_name=False):
line = self.sequence.grammar(param_name)
if self.numbering:
line = self.numbering.grammar() + line # + pp.And._ErrorStop()
grammar = delimitedList(line, line_end)
if self.names:
if self.free_header:
fh = pp.SkipTo(line_end) + line_end
if callable(self.free_header):
fh.addConditionEx(lambda x: self.free_header(x[0]),
lambda x: f"This is not an allowed header for table {param_name}: {x[0]}" )
grammar = pp.Suppress(fh) + grammar
else:
def names():
for n in self.names:
if ' ' in n:
""" multiple column headers for one column are allowed
-- see Occupation section"""
yield from map(pp.CaselessKeyword, n.split(' '))
else:
yield pp.CaselessKeyword(n)
grammar = pp.Suppress(pp.And(list(names())) + pp.lineEnd) + grammar
if self.numbering_label:
grammar = pp.CaselessKeyword(self.numbering_label).suppress() + grammar
def ensure_numbering(s, loc, x):
numbers = x[::2]
datas = x[1::2]
if not numbers == [*range(1, len(numbers)+1)]:
raise pp.ParseException(s, loc, 'First column should contain row numbering')
return datas
if self.numbering is not None:
grammar.addParseAction(ensure_numbering)
grammar.addParseActionEx( lambda x: np.array(x.asList(), self.numpy_type), "Cannot retype to numpy array")
return grammar
def _string(self, data):
out = []
if self.header:
def gen():
names = ((i[1] if isinstance(i, tuple) else i) for i in self.names)
for n,t in zip(self.names, self.sequence.types):
yield n
yield t.format
fstr = (" {:{}}"*len(self.names))
if self.numbering:
fstr = self.numbering.string(self.numbering_label or '') + fstr
else:
fstr = fstr[1:]
out.append(fstr.format(*gen()))
newline = True
else:
newline = False
line = 1
for i in data:
if newline:
out.append('\n')
newline = True
if self.numbering is not None:
out.append(self.numbering.string(line))
line+=1
out.append(self.sequence.string(i))
return ''.join(out)
def _validate(self, value, parse_check=False):
if not isinstance(value, np.ndarray):
return f"Numpy array as a value required {value.__class__} given"
dtype = self.numpy_type
dim = 1 if isinstance(dtype, list) else 2
if len(value.shape) != dim:
return f"The array should have dimension={dim}, it has dimension {len(value.shape)}"
if value.dtype != self.numpy_type:
return f"The data type of the value should be {dtype}, it is {value.dtype}"
if dim==2 and value.shape[1] != len(self.sequence.types):
return f"The array is required to have {len(self.sequence.types)} columns, it has {value.shape[1]}"
if self.length is not None and self.length != value.shape[0]:
return f"The array is required to have {self.length} rows, it has {value.shape[1]}"
return True
def convert(self, value):
return np.asarray(value, dtype = self.numpy_type)
@cached_property
def numpy_type(self):
types = self.sequence.types
nr = self.names and self.named_result
if not nr:
dtype = types[0].numpy_type
for t in types[1:]:
if t.numpy_type != dtype:
nr = True
break
else:
return dtype
names = self.names or itertools.repeat('')
return list(zip(names, (i.numpy_type for i in types)))
def number_of_collumns(self):
return len(self.sequence.types)
def zero_data(self, length):
""" Return array of zeros with the given number of rows and
with the dtype of the table
"""
dtype = self.numpy_type
if isinstance(dtype, list):
return np.zeros(length, dtype)
else:
return np.zeros((length, self.number_of_collumns()), dtype)
def grammar_name(self):
if self.names:
data = " ".join( (f'{i}:{j.grammar_name()}' for i,j in zip(self.names, self.sequence.types) ) )
else:
data = self.sequence.grammar_name()
return f"<TABLE of {data}>"
integer = Integer.I
""" A standard signed integer grammar type instance """
unsigned = Unsigned.I
""" A standard unsigned integer grammar type instance """
boolean = Bool.I
""" A standard bool grammar type instance (for potential files) """
flag = Flag.I
""" A standard bool grammar type instance (for input files) """
real = Real.I
""" A standard real grammar type instance """
string = String.I
""" A standard string grammar type instance """
qstring = QString.I
""" A standard quoted string grammar type instance (for input files) """
line_string = LineString.I
""" A standard line string grammar type instance (for potential files) """
mixed = Mixed.I
""" A standard variant grammar type instance (for input files) """
pot_mixed = PotMixed.I
""" A standard variant grammar type instance (for potential files) """
separator = Separator.I
""" A standard separator line grammar type instance (for potential files) """
energy = Energy.I
""" A standard energy float value type instance (for potential files) """
context.__exit__(None, None, None)
del context
| 16,502 | 0 | 1,827 |
0e64c3f77d066447da3d127e03adf7c1b2379aa7 | 933 | py | Python | hybrid_django_react/poetry_setup.py | lluc2397/hybrid-django-react | 2b64f524ed74be334f5e8da54048d7c604f773f0 | [
"MIT"
] | 4 | 2021-12-15T04:39:33.000Z | 2022-03-07T09:44:53.000Z | hybrid_django_react/poetry_setup.py | lluc2397/hybrid-django-react | 2b64f524ed74be334f5e8da54048d7c604f773f0 | [
"MIT"
] | 2 | 2022-01-19T14:55:34.000Z | 2022-02-01T17:48:12.000Z | hybrid_django_react/poetry_setup.py | lluc2397/hybrid-django-react | 2b64f524ed74be334f5e8da54048d7c604f773f0 | [
"MIT"
] | 2 | 2021-12-15T04:57:39.000Z | 2022-01-23T19:11:32.000Z | import re
import os
def setup_poetry(config):
"""Entry point of module: setup poetry files and run poetry commands"""
update_pyproject_dot_toml(config)
#lock_poetry_dependencies()
#install_dependencies()
def lock_poetry_dependencies_on_docker():
"""Create poetry.lock file"""
os.system("docker-compose exec web poetry lock")
def install_dependencies():
"""Install package dependencies"""
os.system("poetry install")
| 29.15625 | 76 | 0.6388 | import re
import os
def setup_poetry(config):
    """Entry point of module: setup poetry files and run poetry commands"""
    update_pyproject_dot_toml(config)
    # The following steps are intentionally disabled for now.
    # NOTE(review): ``lock_poetry_dependencies`` does not match the defined
    # name ``lock_poetry_dependencies_on_docker`` — confirm before enabling.
    #lock_poetry_dependencies()
    #install_dependencies()
def lock_poetry_dependencies_on_docker():
"""Create poetry.lock file"""
os.system("docker-compose exec web poetry lock")
def install_dependencies():
"""Install package dependencies"""
os.system("poetry install")
def update_pyproject_dot_toml(config):
    """Fill project-metadata placeholders in ``pyproject.toml``.

    Replaces the empty ``name``/``description``/``license`` fields and the
    ``AUTHOR_POETRY`` / ``mail@mail.com`` author placeholders with the
    corresponding values from *config* (a dict with keys ``name``,
    ``description``, ``license``, ``author`` and ``email``).
    """
    filename = "pyproject.toml"
    with open(filename, encoding="utf-8") as f:
        content = f.read()
    for key in ("name", "description", "license"):
        # Robustness fix: use a callable replacement so user-provided values
        # containing backslashes or group references (e.g. "\1") are
        # inserted literally instead of being interpreted by re.sub.
        content = re.sub(rf'({key}) = ""',
                         lambda m, key=key: f'{key} = "{config[key]}"',
                         content)
    content = re.sub(r'(AUTHOR_POETRY)', lambda m: config["author"], content)
    content = re.sub(r'(mail@mail.com)', lambda m: config["email"], content)
    with open(filename, 'w', encoding="utf-8") as f:
        f.write(content)
| 433 | 0 | 25 |
deb8fedc9d1b6220b5fb807b13673aaeecac02a4 | 825 | py | Python | travelperk_http_python/oauth/config/config.py | namelivia/travelperk-http-python | c6cbd88c999a49f7d61ae040029ca3e91ce72cae | [
"MIT"
] | 2 | 2021-08-30T12:34:26.000Z | 2021-08-31T07:56:12.000Z | travelperk_http_python/oauth/config/config.py | namelivia/travelperk-http-python | c6cbd88c999a49f7d61ae040029ca3e91ce72cae | [
"MIT"
] | 6 | 2021-07-15T16:13:06.000Z | 2022-03-03T09:45:07.000Z | travelperk_http_python/oauth/config/config.py | namelivia/travelperk-http-python | c6cbd88c999a49f7d61ae040029ca3e91ce72cae | [
"MIT"
] | null | null | null | from typing import List
| 25 | 85 | 0.593939 | from typing import List
class Config:
    """OAuth client configuration: credentials, redirect URL and the
    authorization code obtained during the OAuth flow."""

    def __init__(
        self, client_id: str, client_secret: str, redirect_url: str, code: str = None
    ):
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_url = redirect_url
        self.code = code

    def get_client_id(self) -> str:
        return self.client_id

    def get_redirect_url(self) -> str:
        return self.redirect_url

    def set_code(self, code: str) -> "Config":
        # Returns self so calls can be chained fluently.
        self.code = code
        return self

    def has_code(self) -> bool:
        return self.code is not None

    def to_dict(self) -> dict:
        # Bug fix: the annotation claimed ``List`` while a dict is returned;
        # the keys follow the standard OAuth token-request parameter names.
        return {
            "code": self.code,
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "redirect_uri": self.redirect_url,
        }
| 624 | -8 | 184 |
4c5f0c9882f83a27b4893f2477399ebb929b0fb8 | 282 | py | Python | doges/serializers/__init__.py | Nunuzac/doges | fcd0343946bf0cb4f4a80bb910acea44dfa71b37 | [
"Apache-2.0"
] | null | null | null | doges/serializers/__init__.py | Nunuzac/doges | fcd0343946bf0cb4f4a80bb910acea44dfa71b37 | [
"Apache-2.0"
] | null | null | null | doges/serializers/__init__.py | Nunuzac/doges | fcd0343946bf0cb4f4a80bb910acea44dfa71b37 | [
"Apache-2.0"
] | null | null | null | from .breed_serializer import BreedSerializer
from .dog_serializer import DogSerializer
from .user_serializer import UserSerializer
from .role_serializer import RoleSerializer
from .person_serializer import PersonSerializer
from .custom_token_serializer import CustomTokenSerializer | 47 | 58 | 0.897163 | from .breed_serializer import BreedSerializer
from .dog_serializer import DogSerializer
from .user_serializer import UserSerializer
from .role_serializer import RoleSerializer
from .person_serializer import PersonSerializer
from .custom_token_serializer import CustomTokenSerializer | 0 | 0 | 0 |
44d3b12b68786b1791977c1b8958791817f18bfa | 3,469 | py | Python | demo/sorting/visualize.py | Rufaim/graph_net_lib | 36ebf91af4115df356a3272299ed4f558d17b3b3 | [
"Apache-2.0"
] | null | null | null | demo/sorting/visualize.py | Rufaim/graph_net_lib | 36ebf91af4115df356a3272299ed4f558d17b3b3 | [
"Apache-2.0"
] | null | null | null | demo/sorting/visualize.py | Rufaim/graph_net_lib | 36ebf91af4115df356a3272299ed4f558d17b3b3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Rufaim (https://github.com/Rufaim)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib.pyplot as pyplot
def plot_graph_edges(graph, edge_output, sort_indexes, title=None, savefile=None):
    """Plot edges for a given graph assuming certain order of nodes.

    Parameters
    ----------
    graph
        Graph object providing ``get_num_nodes()`` and ``senders`` /
        ``receivers`` tensors (convertible via ``.numpy()``).
    edge_output
        Per-edge scalar values aligned with the sender/receiver lists.
    sort_indexes
        Node ordering used to permute the adjacency matrix.
    title, savefile
        Optional plot title and output path (saved at 150 dpi).
    """
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the documented drop-in replacement.
    sort_indexes = np.squeeze(sort_indexes).astype(int)
    fig = pyplot.figure(figsize=(4, 4))
    ax = fig.add_subplot(1, 1, 1)
    nd = graph.get_num_nodes()
    probs = np.zeros((nd, nd))
    for s, r, ef in zip(graph.senders.numpy(), graph.receivers.numpy(), edge_output):
        probs[s, r] = ef
    ax.matshow(probs[sort_indexes][:, sort_indexes], cmap="viridis")
    ax.grid(False)
    ax.set_axis_off()
    if title is not None:
        ax.set_title(title)
    if savefile is not None:
        fig.savefig(savefile, dpi=150)
    pyplot.close(fig)
| 34.009804 | 115 | 0.676564 | # Copyright 2021 Rufaim (https://github.com/Rufaim)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib.pyplot as pyplot
def plot_graph_edges(graph,edge_output, sort_indexes,title=None,savefile=None):
    """Plot a heatmap of per-edge outputs for a graph, with nodes reordered.

    Args:
        graph: graph object exposing ``get_num_nodes()`` and integer
            ``senders``/``receivers`` tensors (read via ``.numpy()``).
        edge_output: iterable of scalar values, one per edge of ``graph``.
        sort_indexes: permutation applied to both rows and columns of the
            adjacency-style matrix before plotting.
        title: optional axes title.
        savefile: optional path; when given the figure is saved at 150 dpi.
    """
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement and behaves identically here.
    sort_indexes = np.squeeze(sort_indexes).astype(int)
    fig = pyplot.figure(figsize=(4, 4))
    ax = fig.add_subplot(1, 1, 1)
    nd = graph.get_num_nodes()
    probs = np.zeros((nd, nd))
    # Scatter each edge value into a dense (sender, receiver) matrix.
    for s, r, ef in zip(graph.senders.numpy(), graph.receivers.numpy(), edge_output):
        probs[s, r] = ef
    ax.matshow(probs[sort_indexes][:, sort_indexes], cmap="viridis")
    ax.grid(False)
    ax.set_axis_off()
    if title is not None:
        ax.set_title(title)
    if savefile is not None:
        fig.savefig(savefile,dpi=150)
    pyplot.close(fig)
def plot_ranked_inputs(value_nodes,sort_indexes,ranks,title=None,savefile=None):
    """Draw three horizontal bar panels: raw inputs, sorted inputs, ranks.

    Args:
        value_nodes: array of node values in [0, 1], one bar per element.
        sort_indexes: permutation that sorts ``value_nodes``.
        ranks: normalized ranks in [0, 1]; rescaled to integer positions.
        title: optional figure suptitle.
        savefile: optional path; when given the figure is saved at 150 dpi.
    """
    num_elements = value_nodes.shape[0]
    values = np.squeeze(value_nodes)
    # Undo the rank normalization so bars show integer positions.
    rank_positions = np.squeeze(ranks * (num_elements - 1.0)).astype(int)
    bar_y = np.arange(values.shape[0])

    fig = pyplot.figure(figsize=(10, 3))
    panels = [
        ("Inputs", values, "b", (-0.01, 1.01)),
        ("Sorted", values[sort_indexes], "k", (-0.01, 1.01)),
        ("Ranks", rank_positions, "r", (0, len(rank_positions) + 0.5)),
    ]
    for slot, (panel_title, data, color, xlim) in enumerate(panels, start=1):
        axis = fig.add_subplot(1, 3, slot)
        axis.set_title(panel_title)
        axis.barh(bar_y, data, color=color)
        axis.set_xlim(*xlim)
    if title is not None:
        fig.suptitle(title)
    fig.tight_layout()
    if savefile is not None:
        fig.savefig(savefile, dpi=150)
    pyplot.close(fig)
def plot_losses(train_loss,test_loss,train_correct,test_correct,train_solved,test_solved,title=None,savefile=None):
    """Plot training curves side by side: loss, fraction correct, fraction solved.

    Args:
        train_loss, test_loss: per-iteration loss curves.
        train_correct, test_correct: per-iteration fraction of correct nodes/edges.
        train_solved, test_solved: per-iteration fraction of fully solved examples.
        title: optional figure suptitle.
        savefile: optional path; when given the figure is saved at 150 dpi.
    """
    fig = pyplot.figure(11, figsize=(18, 3))
    # (train curve, test curve, test label, title, y-label, draw legend?)
    panels = [
        (train_loss, test_loss, "Test",
         "Loss across training", "Loss (binary cross-entropy)", True),
        (train_correct, test_correct, "Test/generalization",
         "Fraction correct across training", "Fraction nodes/edges correct", False),
        (train_solved, test_solved, "Test/generalization",
         "Fraction solved across training", "Fraction examples solved", False),
    ]
    for slot, (train_curve, test_curve, test_label,
               panel_title, y_label, with_legend) in enumerate(panels, start=1):
        axis = fig.add_subplot(1, 3, slot)
        axis.plot(train_curve, "k", label="Training")
        axis.plot(test_curve, "r--", label=test_label)
        axis.set_title(panel_title)
        axis.set_xlabel("Training iteration")
        axis.set_ylabel(y_label)
        if with_legend:
            axis.legend()
    if title is not None:
        fig.suptitle(title)
    fig.tight_layout()
    if savefile is not None:
        fig.savefig(savefile, dpi=150)
    pyplot.close(fig)
| 2,051 | 0 | 46 |
cbf70b1af4b0ab8fe62eb853c468bbe0475e4ca5 | 1,450 | py | Python | udf/bazaar/view/util/refresh-documents.py | stephen-v/deepdive-app-chinese | dfe1626fd9fad42169aca5cc85153336239eb6aa | [
"Apache-2.0"
] | 6 | 2018-10-27T03:02:03.000Z | 2021-07-15T07:42:03.000Z | udf/bazaar/view/util/refresh-documents.py | stephen-v/deepdive-app-chinese | dfe1626fd9fad42169aca5cc85153336239eb6aa | [
"Apache-2.0"
] | null | null | null | udf/bazaar/view/util/refresh-documents.py | stephen-v/deepdive-app-chinese | dfe1626fd9fad42169aca5cc85153336239eb6aa | [
"Apache-2.0"
] | 1 | 2019-10-01T14:32:49.000Z | 2019-10-01T14:32:49.000Z | #!/usr/bin/env python
import pipe
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'view'
TYPE_NAME = 'docs'
N = 1000
from pyhocon import ConfigFactory
from elasticsearch import Elasticsearch
import json
import sys
conf = ConfigFactory.parse_file('../view.conf')
docs_conf = conf.get('view.docs')
es = Elasticsearch(hosts = [ES_HOST])
index_docs()
| 23.015873 | 80 | 0.547586 | #!/usr/bin/env python
import pipe
ES_HOST = {"host" : "localhost", "port" : 9200}
INDEX_NAME = 'view'
TYPE_NAME = 'docs'
N = 1000
from pyhocon import ConfigFactory
from elasticsearch import Elasticsearch
import json
import sys
conf = ConfigFactory.parse_file('../view.conf')
docs_conf = conf.get('view.docs')
es = Elasticsearch(hosts = [ES_HOST])
def index_docs():
    """Rebuild the Elasticsearch document index from the configured input file.

    Deletes every existing document of TYPE_NAME in INDEX_NAME, then
    bulk-indexes all documents read from the path named by the view config,
    flushing in batches and refreshing the index once at the end.
    """
    # Clear the index first so documents removed from the source do not linger.
    es.delete_by_query(index = INDEX_NAME, doc_type = TYPE_NAME, body = {
        "query": {
            "match_all": {}
        }
    })
    # Bulk-index documents. Each document contributes two bulk entries
    # (the action header and the document body), so a flush at > N entries
    # happens roughly every N/2 documents.
    bulk_data = []
    for o in pipe.col_open('../' + docs_conf.get('input')):
        doc_id = o[u'id']
        op_dict = {
            "index": {
                "_index": INDEX_NAME,
                "_type": TYPE_NAME,
                "_id": doc_id
            }
        }
        # Expose the text under the 'content' key that the view queries.
        o['content'] = o[u'text']
        bulk_data.append(op_dict)
        bulk_data.append(o)
        if len(bulk_data) > N:
            es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
            bulk_data = []
    # Flush whatever remains after the loop.
    if len(bulk_data) > 0:
        es.bulk(index = INDEX_NAME, body = bulk_data, refresh = False)
    # Single refresh at the end makes everything searchable at once,
    # instead of paying a refresh per batch.
    es.indices.refresh(index = INDEX_NAME)

index_docs()
| 1,060 | 0 | 23 |
179a97e5c2e7d0ffd8ee5f4583397bd5e7600a67 | 228 | py | Python | boa3_test/test_sc/function_test/ReturnStarredArgumentCount.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/function_test/ReturnStarredArgumentCount.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/function_test/ReturnStarredArgumentCount.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from typing import List
from boa3.builtin import public
@public
@public
| 16.285714 | 44 | 0.736842 | from typing import List
from boa3.builtin import public
@public
def fun_with_starred(*args: int) -> int:
    """Return the number of positional arguments collected into ``args``."""
    argument_count = len(args)
    return argument_count
@public
def main(list_with_args: List[int]) -> int:
    """Unpack the list into separate positional arguments and count them."""
    unpacked_count = fun_with_starred(*list_with_args)
    return unpacked_count
| 107 | 0 | 44 |