| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31dcf667c0db74a850441f9c4885b9081b781d39
|
a2ad9d53c323fae1d47017772ce133db0918b0e6
|
/apps/organization/migrations/0003_auto_20180415_2005.py
|
865b476dd804b266eb512f569576a9059321e10d
|
[] |
no_license
|
SmileForS/MxOnline_Django
|
27175e3195e1b2b98ec389ed380c1ba6d85f7dd3
|
61ef01909907c71857085819b49f0d658c7cd07b
|
refs/heads/master
| 2020-03-09T17:50:03.438976
| 2018-04-15T14:57:46
| 2018-04-15T14:57:46
| 128,917,414
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-15 20:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization', '0002_auto_20180414_0929'),
]
operations = [
migrations.AddField(
model_name='courseorg',
name='catgory',
field=models.CharField(choices=[('pxjg', '培训机构'), ('gr', '个人'), ('gx', '高校')], default='pxjg', max_length=20, verbose_name='机构类别'),
),
migrations.AlterField(
model_name='courseorg',
name='image',
field=models.ImageField(upload_to='org/%Y/%m', verbose_name='logo'),
),
]
|
[
"cpyfluoxiaohui@163.com"
] |
cpyfluoxiaohui@163.com
|
28893aa30f2a72be36b0840421a09737b7faaa2f
|
68affb8e8597753057396b4a3714b7a2f534fbf7
|
/pj/pj/settings.py
|
5a853f1a17eb4d69eedd85276feb322b54d9fd76
|
[
"MIT"
] |
permissive
|
TrongNghiaRyt/SE347-project-Django-Website
|
5c230a4cefff29a07dcc3b95f3aa1035271ceb5c
|
383632f6f3f51553c3664a9b61a0a72b81981420
|
refs/heads/master
| 2020-11-27T20:40:48.143030
| 2019-12-25T12:56:15
| 2019-12-25T12:56:15
| 229,594,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,378
|
py
|
"""
Django settings for pj project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cv1=#alxl6_e83fnk!1+l&f03n@s#=y^7_b6^ho^jmuy9lj1fx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'study',
'a1test',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'Template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'traffic_law',
'USER': 'postgres',
'PASSWORD': 'admin',
'HOST': 'localhost',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
[
"34794845+TrongNghiaRyt@users.noreply.github.com"
] |
34794845+TrongNghiaRyt@users.noreply.github.com
|
a6ebb2ef63e9034635a5a9a0794251d2cc8912fd
|
92fb4d62071cc02993c6af8eedced27741567d97
|
/tests/test_search.py
|
4366a847be256e774f6389b780dea8efbb1de31c
|
[
"MIT"
] |
permissive
|
klorenz/python-elastico
|
f5efa26e814bf89e9affdfebed5b51f6f16f2aa8
|
9a39e6cfe33d3081cc52424284c19e9698343006
|
refs/heads/master
| 2020-03-25T20:19:30.108547
| 2019-01-22T08:40:24
| 2019-01-22T08:40:24
| 144,126,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
from dateutil.parser import parse as parse_dt
from elastico.search import search
def test_search():
actions = [
{
"_index": "foo",
"_type": "doc",
"_id": i,
"any": "data-%s" % i,
"@timestamp": parse_dt('2017-01-01')
}
for i in range(10)
]
from elasticsearch.helpers import bulk
from elastico.connection import elasticsearch
es = elasticsearch()
try:
bulk(es, actions)
es.indices.refresh('foo')
r = search(es, 'any: "data-2"')
assert r['hits']['total'] == 1
assert r['hits']['hits'][0]['_source']['@timestamp'] == '2017-01-01T00:00:00'
finally:
es.indices.delete('foo')
|
[
"kiwi@franka.dyndns.org"
] |
kiwi@franka.dyndns.org
|
d6041c909ade93c15742fc9b51f6283385a54fc8
|
3a5d8cdc7ac14c389fd9426f3f39c3b1dc906dda
|
/nautobot/extras/tests/test_customfields.py
|
5d69afc8223b63ba5fd3bbf99c5f90c513061ee9
|
[
"Apache-2.0"
] |
permissive
|
nammie-punshine/nautobot
|
f3cdb9d269c37a74706c105d237b883650f10465
|
d6227b211ad89f25233a8791937cd75092421c8a
|
refs/heads/main
| 2023-03-08T10:51:29.437859
| 2021-02-24T20:44:32
| 2021-02-24T20:44:32
| 342,080,836
| 0
| 0
|
Apache-2.0
| 2021-02-25T01:01:36
| 2021-02-25T01:01:36
| null |
UTF-8
|
Python
| false
| false
| 31,650
|
py
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.urls import reverse
from rest_framework import status
from nautobot.dcim.filters import SiteFilterSet
from nautobot.dcim.forms import SiteCSVForm
from nautobot.dcim.models import Site, Rack
from nautobot.extras.choices import *
from nautobot.extras.models import CustomField, Status
from nautobot.utilities.testing import APITestCase, TestCase
from nautobot.virtualization.models import VirtualMachine
class CustomFieldTest(TestCase):
def setUp(self):
Site.objects.create(name="Site A", slug="site-a")
Site.objects.create(name="Site B", slug="site-b")
Site.objects.create(name="Site C", slug="site-c")
def test_simple_fields(self):
DATA = (
{
"field_type": CustomFieldTypeChoices.TYPE_TEXT,
"field_value": "Foobar!",
"empty_value": "",
},
{
"field_type": CustomFieldTypeChoices.TYPE_INTEGER,
"field_value": 0,
"empty_value": None,
},
{
"field_type": CustomFieldTypeChoices.TYPE_INTEGER,
"field_value": 42,
"empty_value": None,
},
{
"field_type": CustomFieldTypeChoices.TYPE_BOOLEAN,
"field_value": True,
"empty_value": None,
},
{
"field_type": CustomFieldTypeChoices.TYPE_BOOLEAN,
"field_value": False,
"empty_value": None,
},
{
"field_type": CustomFieldTypeChoices.TYPE_DATE,
"field_value": "2016-06-23",
"empty_value": None,
},
{
"field_type": CustomFieldTypeChoices.TYPE_URL,
"field_value": "http://example.com/",
"empty_value": "",
},
)
obj_type = ContentType.objects.get_for_model(Site)
for data in DATA:
# Create a custom field
cf = CustomField(type=data["field_type"], name="my_field", required=False)
cf.save()
cf.content_types.set([obj_type])
# Assign a value to the first Site
site = Site.objects.first()
site.custom_field_data[cf.name] = data["field_value"]
site.save()
# Retrieve the stored value
site.refresh_from_db()
self.assertEqual(site.custom_field_data[cf.name], data["field_value"])
# Delete the stored value
site.custom_field_data.pop(cf.name)
site.save()
site.refresh_from_db()
self.assertIsNone(site.custom_field_data.get(cf.name))
# Delete the custom field
cf.delete()
def test_select_field(self):
obj_type = ContentType.objects.get_for_model(Site)
# Create a custom field
cf = CustomField(
type=CustomFieldTypeChoices.TYPE_SELECT,
name="my_field",
required=False,
choices=["Option A", "Option B", "Option C"],
)
cf.save()
cf.content_types.set([obj_type])
# Assign a value to the first Site
site = Site.objects.first()
site.custom_field_data[cf.name] = "Option A"
site.save()
# Retrieve the stored value
site.refresh_from_db()
self.assertEqual(site.custom_field_data[cf.name], "Option A")
# Delete the stored value
site.custom_field_data.pop(cf.name)
site.save()
site.refresh_from_db()
self.assertIsNone(site.custom_field_data.get(cf.name))
# Delete the custom field
cf.delete()
class CustomFieldManagerTest(TestCase):
def setUp(self):
content_type = ContentType.objects.get_for_model(Site)
custom_field = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name="text_field", default="foo")
custom_field.save()
custom_field.content_types.set([content_type])
def test_get_for_model(self):
self.assertEqual(CustomField.objects.get_for_model(Site).count(), 1)
self.assertEqual(CustomField.objects.get_for_model(VirtualMachine).count(), 0)
class CustomFieldAPITest(APITestCase):
@classmethod
def setUpTestData(cls):
content_type = ContentType.objects.get_for_model(Site)
# Text custom field
cls.cf_text = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name="text_field", default="foo")
cls.cf_text.save()
cls.cf_text.content_types.set([content_type])
# Integer custom field
cls.cf_integer = CustomField(type=CustomFieldTypeChoices.TYPE_INTEGER, name="number_field", default=123)
cls.cf_integer.save()
cls.cf_integer.content_types.set([content_type])
# Boolean custom field
cls.cf_boolean = CustomField(
type=CustomFieldTypeChoices.TYPE_BOOLEAN,
name="boolean_field",
default=False,
)
cls.cf_boolean.save()
cls.cf_boolean.content_types.set([content_type])
# Date custom field
cls.cf_date = CustomField(
type=CustomFieldTypeChoices.TYPE_DATE,
name="date_field",
default="2020-01-01",
)
cls.cf_date.save()
cls.cf_date.content_types.set([content_type])
# URL custom field
cls.cf_url = CustomField(
type=CustomFieldTypeChoices.TYPE_URL,
name="url_field",
default="http://example.com/1",
)
cls.cf_url.save()
cls.cf_url.content_types.set([content_type])
# Select custom field
cls.cf_select = CustomField(
type=CustomFieldTypeChoices.TYPE_SELECT,
name="choice_field",
choices=["Foo", "Bar", "Baz"],
)
cls.cf_select.default = "Foo"
cls.cf_select.save()
cls.cf_select.content_types.set([content_type])
statuses = Status.objects.get_for_model(Site)
# Create some sites
cls.sites = (
Site.objects.create(name="Site 1", slug="site-1", status=statuses.get(slug="active")),
Site.objects.create(name="Site 2", slug="site-2", status=statuses.get(slug="active")),
)
# Assign custom field values for site 2
cls.sites[1].custom_field_data = {
cls.cf_text.name: "bar",
cls.cf_integer.name: 456,
cls.cf_boolean.name: True,
cls.cf_date.name: "2020-01-02",
cls.cf_url.name: "http://example.com/2",
cls.cf_select.name: "Bar",
}
cls.sites[1].save()
def test_get_single_object_without_custom_field_data(self):
"""
Validate that custom fields are present on an object even if it has no values defined.
"""
url = reverse("dcim-api:site-detail", kwargs={"pk": self.sites[0].pk})
self.add_permissions("dcim.view_site")
response = self.client.get(url, **self.header)
self.assertEqual(response.data["name"], self.sites[0].name)
self.assertEqual(
response.data["custom_fields"],
{
"text_field": None,
"number_field": None,
"boolean_field": None,
"date_field": None,
"url_field": None,
"choice_field": None,
},
)
def test_get_single_object_with_custom_field_data(self):
"""
Validate that custom fields are present and correctly set for an object with values defined.
"""
site2_cfvs = self.sites[1].custom_field_data
url = reverse("dcim-api:site-detail", kwargs={"pk": self.sites[1].pk})
self.add_permissions("dcim.view_site")
response = self.client.get(url, **self.header)
self.assertEqual(response.data["name"], self.sites[1].name)
self.assertEqual(response.data["custom_fields"]["text_field"], site2_cfvs["text_field"])
self.assertEqual(response.data["custom_fields"]["number_field"], site2_cfvs["number_field"])
self.assertEqual(response.data["custom_fields"]["boolean_field"], site2_cfvs["boolean_field"])
self.assertEqual(response.data["custom_fields"]["date_field"], site2_cfvs["date_field"])
self.assertEqual(response.data["custom_fields"]["url_field"], site2_cfvs["url_field"])
self.assertEqual(response.data["custom_fields"]["choice_field"], site2_cfvs["choice_field"])
def test_create_single_object_with_defaults(self):
"""
Create a new site with no specified custom field values and check that it received the default values.
"""
data = {
"name": "Site 3",
"slug": "site-3",
"status": "active",
}
url = reverse("dcim-api:site-list")
self.add_permissions("dcim.add_site")
response = self.client.post(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
# Validate response data
response_cf = response.data["custom_fields"]
self.assertEqual(response_cf["text_field"], self.cf_text.default)
self.assertEqual(response_cf["number_field"], self.cf_integer.default)
self.assertEqual(response_cf["boolean_field"], self.cf_boolean.default)
self.assertEqual(response_cf["date_field"], self.cf_date.default)
self.assertEqual(response_cf["url_field"], self.cf_url.default)
self.assertEqual(response_cf["choice_field"], self.cf_select.default)
# Validate database data
site = Site.objects.get(pk=response.data["id"])
self.assertEqual(site.custom_field_data["text_field"], self.cf_text.default)
self.assertEqual(site.custom_field_data["number_field"], self.cf_integer.default)
self.assertEqual(site.custom_field_data["boolean_field"], self.cf_boolean.default)
self.assertEqual(str(site.custom_field_data["date_field"]), self.cf_date.default)
self.assertEqual(site.custom_field_data["url_field"], self.cf_url.default)
self.assertEqual(site.custom_field_data["choice_field"], self.cf_select.default)
def test_create_single_object_with_values(self):
"""
Create a single new site with a value for each type of custom field.
"""
data = {
"name": "Site 3",
"slug": "site-3",
"status": "active",
"custom_fields": {
"text_field": "bar",
"number_field": 456,
"boolean_field": True,
"date_field": "2020-01-02",
"url_field": "http://example.com/2",
"choice_field": "Bar",
},
}
url = reverse("dcim-api:site-list")
self.add_permissions("dcim.add_site")
response = self.client.post(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
# Validate response data
response_cf = response.data["custom_fields"]
data_cf = data["custom_fields"]
self.assertEqual(response_cf["text_field"], data_cf["text_field"])
self.assertEqual(response_cf["number_field"], data_cf["number_field"])
self.assertEqual(response_cf["boolean_field"], data_cf["boolean_field"])
self.assertEqual(response_cf["date_field"], data_cf["date_field"])
self.assertEqual(response_cf["url_field"], data_cf["url_field"])
self.assertEqual(response_cf["choice_field"], data_cf["choice_field"])
# Validate database data
site = Site.objects.get(pk=response.data["id"])
self.assertEqual(site.custom_field_data["text_field"], data_cf["text_field"])
self.assertEqual(site.custom_field_data["number_field"], data_cf["number_field"])
self.assertEqual(site.custom_field_data["boolean_field"], data_cf["boolean_field"])
self.assertEqual(str(site.custom_field_data["date_field"]), data_cf["date_field"])
self.assertEqual(site.custom_field_data["url_field"], data_cf["url_field"])
self.assertEqual(site.custom_field_data["choice_field"], data_cf["choice_field"])
def test_create_multiple_objects_with_defaults(self):
"""
Create three new sites with no specified custom field values and check that each received
the default custom field values.
"""
data = (
{
"name": "Site 3",
"slug": "site-3",
"status": "active",
},
{
"name": "Site 4",
"slug": "site-4",
"status": "active",
},
{
"name": "Site 5",
"slug": "site-5",
"status": "active",
},
)
url = reverse("dcim-api:site-list")
self.add_permissions("dcim.add_site")
response = self.client.post(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(len(response.data), len(data))
for i, obj in enumerate(data):
# Validate response data
response_cf = response.data[i]["custom_fields"]
self.assertEqual(response_cf["text_field"], self.cf_text.default)
self.assertEqual(response_cf["number_field"], self.cf_integer.default)
self.assertEqual(response_cf["boolean_field"], self.cf_boolean.default)
self.assertEqual(response_cf["date_field"], self.cf_date.default)
self.assertEqual(response_cf["url_field"], self.cf_url.default)
self.assertEqual(response_cf["choice_field"], self.cf_select.default)
# Validate database data
site = Site.objects.get(pk=response.data[i]["id"])
self.assertEqual(site.custom_field_data["text_field"], self.cf_text.default)
self.assertEqual(site.custom_field_data["number_field"], self.cf_integer.default)
self.assertEqual(site.custom_field_data["boolean_field"], self.cf_boolean.default)
self.assertEqual(str(site.custom_field_data["date_field"]), self.cf_date.default)
self.assertEqual(site.custom_field_data["url_field"], self.cf_url.default)
self.assertEqual(site.custom_field_data["choice_field"], self.cf_select.default)
def test_create_multiple_objects_with_values(self):
"""
Create three new sites, each with custom fields defined.
"""
custom_field_data = {
"text_field": "bar",
"number_field": 456,
"boolean_field": True,
"date_field": "2020-01-02",
"url_field": "http://example.com/2",
"choice_field": "Bar",
}
data = (
{
"name": "Site 3",
"slug": "site-3",
"status": "active",
"custom_fields": custom_field_data,
},
{
"name": "Site 4",
"slug": "site-4",
"status": "active",
"custom_fields": custom_field_data,
},
{
"name": "Site 5",
"slug": "site-5",
"status": "active",
"custom_fields": custom_field_data,
},
)
url = reverse("dcim-api:site-list")
self.add_permissions("dcim.add_site")
response = self.client.post(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(len(response.data), len(data))
for i, obj in enumerate(data):
# Validate response data
response_cf = response.data[i]["custom_fields"]
self.assertEqual(response_cf["text_field"], custom_field_data["text_field"])
self.assertEqual(response_cf["number_field"], custom_field_data["number_field"])
self.assertEqual(response_cf["boolean_field"], custom_field_data["boolean_field"])
self.assertEqual(response_cf["date_field"], custom_field_data["date_field"])
self.assertEqual(response_cf["url_field"], custom_field_data["url_field"])
self.assertEqual(response_cf["choice_field"], custom_field_data["choice_field"])
# Validate database data
site = Site.objects.get(pk=response.data[i]["id"])
self.assertEqual(site.custom_field_data["text_field"], custom_field_data["text_field"])
self.assertEqual(
site.custom_field_data["number_field"],
custom_field_data["number_field"],
)
self.assertEqual(
site.custom_field_data["boolean_field"],
custom_field_data["boolean_field"],
)
self.assertEqual(
str(site.custom_field_data["date_field"]),
custom_field_data["date_field"],
)
self.assertEqual(site.custom_field_data["url_field"], custom_field_data["url_field"])
self.assertEqual(
site.custom_field_data["choice_field"],
custom_field_data["choice_field"],
)
def test_update_single_object_with_values(self):
"""
Update an object with existing custom field values. Ensure that only the updated custom field values are
modified.
"""
site = self.sites[1]
original_cfvs = {**site.custom_field_data}
data = {
"custom_fields": {
"text_field": "ABCD",
"number_field": 1234,
},
}
url = reverse("dcim-api:site-detail", kwargs={"pk": self.sites[1].pk})
self.add_permissions("dcim.change_site")
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
# Validate response data
response_cf = response.data["custom_fields"]
self.assertEqual(response_cf["text_field"], data["custom_fields"]["text_field"])
self.assertEqual(response_cf["number_field"], data["custom_fields"]["number_field"])
self.assertEqual(response_cf["boolean_field"], original_cfvs["boolean_field"])
self.assertEqual(response_cf["date_field"], original_cfvs["date_field"])
self.assertEqual(response_cf["url_field"], original_cfvs["url_field"])
self.assertEqual(response_cf["choice_field"], original_cfvs["choice_field"])
# Validate database data
site.refresh_from_db()
self.assertEqual(site.custom_field_data["text_field"], data["custom_fields"]["text_field"])
self.assertEqual(
site.custom_field_data["number_field"],
data["custom_fields"]["number_field"],
)
self.assertEqual(site.custom_field_data["boolean_field"], original_cfvs["boolean_field"])
self.assertEqual(site.custom_field_data["date_field"], original_cfvs["date_field"])
self.assertEqual(site.custom_field_data["url_field"], original_cfvs["url_field"])
self.assertEqual(site.custom_field_data["choice_field"], original_cfvs["choice_field"])
def test_minimum_maximum_values_validation(self):
url = reverse("dcim-api:site-detail", kwargs={"pk": self.sites[1].pk})
self.add_permissions("dcim.change_site")
self.cf_integer.validation_minimum = 10
self.cf_integer.validation_maximum = 20
self.cf_integer.save()
data = {"custom_fields": {"number_field": 9}}
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
data = {"custom_fields": {"number_field": 21}}
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
data = {"custom_fields": {"number_field": 15}}
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
def test_regex_validation(self):
url = reverse("dcim-api:site-detail", kwargs={"pk": self.sites[1].pk})
self.add_permissions("dcim.change_site")
self.cf_text.validation_regex = r"^[A-Z]{3}$" # Three uppercase letters
self.cf_text.save()
data = {"custom_fields": {"text_field": "ABC123"}}
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
data = {"custom_fields": {"text_field": "abc"}}
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
data = {"custom_fields": {"text_field": "ABC"}}
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
class CustomFieldImportTest(TestCase):
user_permissions = (
"dcim.view_site",
"dcim.add_site",
"extras.view_status",
)
@classmethod
def setUpTestData(cls):
custom_fields = (
CustomField(name="text", type=CustomFieldTypeChoices.TYPE_TEXT),
CustomField(name="integer", type=CustomFieldTypeChoices.TYPE_INTEGER),
CustomField(name="boolean", type=CustomFieldTypeChoices.TYPE_BOOLEAN),
CustomField(name="date", type=CustomFieldTypeChoices.TYPE_DATE),
CustomField(name="url", type=CustomFieldTypeChoices.TYPE_URL),
CustomField(
name="select",
type=CustomFieldTypeChoices.TYPE_SELECT,
choices=["Choice A", "Choice B", "Choice C"],
),
)
for cf in custom_fields:
cf.save()
cf.content_types.set([ContentType.objects.get_for_model(Site)])
def test_import(self):
"""
Import a Site in CSV format, including a value for each CustomField.
"""
data = (
(
"name",
"slug",
"status",
"cf_text",
"cf_integer",
"cf_boolean",
"cf_date",
"cf_url",
"cf_select",
),
(
"Site 1",
"site-1",
"active",
"ABC",
"123",
"True",
"2020-01-01",
"http://example.com/1",
"Choice A",
),
(
"Site 2",
"site-2",
"active",
"DEF",
"456",
"False",
"2020-01-02",
"http://example.com/2",
"Choice B",
),
("Site 3", "site-3", "active", "", "", "", "", "", ""),
)
csv_data = "\n".join(",".join(row) for row in data)
response = self.client.post(reverse("dcim:site_import"), {"csv": csv_data})
self.assertEqual(response.status_code, 200)
# Validate data for site 1
site1 = Site.objects.get(name="Site 1")
self.assertEqual(len(site1.custom_field_data), 6)
self.assertEqual(site1.custom_field_data["text"], "ABC")
self.assertEqual(site1.custom_field_data["integer"], 123)
self.assertEqual(site1.custom_field_data["boolean"], True)
self.assertEqual(site1.custom_field_data["date"], "2020-01-01")
self.assertEqual(site1.custom_field_data["url"], "http://example.com/1")
self.assertEqual(site1.custom_field_data["select"], "Choice A")
# Validate data for site 2
site2 = Site.objects.get(name="Site 2")
self.assertEqual(len(site2.custom_field_data), 6)
self.assertEqual(site2.custom_field_data["text"], "DEF")
self.assertEqual(site2.custom_field_data["integer"], 456)
self.assertEqual(site2.custom_field_data["boolean"], False)
self.assertEqual(site2.custom_field_data["date"], "2020-01-02")
self.assertEqual(site2.custom_field_data["url"], "http://example.com/2")
self.assertEqual(site2.custom_field_data["select"], "Choice B")
# No custom field data should be set for site 3
site3 = Site.objects.get(name="Site 3")
self.assertFalse(any(site3.custom_field_data.values()))
def test_import_missing_required(self):
"""
Attempt to import an object missing a required custom field.
"""
# Set one of our CustomFields to required
CustomField.objects.filter(name="text").update(required=True)
form_data = {
"name": "Site 1",
"slug": "site-1",
}
form = SiteCSVForm(data=form_data)
self.assertFalse(form.is_valid())
self.assertIn("cf_text", form.errors)
def test_import_invalid_choice(self):
"""
Attempt to import an object with an invalid choice selection.
"""
form_data = {"name": "Site 1", "slug": "site-1", "cf_select": "Choice X"}
form = SiteCSVForm(data=form_data)
self.assertFalse(form.is_valid())
self.assertIn("cf_select", form.errors)
class CustomFieldModelTest(TestCase):
@classmethod
def setUpTestData(cls):
cf1 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name="foo")
cf1.save()
cf1.content_types.set([ContentType.objects.get_for_model(Site)])
cf2 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name="bar")
cf2.save()
cf2.content_types.set([ContentType.objects.get_for_model(Rack)])
def test_cf_data(self):
"""
Check that custom field data is present on the instance immediately after being set and after being fetched
from the database.
"""
site = Site(name="Test Site", slug="test-site")
# Check custom field data on new instance
site.cf["foo"] = "abc"
self.assertEqual(site.cf["foo"], "abc")
# Check custom field data from database
site.save()
site = Site.objects.get(name="Test Site")
self.assertEqual(site.cf["foo"], "abc")
def test_invalid_data(self):
"""
Setting custom field data for a non-applicable (or non-existent) CustomField should raise a ValidationError.
"""
site = Site(name="Test Site", slug="test-site")
# Set custom field data
site.cf["foo"] = "abc"
site.cf["bar"] = "def"
with self.assertRaises(ValidationError):
site.clean()
del site.cf["bar"]
site.clean()
def test_missing_required_field(self):
"""
Check that a ValidationError is raised if any required custom fields are not present.
"""
cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name="baz", required=True)
cf3.save()
cf3.content_types.set([ContentType.objects.get_for_model(Site)])
site = Site(name="Test Site", slug="test-site")
# Set custom field data with a required field omitted
site.cf["foo"] = "abc"
with self.assertRaises(ValidationError):
site.clean()
site.cf["baz"] = "def"
site.clean()
class CustomFieldFilterTest(TestCase):
queryset = Site.objects.all()
filterset = SiteFilterSet
@classmethod
def setUpTestData(cls):
obj_type = ContentType.objects.get_for_model(Site)
# Integer filtering
cf = CustomField(name="cf1", type=CustomFieldTypeChoices.TYPE_INTEGER)
cf.save()
cf.content_types.set([obj_type])
# Boolean filtering
cf = CustomField(name="cf2", type=CustomFieldTypeChoices.TYPE_BOOLEAN)
cf.save()
cf.content_types.set([obj_type])
# Exact text filtering
cf = CustomField(
name="cf3",
type=CustomFieldTypeChoices.TYPE_TEXT,
filter_logic=CustomFieldFilterLogicChoices.FILTER_EXACT,
)
cf.save()
cf.content_types.set([obj_type])
# Loose text filtering
cf = CustomField(
name="cf4",
type=CustomFieldTypeChoices.TYPE_TEXT,
filter_logic=CustomFieldFilterLogicChoices.FILTER_LOOSE,
)
cf.save()
cf.content_types.set([obj_type])
# Date filtering
cf = CustomField(name="cf5", type=CustomFieldTypeChoices.TYPE_DATE)
cf.save()
cf.content_types.set([obj_type])
# Exact URL filtering
cf = CustomField(
name="cf6",
type=CustomFieldTypeChoices.TYPE_URL,
filter_logic=CustomFieldFilterLogicChoices.FILTER_EXACT,
)
cf.save()
cf.content_types.set([obj_type])
# Loose URL filtering
cf = CustomField(
name="cf7",
type=CustomFieldTypeChoices.TYPE_URL,
filter_logic=CustomFieldFilterLogicChoices.FILTER_LOOSE,
)
cf.save()
cf.content_types.set([obj_type])
# Selection filtering
cf = CustomField(
name="cf8",
type=CustomFieldTypeChoices.TYPE_SELECT,
choices=["Foo", "Bar", "Baz"],
)
cf.save()
cf.content_types.set([obj_type])
Site.objects.create(
name="Site 1",
slug="site-1",
custom_field_data={
"cf1": 100,
"cf2": True,
"cf3": "foo",
"cf4": "foo",
"cf5": "2016-06-26",
"cf6": "http://foo.example.com/",
"cf7": "http://foo.example.com/",
"cf8": "Foo",
},
)
Site.objects.create(
name="Site 2",
slug="site-2",
custom_field_data={
"cf1": 200,
"cf2": False,
"cf3": "foobar",
"cf4": "foobar",
"cf5": "2016-06-27",
"cf6": "http://bar.example.com/",
"cf7": "http://bar.example.com/",
"cf8": "Bar",
},
)
Site.objects.create(name="Site 3", slug="site-3", custom_field_data={})
def test_filter_integer(self):
self.assertEqual(self.filterset({"cf_cf1": 100}, self.queryset).qs.count(), 1)
def test_filter_boolean(self):
self.assertEqual(self.filterset({"cf_cf2": True}, self.queryset).qs.count(), 1)
self.assertEqual(self.filterset({"cf_cf2": False}, self.queryset).qs.count(), 1)
def test_filter_text(self):
self.assertEqual(self.filterset({"cf_cf3": "foo"}, self.queryset).qs.count(), 1)
self.assertEqual(self.filterset({"cf_cf4": "foo"}, self.queryset).qs.count(), 2)
def test_filter_date(self):
self.assertEqual(self.filterset({"cf_cf5": "2016-06-26"}, self.queryset).qs.count(), 1)
def test_filter_url(self):
self.assertEqual(
self.filterset({"cf_cf6": "http://foo.example.com/"}, self.queryset).qs.count(),
1,
)
self.assertEqual(self.filterset({"cf_cf7": "example.com"}, self.queryset).qs.count(), 2)
def test_filter_select(self):
self.assertEqual(self.filterset({"cf_cf8": "Foo"}, self.queryset).qs.count(), 1)
|
[
"lampwins@gmail.com"
] |
lampwins@gmail.com
|
6085fa5fa51974ff53fe7e99f64baa10359cb313
|
5b51f0ccdfdc6f9d74a0259fc723c1557d115776
|
/ex39.py
|
8526dd6345ee6b626f7319aec36ceeb66aa2c2ed
|
[] |
no_license
|
vacpy/learn_py
|
4943b65c7b462d0686d0a6dace9918c69d713cc2
|
cb2b37216ba8410cb621648acbf241b27315e301
|
refs/heads/master
| 2020-03-12T19:48:38.155583
| 2018-05-01T14:56:51
| 2018-05-01T14:56:51
| 130,792,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,155
|
py
|
#!/usr/bin/python
# _*_coding:UTF-8_*_
#################### Dictionary lovely dictionary #####################
# Create a mapping of state to abbreviation
states ={
'Oregon':'OR',
'Florida':'FL',
'California':'CA',
'New York':'NY',
'Michigan':'MI'
} # dictionary of state abbreviations: key = full name, value = abbreviation
#Create a basic set of states and some cities in them
cities = {
'CA':'San francisco',
'MI':'Detroit',
'FL':'Jacksonville'
} # dictionary of cities: key = state abbreviation, value = city name
# add some more cities (two more abbreviation/city pairs)
cities['NY'] = 'New York'
cities['OR'] = 'Portland'
#print out some cities
print('-' * 30) # print 30 "-" characters (same below)
print("NY State has:",cities['NY'])
print("OR state has :",cities['OR'])
#print some states
print('-' * 30)
print("Michigan's abbreviation is:",states['Michigan'])
print("Florida's abbreviation is :",states['Florida'])
#do it by using the state then cities dict
print('-' * 30)
print("Michigan has:",cities[states['Michigan']])
print("Florida has:",cities[states['Florida']])
#print every state abbreviation
print('-' * 30)
for state,abbrev in states.items():
print("%s is abbreviated %s" % (state,abbrev))
#print every city in state
print('-' * 30)
for abbrev,city in cities.items():
print("%s has the city %s" % (abbrev,city))
#now do both at the same time
print('-' * 30)
for state,abbrev in states.items():
print(" %s state is abbreviated %s and has city %s" % (state,abbrev,cities[abbrev]))
print('-' * 30)
# safely get an abbreviation by state that might not be there
state = states.get('Texas', None)
if not state:
print("Sorry, no Texas.")
# get a city with a default value
city = cities.get('TX', 'Does Not Exist')
print("The city for the state 'TX' is : %s " % city)
|
[
"noreply@github.com"
] |
vacpy.noreply@github.com
|
2a96aaa69a1a43083900fb4b48c371c79a72b6f6
|
20f5b61affc0bb74c97fd1239f3c3f2a06b47849
|
/net/loss.py
|
feb3332623b2c62e1f64aeab4e4ad27d57ebe44d
|
[] |
no_license
|
n063h/ml_final
|
2f54fc9adc1a15a08d85bf2bb18fdd3218ec8e71
|
3d3adb9941cf798bb813757339ee7accadb234fd
|
refs/heads/main
| 2023-02-04T00:01:37.177865
| 2020-12-27T17:23:39
| 2020-12-27T17:23:39
| 322,206,279
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,694
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class featureMapLoss(nn.Module):
def __init__(self,loss='yolo'):
super(featureMapLoss, self).__init__()
self.loss=loss
def forward(self, pred_tensor, target_tensor):
'''
pred_tensor: (tensor) size(batchsize,S,S,15) [x,y,w,h,c]
target_tensor: (tensor) size(batchsize,S,S,15)
'''
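# Pull the class vector at each sample's object-center cell, then score it against the target with the configured loss.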
batch_size, s, _, cls_size = pred_tensor.shape
# nonzero[i] = [j, k]: for sample i, grid cell (j, k) is the object center
nonzero=torch.nonzero(target_tensor)[:,1:3].to(device)
class_pred=torch.zeros(batch_size,cls_size).to(device)
class_target=torch.zeros(batch_size,cls_size).to(device)
for i in range(batch_size):
j,k=nonzero[i]
class_target[i]=target_tensor[i][j][k]
class_pred[i]=pred_tensor[i][j][k]
if self.loss=='yolo':
class_loss = F.mse_loss(class_pred, class_target, reduction='mean') # size_average=True is deprecated; reduction='mean' is equivalent
elif self.loss=='softmax_yolo':
class_pred = class_pred.softmax(dim=1)
class_loss = F.mse_loss(class_pred, class_target, reduction='mean')
else:
# keep a single class index per sample; CrossEntropyLoss applies softmax internally
class_target = class_target.argmax(axis=1)
loss_func=nn.CrossEntropyLoss()
class_loss = loss_func(class_pred, class_target)
return class_loss
if __name__ == '__main__':
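# Minimal smoke test: a batch of 10 feature maps on a 6x6 grid with 3 classes; one center cell per sample is set in the target.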
r1 = torch.randn(10, 6, 6, 3)
r2 = torch.zeros(10, 6, 6, 3)
for i in range(10):
r2[i][2][3][1] = 1
l = featureMapLoss()
s1 = l(r1, r2) # call the module itself rather than forward() directly
|
[
"20210240109@fudan.edu.cn"
] |
20210240109@fudan.edu.cn
|
ec43c00394ec7bc8a8dccee4e12ea3ec428e49f6
|
2af4823ae83fbcc780ef538bd02fa5bf3a51208c
|
/ABC129/A.py
|
17ef028046926f60cdd3a1b8c937294515ab1d34
|
[] |
no_license
|
jre233kei/procon-atcoder
|
102420cc246a5123ac9774f8b28440f1d8b70b0f
|
c463c9c92d45f19fba32d0c8a25f97d73db67bc5
|
refs/heads/master
| 2022-12-04T09:30:39.271433
| 2020-08-14T11:38:46
| 2020-08-14T11:38:46
| 276,779,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58
|
py
|
p,q,r = map(int, input().split())
print(min(p+q,q+r,p+r))
|
[
"jre233kei+github@gmail.com"
] |
jre233kei+github@gmail.com
|
59c5b022f4fb9fa93415e6168bf273a82b3edd24
|
87e0a352cb1081b6d9ba414c32b672bed8ad70f4
|
/grid2D.py
|
51df9980ff006f03f260f4e5bf834fd55b6ce483
|
[] |
no_license
|
ykzhou/lieb-thirring
|
5f0df15fff702122d4beb15404ccac5df16477b3
|
5341371b58374b673ac06ab8f42323ebec3e9bbe
|
refs/heads/master
| 2020-04-16T07:42:37.394694
| 2012-04-26T13:05:57
| 2012-04-26T13:05:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
from __future__ import division
import functions
import numpy as np
import scipy
import scipy.sparse
class Grid2D:
def __init__(self,N=100,L=10.0,d=1):
self.N = N
self.L = L
self.d = d
self.onedgrid = np.linspace(-L,L,N)
self.x,self.y = np.meshgrid(self.onedgrid, self.onedgrid)
self.r = np.sqrt(self.x**2 + self.y**2)
self.theta = np.arctan2(self.x,self.y)
self.delta = self.onedgrid[1] - self.onedgrid[0]
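# Assemble the 2D Laplacian from the 1D second-difference matrix via Kronecker products: Delta = kron(Delta1D, I) + kron(I, Delta1D).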
Delta1D = functions.tridiag(N,-1,2,-1)/self.delta**2
eye = functions.tridiag(N,0,1,0)
self.Delta = scipy.sparse.kron(Delta1D,eye,'csr') + scipy.sparse.kron(eye,Delta1D,'csr')
|
[
"antoine.levitt@gmail.com"
] |
antoine.levitt@gmail.com
|
82a04ed80cb067a982cfd53c08ad02cbef9bec70
|
e24ea16a26386cabda970299f4cf6ab8905ec307
|
/tests/data/instance_test.py
|
7c8ef03f29f8e329d39976d5b3073ce1d127d206
|
[
"Apache-2.0"
] |
permissive
|
eladsegal/allennlp
|
c3f7ee9c0586ba63f435442abfff15546524450b
|
f1f803e85aff1b67319c8cf51674095b9a5a6c18
|
refs/heads/master
| 2023-03-18T21:48:31.101172
| 2021-07-29T20:52:34
| 2021-07-29T20:52:34
| 221,441,294
| 3
| 1
|
Apache-2.0
| 2023-03-07T13:58:12
| 2019-11-13T11:15:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,643
|
py
|
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token
class TestInstance(AllenNlpTestCase):
def test_instance_implements_mutable_mapping(self):
words_field = TextField([Token("hello")], {})
label_field = LabelField(1, skip_indexing=True)
instance = Instance({"words": words_field, "labels": label_field})
assert instance["words"] == words_field
assert instance["labels"] == label_field
assert len(instance) == 2
keys = {k for k, v in instance.items()}
assert keys == {"words", "labels"}
values = [v for k, v in instance.items()]
assert words_field in values
assert label_field in values
def test_duplicate(self):
# Verify the `duplicate()` method works with a `PretrainedTransformerIndexer` in
# a `TextField`. See https://github.com/allenai/allennlp/issues/4270.
instance = Instance(
{
"words": TextField(
[Token("hello")], {"tokens": PretrainedTransformerIndexer("bert-base-uncased")}
)
}
)
other = instance.duplicate()
assert other == instance
# Adding new fields to the original instance should not affect the duplicate.
instance.add_field("labels", LabelField("some_label"))
assert "labels" not in other.fields
assert other != instance # sanity check on the '__eq__' method.
|
[
"noreply@github.com"
] |
eladsegal.noreply@github.com
|
7dd7314d9e9433d8012c255fa879d845639f1134
|
5516cf5a3aa871d78791ba5669a16368c179d645
|
/runtests.py
|
9c8f8b24a7a99e2bebe3b9e7cc1c454adafb661b
|
[] |
no_license
|
lambdalisue/django-observer
|
1af577e4e02d710e8806312ad8d195142e9f68f4
|
052e0da55eefc8072cc78e0b9d72bc29c4e528c0
|
refs/heads/develop
| 2021-01-01T16:13:01.150667
| 2018-03-06T01:41:47
| 2018-03-06T01:41:47
| 3,034,956
| 27
| 4
| null | 2018-03-06T01:41:48
| 2011-12-22T16:19:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
#!/usr/bin/env python
#==============================================================================
# A generic django app test running script.
#
# Author: Alisue <lambdalisue@hashnote.net>
# License: MIT license
#==============================================================================
import os
import sys
import optparse # argparse is preferred but it requires Python 2.7 or higher
# You can define the default test apps here
DEFAULT_TEST_APPS = (
'observer',
)
def console_main(args=None):
parser = optparse.OptionParser(usage="python runtests.py [options] <apps>")
parser.add_option('-v', '--verbosity', default='1',
choices=('0', '1', '2', '3'),
help=("Verbosity level; 0=minimal output, 1=normal "
"output, 2=verbose output, 3=very verbose "
"output"))
parser.add_option('-i', '--interactive', action='store_true')
parser.add_option('-b', '--base-dir', default=None,
help=("The base directory of the code. Used for "
"python 3 compiled codes."))
opts, apps = parser.parse_args(args)
if len(apps) == 0:
apps = DEFAULT_TEST_APPS
run_tests(apps,
verbosity=int(opts.verbosity),
interactive=opts.interactive,
base_dir=opts.base_dir)
def run_tests(app_tests, verbosity=1, interactive=False, base_dir=None):
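# Make the package sources (src/) and test settings (tests/) importable, point Django at the test settings, then run the given app tests.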
base_dir = base_dir or os.path.dirname(__file__)
sys.path.insert(0, os.path.join(base_dir, 'src'))
sys.path.insert(0, os.path.join(base_dir, 'tests'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=verbosity,
interactive=interactive, failfast=False)
failures = test_runner.run_tests(app_tests)
sys.exit(bool(failures))
if __name__ == '__main__':
console_main()
|
[
"lambdalisue@hashnote.net"
] |
lambdalisue@hashnote.net
|
76481456f12c3d410caac04e2b3c59996e86c378
|
8e7b60de4dc314a4419d86067db4a65de847bff1
|
/Assignment3/gui.py
|
18e6634070dad5d7ac1825fc746ae872355b5c6e
|
[] |
no_license
|
teofilp/Artificial-Intelligence
|
c3ee4a137298fb01770c91c59fc2dfe783e12c9f
|
0950099fd0a6fecbf44a96f6149de217a8829136
|
refs/heads/main
| 2023-05-12T04:45:29.496676
| 2021-06-03T11:10:30
| 2021-06-03T11:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,506
|
py
|
# -*- coding: utf-8 -*-
from pygame.locals import *
import pygame, time
from utils import *
from domain import *
def initPyGame(dimension):
# init the pygame
pygame.init()
logo = pygame.image.load("logo32x32.png")
pygame.display.set_icon(logo)
pygame.display.set_caption("drone exploration with AE")
# create a surface on screen that has the size of 800 x 480
screen = pygame.display.set_mode(dimension)
screen.fill(WHITE)
return screen
def closePyGame():
# closes the pygame
running = True
# loop for events
while running:
# event handling, gets all event from the event queue
for event in pygame.event.get():
# only do something if the event is of type QUIT
if event.type == pygame.QUIT:
# change the value to False, to exit the main loop
running = False
pygame.quit()
def movingDrone(currentMap, path, speed=1, markSeen = True):
# animation of a drone on a path
screen = initPyGame((currentMap.n * 20, currentMap.m * 20))
drona = pygame.image.load("drona.png")
for i in range(len(path)):
screen.blit(image(currentMap), (0,0))
if markSeen:
brick = pygame.Surface((20,20))
brick.fill(GREEN)
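# For every cell on the path visited so far, ray-cast along each direction in v (presumably direction vectors star-imported from utils) until a wall, marking the cells the drone has seen.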
for j in range(i+1):
for var in v:
x = path[j][0]
y = path[j][1]
while ((0 <= x + var[0] < currentMap.n and
0 <= y + var[1] < currentMap.m) and
currentMap.surface[x + var[0]][y + var[1]] != 1):
x = x + var[0]
y = y + var[1]
screen.blit(brick, ( y * 20, x * 20))
screen.blit(drona, (path[i][1] * 20, path[i][0] * 20))
pygame.display.flip()
time.sleep(0.5 * speed)
closePyGame()
def image(currentMap, colour = BLUE, background = WHITE):
# creates the image of a map
imagine = pygame.Surface((currentMap.n * 20, currentMap.m * 20))
brick = pygame.Surface((20,20))
brick.fill(colour)
imagine.fill(background)
for i in range(currentMap.n):
for j in range(currentMap.m):
if (currentMap.surface[i][j] == 1):
imagine.blit(brick, (j * 20, i * 20))
return imagine
|
[
"noreply@github.com"
] |
teofilp.noreply@github.com
|
e37c3f1033d9c67cfcf99289be5263a434d8c8ba
|
8ef8e6818c977c26d937d09b46be0d748022ea09
|
/cv/semantic_segmentation/unet++/pytorch/tools/convert_datasets/drive.py
|
4ede22c59d5268852092f96560f28285dc1da66d
|
[
"Apache-2.0"
] |
permissive
|
Deep-Spark/DeepSparkHub
|
eb5996607e63ccd2c706789f64b3cc0070e7f8ef
|
9d643e88946fc4a24f2d4d073c08b05ea693f4c5
|
refs/heads/master
| 2023-09-01T11:26:49.648759
| 2023-08-25T01:50:18
| 2023-08-25T01:50:18
| 534,133,249
| 7
| 6
|
Apache-2.0
| 2023-03-28T02:54:59
| 2022-09-08T09:07:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,237
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import tempfile
import zipfile
import cv2
import mmcv
def parse_args():
parser = argparse.ArgumentParser(
description='Convert DRIVE dataset to mmsegmentation format')
parser.add_argument(
'training_path', help='the training part of DRIVE dataset')
parser.add_argument(
'testing_path', help='the testing part of DRIVE dataset')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
training_path = args.training_path
testing_path = args.testing_path
if args.out_dir is None:
out_dir = osp.join('data', 'DRIVE')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(out_dir)
mmcv.mkdir_or_exist(osp.join(out_dir, 'images'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation'))
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
print('Extracting training.zip...')
zip_file = zipfile.ZipFile(training_path)
zip_file.extractall(tmp_dir)
print('Generating training dataset...')
now_dir = osp.join(tmp_dir, 'training', 'images')
for img_name in os.listdir(now_dir):
img = mmcv.imread(osp.join(now_dir, img_name))
mmcv.imwrite(
img,
osp.join(
out_dir, 'images', 'training',
osp.splitext(img_name)[0].replace('_training', '') +
'.png'))
now_dir = osp.join(tmp_dir, 'training', '1st_manual')
for img_name in os.listdir(now_dir):
cap = cv2.VideoCapture(osp.join(now_dir, img_name))
ret, img = cap.read()
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'training',
osp.splitext(img_name)[0] + '.png'))
print('Extracting test.zip...')
zip_file = zipfile.ZipFile(testing_path)
zip_file.extractall(tmp_dir)
print('Generating validation dataset...')
now_dir = osp.join(tmp_dir, 'test', 'images')
for img_name in os.listdir(now_dir):
img = mmcv.imread(osp.join(now_dir, img_name))
mmcv.imwrite(
img,
osp.join(
out_dir, 'images', 'validation',
osp.splitext(img_name)[0].replace('_test', '') + '.png'))
now_dir = osp.join(tmp_dir, 'test', '1st_manual')
if osp.exists(now_dir):
for img_name in os.listdir(now_dir):
cap = cv2.VideoCapture(osp.join(now_dir, img_name))
ret, img = cap.read()
# The annotation img should be divided by 128, because some of
# the annotation imgs are not standard. We should set a
# threshold to convert the nonstandard annotation imgs. The
# value divided by 128 is equivalent to '1 if value >= 128
# else 0'
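# e.g. an annotation pixel of 200 becomes 200 // 128 = 1 (vessel), while 60 becomes 60 // 128 = 0 (background)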
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'validation',
osp.splitext(img_name)[0] + '.png'))
now_dir = osp.join(tmp_dir, 'test', '2nd_manual')
if osp.exists(now_dir):
for img_name in os.listdir(now_dir):
cap = cv2.VideoCapture(osp.join(now_dir, img_name))
ret, img = cap.read()
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'validation',
osp.splitext(img_name)[0] + '.png'))
print('Removing the temporary files...')
print('Done!')
if __name__ == '__main__':
main()
|
[
"mingjiang.li@iluvatar.ai"
] |
mingjiang.li@iluvatar.ai
|
8b4bb0160b2b5b09c9b5cd8ecf09b5b6879dafaf
|
6817457f2f7cb635e84d5ac23c76873628fb04cf
|
/src/dama/data/csv.py
|
238e7ff00d7b2d00f3f5813c3dd483769a1050b8
|
[
"Apache-2.0"
] |
permissive
|
elaeon/dama_ml
|
5d9a63e0daabe332a08b13813de57d9ed2608015
|
8b56c62a28c69987fc5dbd8a47406a3a22214371
|
refs/heads/master
| 2021-10-26T05:24:10.166028
| 2019-04-11T00:55:44
| 2019-04-11T00:55:44
| 58,218,206
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,797
|
py
|
# https://stackoverflow.com/questions/13044562/python-mechanism-to-identify-compressed-file-type-and-uncompress
import csv
import zipfile
# import bz2
# import gzip
import numpy as np
from io import StringIO, TextIOWrapper
from operator import itemgetter
from tqdm import tqdm
def get_compressed_file_manager_ext(filepath):
ext = filepath.split(".").pop()
for cls in (File, ZIPFile):
if cls.proper_extension == ext:
return cls(filepath)
# class PandasEngine:
# def read_csv(*args, **kwargs):
# if "batch_size" in kwargs:
# kwargs['chunksize'] = kwargs['batch_size']
# batch_size = kwargs['batch_size']
# del kwargs['batch_size']
# else:
# batch_size = 0
# df = pd.read_csv(*args, **kwargs)
# it = Iterator(df)
# if batch_size == 0:
# return it
# else:
# return BatchIterator(it, batch_size=batch_size)
class File(object):
magic = None
file_type = 'csv'
mime_type = 'text/plain'
proper_extension = 'csv'
def __init__(self, filepath):
self.filepath = filepath
self.engine = None # PandasEngine
def read(self, columns=None, exclude: bool=False, df: bool=True, filename: str=None, **kwargs):
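# Build an optional column filter: exclude=True keeps every column not listed; exclude=False with a column list keeps only those; otherwise all columns are read.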
if exclude is True:
cols = lambda col: col not in columns
elif exclude is False and columns:
cols = lambda col: col in columns
else:
cols = None
return self.engine.read_csv(self.filepath, usecols=cols, **kwargs)
def write(self, iterator, header=None, delimiter: str=",") -> None:
with open(self.filepath, 'w') as f:
csv_writer = csv.writer(f, delimiter=delimiter)
if header is not None:
csv_writer.writerow(header)
for row in tqdm(iterator):
csv_writer.writerow(row)
@property
def dtypes(self):
return self.engine.read_csv(self.filepath).dtypes
@classmethod
def is_magic(self, data):
if self.magic is not None:
return data.startswith(self.magic)
return True
class ZIPFile(File):
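# '\x50\x4b\x03\x04' is the standard ZIP local file header signature ("PK\x03\x04").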
magic = '\x50\x4b\x03\x04'
file_type = 'zip'
mime_type = 'compressed/zip'
proper_extension = 'zip'
def read(self, filename=None, columns=None, exclude=False, batch_type="df", **kwargs):
if filename is None and batch_type == "df":
return super(ZIPFile, self).read(columns=columns, exclude=exclude, **kwargs)
else:
iter_ = self._read_another_file(filename, columns, kwargs.get("delimiter"))
dtype = [(col, np.dtype("object")) for col in next(iter_)]
nrows = kwargs.get("nrows", None)
# if nrows is None:
# it = Iterator(iter_, dtypes=dtype).batchs(batch_size=kwargs.get("batch_size", 0), batch_type=batch_type)
# else:
# it = Iterator(iter_, dtypes=dtype)[:nrows].batchs(batch_size=kwargs.get("batch_size", 0), batch_type=batch_type)
# return it
def _read_another_file(self, filename, columns, delimiter):
with zipfile.ZipFile(self.filepath, 'r') as zf:
# files = zf.namelist()
with zf.open(filename, 'r') as f:
csv_reader = csv.reader(TextIOWrapper(f, encoding="utf8"), delimiter=delimiter)
yield next(csv_reader)
if columns is None:
for row in csv_reader:
yield row
else:
for row in csv_reader:
yield itemgetter(*columns)(row)
def write(self, iterator, header=None, filename=None, delimiter=",") -> None:
with zipfile.ZipFile(self.filepath, "w", zipfile.ZIP_DEFLATED) as zf:
output = StringIO()
csv_writer = csv.writer(output, delimiter=delimiter)
if header is not None:
csv_writer.writerow(header)
for row in tqdm(iterator):
csv_writer.writerow(row)
if filename is None:
filename = self.filepath.split("/")[-1]
filename = filename.split(".")[:-1]
if len(filename) == 1:
filename = "{}.csv".format(filename[0])
else:
filename = ".".join(filename)
zf.writestr(filename, output.getvalue())
output.close()
#class BZ2File (CompressedFile):
# magic = '\x42\x5a\x68'
# file_type = 'bz2'
# mime_type = 'compressed/bz2'
# def open(self):
# return bz2.BZ2File(self.f)
#class GZFile (CompressedFile):
# magic = '\x1f\x8b\x08'
# file_type = 'gz'
# mime_type = 'compressed/gz'
# def open(self):
# return gzip.GzipFile(self.f)
|
[
"mara80@gmail.com"
] |
mara80@gmail.com
|
4faf6c7566c0ff3ad6f931736c833bc214d55dae
|
ec94aebf07c808e21f5083042f4e8220c957b0da
|
/speedUpdating.py
|
b703139a08b2fc79afbfc3a8936e38a7fede8f29
|
[] |
no_license
|
Attler/speed-updating
|
ef50e0f969a06e6758b6552c6ec45cdf49146ea7
|
66103742c557bdca806d61529d46ebc6f8564f3a
|
refs/heads/master
| 2022-12-12T13:50:50.971079
| 2020-09-13T14:16:22
| 2020-09-13T14:16:22
| 294,152,026
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,788
|
py
|
import csv
import math
from random import randrange, shuffle
import copy
input_file = 'Speed Updating Questions.csv'
# Reads input of .csv file of list of persons and their answers to the form questions
# def read_form(input):
# with open(input, encoding="utf8") as csvfile:
# people = []
# reader = csv.reader(csvfile)
# for row in reader:
# people.append(row)
# q_text = people[1][1:]
# people = people[1:]
# for index, person in enumerate(people):
# people[index] = person[1:]
# return people
# Returns squared difference in answer values for question q_index and persons p1 and p2
def chat_score(chat):
p1, p2, q_index = chat
p1_ans = p1[q_index]
p2_ans = p2[q_index]
if (p1_ans is None) or (p2_ans is None):
return (-100) # large negative for questions people don't want to talk about
return (abs(p1_ans - p2_ans)) ** 2
# Returns overall match score of full arrangement of persons
def arrangement_score(arrangement):
return sum(map(chat_score, arrangement))
def init_arrangement(people):
n_people = len(people)
n_ques = len(people[0]) - 2
arrangement = []
# shuffle(people)
for i in range(0, n_people, 2): # assuming even number of people
p1 = people[i]
p2 = people[i + 1]
q_index = randrange(n_ques) + 1 # random question index
chat = (p1, p2, q_index)
# print('pair:',people[i][0],'&',people[i+1][0])
if len(chat) == 3:
arrangement.append(chat)
return arrangement
def random_step(arrangement):
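# Propose a neighboring arrangement: swap the first participant between two randomly chosen chats, leaving each chat's question in place.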
trial_arrangement = arrangement.copy() # create new list so original isn't modified
n1 = randrange(len(trial_arrangement)) # swap random pair
n2 = randrange(len(trial_arrangement))
s1 = trial_arrangement[n1][0]
s2 = trial_arrangement[n2][0]
trial_arrangement[n1] = (s2, trial_arrangement[n1][1], trial_arrangement[n1][2])
trial_arrangement[n2] = (s1, trial_arrangement[n2][1], trial_arrangement[n2][2])
# print('\nRandomstep')
# for pair in arrangement:
# print('pair:',pair[0][0],'&',pair[1][0])
# print('score',arrangement_score(arrangement))
return trial_arrangement
def local_search(people, steps):
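# Greedy hill climbing: start from a random pairing and keep any random swap that strictly increases the arrangement score.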
init_arr = init_arrangement(people)
current = init_arr
for i in range(steps):
new = random_step(current)
if (arrangement_score(current) < arrangement_score(new)): # find better arrangement
change = arrangement_score(new) - arrangement_score(current)
print(' ' + str(arrangement_score(current)))
current = new
print(' +', change)
return current
##---------------------------------------------##
answer_translation = {'Strongly Agree': 3,
'Agree': 2,
'Somewhat Agree': 1,
'Unsure': 0,
'Somewhat Disagree': -1,
'Disagree': -2,
'Strongly Disagree': -3,
'': None,
'I don\'t want to talk about this question': None}
##---------------------------------------------##
i = 0
num_iterations = 100
overall_best_score = 0
overall_best_arrangement = []
while i < num_iterations:
# load data from csv
with open(input_file, encoding="utf8") as csvfile:
people = []
reader = csv.reader(csvfile)
for row in reader:
people.append(row)
q_text = people[0][1:]
people = people[1:]
for index, person in enumerate(people):
people[index] = person[1:]
print(people[0])
# store data in numerical form
for person in people:
for index, answer in enumerate(person):
if index == 0: # ignore first column
continue
person[index] = answer_translation[answer] # get answer value
print(people[0])
output = local_search(people, steps=50)
output_score = arrangement_score(output)
print(str(i) + ': final overall score: ' + str(output_score))
# for pair in output:
# question_index = pair[2]
# print('pair:',pair[0][0],'&',pair[1][0])
# print('question:',q_text[question_index])
# print(pair[0][question_index])
# print(pair[1][question_index])
if (output_score > overall_best_score):
overall_best_score = output_score
overall_best_arrangement = output
i = i + 1
print('Finished!')
print('best score:', overall_best_score)
for pair in overall_best_arrangement:
question_index = pair[2]
print('pair:', pair[0][0], '&', pair[1][0])
print('question:', q_text[question_index])
print(pair[0][question_index], 'vs', pair[1][question_index])
|
[
"Jason.hepbun@students.mq.edu.au"
] |
Jason.hepbun@students.mq.edu.au
|
ec7bb247496ec2cb345949a5669594ad7dbcdaa0
|
b44ee1ca67e5e1abf38535764ba51d392f9bc74c
|
/question_01_99/question_21.py
|
7627dba8f1ec1db0e9bfbec5abe55035352502a7
|
[] |
no_license
|
song61069140/LeetCodePython
|
c9cec3735e7006c26b4f159543996160ae29382b
|
2a953428018d1656695dbfe58d0bc7c9888224da
|
refs/heads/master
| 2020-08-01T05:12:02.451158
| 2019-11-23T01:16:12
| 2019-11-23T01:16:12
| 210,875,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
"""
将两个有序链表合并为一个新的有序链表并返回。新链表是通过拼接给定的两个链表的所有节点组成的。
示例:
输入:1->2->4, 1->3->4
输出:1->1->2->3->4->4
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/merge-two-sorted-lists
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
if l1 and l2:
if l1.val > l2.val:
l1, l2 = l2, l1
l1.next = self.mergeTwoLists(l1.next, l2)
return l1 or l2
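# A minimal usage sketch (illustrative, not part of the LeetCode submission):
# builds the two example lists, merges them, and prints the merged values.
if __name__ == '__main__':
    def build(values):
        head = None
        for v in reversed(values):
            node = ListNode(v)
            node.next = head
            head = node
        return head
    merged = Solution().mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4]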
|
[
"song61069140@gmail.com"
] |
song61069140@gmail.com
|
248015e3a03e3313933cea852de548f414c40a1c
|
737fadb5c9ce8baca27e052e052d516c93c14173
|
/lainxi.py
|
c6f6cbaf986d2413308dfe8d7ab689871138b6b0
|
[] |
no_license
|
Winona1234/maoyan
|
afe6a639a6b8522c203adb4a984c0242b5ea5e33
|
18e94dc118c53bd478ff9b6e0c3e17ecf7776273
|
refs/heads/master
| 2023-01-04T16:24:25.753013
| 2020-10-28T07:54:11
| 2020-10-28T07:54:11
| 307,943,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# -*- coding=utf-8 -*-
# @Time: 2020/10/11 12:04 PM
# @Author: 王文娜
# @File:lainxi.py
# @Software:PyCharm
print('hello world'.strip())
print('hello world'.split(' '))
print('hello world'.replace(' ','#'))
|
[
"wangwena2018@163.com"
] |
wangwena2018@163.com
|
4be2d42dd8b3ba052d22c1ec358b13481d058d20
|
6b899fe7bb952fe46ef6ae9cccdca4f2e0cfe89e
|
/conf.py
|
5c4d31ab9c7b8a2722fff897537f203cf379f997
|
[] |
no_license
|
cknowledge/docs
|
8d1461c299f578207aeaf01916d05c299b97f49c
|
62983632ef5208dea8334db4cd78dc79eda23592
|
refs/heads/master
| 2022-12-27T04:20:13.076580
| 2020-10-11T14:32:06
| 2020-10-11T14:32:06
| 287,727,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,663
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('_ext'))
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'Collective Knowledge platform'
copyright = u'2015-2020 Grigori Fursin and the cTuning foundation'
author = u'Grigori Fursin'
version='1.3.1'
release=version
edit_on_github_url='https://github.com'
edit_on_github_project = 'ctuning/ck'
# The short X.Y version
#version = u'0.7.18'
# The full version, including alpha/beta/rc tags
#release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'recommonmark'
]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md', '.html']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_show_sourcelink = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'style_nav_header_background': 'black',
'collapse_navigation': False,
'style_external_links': True,
'analytics_id': 'UA-5727962-14', # Provided by Google in your dashboard
}
html_context = {
"display_github": True,
"github_user": "ctuning",
"github_repo": "ck",
"github_version": "master/docs/",
}
html_logo = 'static/logo.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CKDoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ck.tex', u'Collective Knowledge',
u'Grigori Fursin', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ck.tex', u'Collective Knowledge',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ck.tex', u'Collective Knowledge',
author, 'CK', 'Collective Knowledge about complex computational systems',
'reproducibility'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
def setup(app):
app.add_stylesheet('css/custom.css')
|
[
"Grigori.Fursin@cTuning.org"
] |
Grigori.Fursin@cTuning.org
|
42ad8c9644b372819d2919e610498c96c2128557
|
64ff26ba380bf35aa161022a7dce2572acad95f9
|
/app/forms.py
|
1af1ce03a3c8833d0ed5ad4b93a933070cb9fb53
|
[] |
no_license
|
naritotakizawa/django-monthly-formset-sample
|
9e517e1305fbf57a2677f069bfd9a82b312709cb
|
7e6667077a0e986695de230cdc5e616576896eef
|
refs/heads/master
| 2020-05-30T14:41:20.274643
| 2019-06-02T02:45:50
| 2019-06-02T02:45:50
| 189,797,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
from django import forms
from .models import Monthly, Daily
DailyInlineFormset = forms.inlineformset_factory(
    Monthly, Daily, exclude=['day'],  # make the day field non-editable
extra=0, can_delete=False
)
# You would normally define a class MonthlyForm, but for something this small a factory function works too.
MonthlyForm = forms.modelform_factory(Monthly, fields=['comment'])
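# A minimal usage sketch (hypothetical view code; assumes a `monthly` instance
# and a `request` are available): the inline formset binds the related Daily
# rows while MonthlyForm edits the comment field.
#
#   form = MonthlyForm(request.POST or None, instance=monthly)
#   formset = DailyInlineFormset(request.POST or None, instance=monthly)
#   if form.is_valid() and formset.is_valid():
#       form.save()
#       formset.save()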
|
[
"toritoritorina@gmail.com"
] |
toritoritorina@gmail.com
|
dc173b3cec82d48d340fb71841d9891121935bf7
|
89294b5cc300950b878cd7ed0def132081408da0
|
/attic_greek/test_modules/contracted_future.py
|
b0cc9cf0494ba78e253b5b6f643a69c59f396450
|
[] |
no_license
|
matt-gardner/language-study
|
aa434d3cf40b6752baeee0ccc9a40b80e0497ba1
|
c0d7066851ce4e04a8a0cf3c33bc9f91ae34e304
|
refs/heads/master
| 2020-12-24T16:24:17.531025
| 2016-03-12T23:03:39
| 2016-03-12T23:03:39
| 27,920,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from attic_greek.conjugation import GreekConjugation
from attic_greek.test_modules import verbs, verb_cases, GreekTestCase
import unicodedata
class ContractedFutureTest(GreekTestCase):
"""We just test a few forms here, because we've tested most of this already.
The main point here is just to be sure that contraction works in the future
tenses.
"""
def test_elauno(self):
args = {}
args['tense'] = 'Future'
args['mood'] = 'Indicative'
args['voice'] = 'Active'
answers = [u'ἐλῶ', u'ἐλᾷς', u'ἐλᾷ', u'ἐλῶμεν', u'ἐλᾶτε', u'ἐλῶσι']
answers = [unicodedata.normalize('NFKD', word) for word in answers]
conj = GreekConjugation(verbs['elauno'].word.word)
for case, answer in zip(verb_cases, answers):
args.update(case)
            self.assertEqual(conj.conjugate(**args), [answer])
def test_aggello(self):
args = {}
args['tense'] = 'Future'
args['mood'] = 'Indicative'
args['voice'] = 'Active'
answers = [u'ἀγγελῶ', u'ἀγγελεῖς', u'ἀγγελεῖ', u'ἀγγελοῦμεν',
u'ἀγγελεῖτε', u'ἀγγελοῦσι']
answers = [unicodedata.normalize('NFKD', word) for word in answers]
conj = GreekConjugation(verbs['aggello'].word.word)
for case, answer in zip(verb_cases, answers):
args.update(case)
            self.assertEqual(conj.conjugate(**args), [answer])
def test_maxomai(self):
args = {}
args['tense'] = 'Future'
args['mood'] = 'Indicative'
args['voice'] = 'Middle' # TODO: Should be deponent...
answers = [u'μαχοῦμαι', u'μαχεῖ', u'μαχεῖται', u'μαχούμεθα',
u'μαχεῖσθε', u'μαχοῦνται']
answers = [unicodedata.normalize('NFKD', word) for word in answers]
conj = GreekConjugation(verbs['maxomai'].word.word)
for case, answer in zip(verb_cases, answers):
args.update(case)
            self.assertEqual(conj.conjugate(**args), [answer])
all_tests = [ContractedFutureTest]
# vim: et sw=4 sts=4
|
[
"mjg82@byu.edu"
] |
mjg82@byu.edu
|
2db96682befc29600a69ec10146c79fa2573df68
|
b6ae8525b61f8302381efa3da1963c3d60d290f2
|
/starterapp/views.py
|
4ff3e035b06b511d0bb1d9ef4982b0ad51a85ccd
|
[] |
no_license
|
vince06fr/django-buddy
|
b7d713ba90d832753619c9a9ae0913f16ce3eeff
|
180b8011d38e5e2fecbe542fccb49466833422a8
|
refs/heads/master
| 2020-12-25T10:37:53.525148
| 2012-10-25T03:03:20
| 2012-10-25T03:03:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout as auth_logout
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.generic.base import View
from django.shortcuts import render, render_to_response
def logout(request):
auth_logout(request)
return HttpResponseRedirect('/')
def home(request):
template = 'home.html'
context = {}
return render_to_response(template, context)
class LandingView(View):
template = 'login.html'
def get(self, request, *args, **kwargs):
if request.user.is_authenticated():
return HttpResponseRedirect(reverse('home'))
context = {}
return render(request, self.template, context)
def post(self, request, *args, **kwargs):
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse('home'))
else:
return HttpResponseRedirect(reverse('login'))
|
[
"suneel0101@gmail.com"
] |
suneel0101@gmail.com
|
2def90018f3c113b4160350c30adc34418af87e3
|
70166e12579c44656720eec0c9e0e6cf8cf26e1f
|
/setup.py
|
5797f494056230aa30e7554341448c3b1ce4d60e
|
[
"MIT"
] |
permissive
|
kasbah/scrapy-puppeteer
|
5b8556fa7d6e6a816a698d6f76bbb7f83e548021
|
512cfe99b2c3f9aad6c0d3d35299d3ccd6c91121
|
refs/heads/master
| 2020-12-26T12:30:38.378438
| 2019-12-18T10:46:59
| 2019-12-18T10:46:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
"""This module contains the packaging routine for the pybook package"""
from setuptools import setup, find_packages
try:
from pip.download import PipSession
from pip.req import parse_requirements
except ImportError:
# It is quick hack to support pip 10 that has changed its internal
# structure of the modules.
from pip._internal.download import PipSession
from pip._internal.req.req_file import parse_requirements
def get_requirements(source):
"""Get the requirements from the given ``source``
Parameters
----------
source: str
The filename containing the requirements
"""
install_reqs = parse_requirements(filename=source, session=PipSession())
return [str(ir.req) for ir in install_reqs]
setup(
packages=find_packages(),
install_requires=get_requirements('requirements/requirements.txt'),
entry_points={
'console_scripts': [
'scrapyp = scrapy_puppeteer.cli:__main__',
],
}
)
|
[
"clement.denoix@algolia.com"
] |
clement.denoix@algolia.com
|
6debb4e0601a04825071d7b4aa701b5beb720d03
|
b6c4650e0719d09c39dd3950a0e861414c2a6910
|
/funções01.py
|
735ddf21c8c58c6ac787c4fccf475331117ceeb6
|
[] |
no_license
|
gabriel301297/Exerciciofun-es-
|
765f83b29985c3874f443d4b811f7b3ea9e4bcb7
|
c80ec28f01cb795efcfb07d3123f064b757ca77d
|
refs/heads/master
| 2022-11-03T01:46:13.188266
| 2020-06-18T23:54:04
| 2020-06-18T23:54:04
| 273,358,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
# Write a program to print:
def imprime(n):
    """Prints 'P' if the given value is greater than 0, 'N' otherwise."""
    if n > 0:
        print('P')
    else:
        print('N')
imprime(1)
|
[
"noreply@github.com"
] |
gabriel301297.noreply@github.com
|
00ef5b03f237a3f2966a55200ada0d942f795447
|
211076d8dda4e6751ef0643e0201e9c18845b528
|
/pneumothorax/conf/model008.py
|
426cf286bbcf8744324d91d8ccd82d849f40b922
|
[] |
no_license
|
jovenwayfarer/udacity-ml-nanodegree
|
c83d3778da0ccf58dbdd682cac44ec02107227cf
|
6d96de938a5d18550a50c8cadd28bde887b68222
|
refs/heads/master
| 2022-04-20T01:14:22.104892
| 2020-04-10T00:46:21
| 2020-04-10T00:46:21
| 290,711,786
| 1
| 0
| null | 2020-08-27T07:49:32
| 2020-08-27T07:49:32
| null |
UTF-8
|
Python
| false
| false
| 3,372
|
py
|
import albumentations as albu
workdir = './model/model008'
seed = 69
n_fold = 5
epochs = 20
sample_classes = True
resume_from = None
retrain_from = './model/model007/model_1024_0.pth' # <---- fold 0...4
train_rle_path = './input/stage_2_train.csv'
train_imgdir = './input/1024-s2/train'
train_folds = './cache/train_folds.pkl'
batch_size = 4
n_grad_acc = 4
num_workers = 4
imgsize = 1024
model = dict(
name='unet_resnet34',
pretrained='imagenet',
)
optim = dict(
name='Adam',
params=dict(
lr=5e-4,
),
)
# loss = dict(
# name='MixedLoss',
# params=dict(
# alpha=10,
# gamma=2,
# ),
# )
loss = dict(
name='BCEDiceLoss',
params=dict(),
)
scheduler = dict(
name='ReduceLROnPlateau',
params=dict(
mode="min",
patience=3,
verbose=True,
),
)
prob_threshold = 0.5
min_object_size = 3500 # pixels
normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
hflip = dict(name='HorizontalFlip', args=[], params=dict())
oneof_contrast = dict(name='OneOf', args=[[
albu.RandomContrast(),
albu.RandomGamma(),
albu.RandomBrightness()]], params=dict(p=0.3))
oneof_transform = dict(name='OneOf', args=[[
albu.ElasticTransform(alpha=120, sigma=120*0.05, alpha_affine=120*0.03),
albu.GridDistortion(),
albu.OpticalDistortion(distort_limit=2, shift_limit=0.5)]], params=dict(p=0.3))
shiftscalerotate = dict(name='ShiftScaleRotate', args=[], params=dict())
resize = dict(name='Resize', args=[], params=dict(height=imgsize, width=imgsize))
totensor = dict(name='ToTensor', args=[], params=dict(normalize=normalize))
hflip1 = dict(name='HorizontalFlip', args=[], params=dict(p=1.))
data = dict(
train=dict(
phase='train',
imgdir=train_imgdir,
imgsize=imgsize,
n_grad_acc=n_grad_acc,
loader=dict(
shuffle=True,
batch_size=batch_size,
drop_last=True,
num_workers=num_workers,
pin_memory=True,
),
prob_threshold=prob_threshold,
min_object_size=None,
transforms=[hflip, oneof_contrast, oneof_transform, shiftscalerotate, resize, totensor]
),
valid=dict(
phase='valid',
imgdir=train_imgdir,
imgsize=imgsize,
n_grad_acc=n_grad_acc,
loader=dict(
shuffle=False,
batch_size=2,
drop_last=False,
num_workers=num_workers,
pin_memory=True,
),
prob_threshold=prob_threshold,
min_object_size=None, # min_object_size,
transforms=[resize, totensor],
),
test=dict(
imgdir='./input/1024-s2/test',
sample_submission_file='./input/stage_2_sample_submission.csv',
trained_models=workdir+'/'+'model_1024_*.pth',
imgsize=imgsize,
loader=dict(
shuffle=False,
batch_size=1,
drop_last=False,
num_workers=num_workers,
pin_memory=True,
),
transforms=[resize, totensor],
transforms_and_hflip=[hflip1, resize, totensor],
prob_threshold=0.5,
min_object_size=3500,
output_file_probabilty_name='pixel_probabilities_1024_0p5th.pkl',
submission_file_name='submission_pytorch_5fold_ave_Wflip_0p5th_FineTunedM7withBCE.csv',
),
)
|
[
"akuritsyn@gmail.com"
] |
akuritsyn@gmail.com
|
db28575f2623496636938161f33ab45251ac66c2
|
f098fd2d7fb2aa5e739965d2f6b44d15f6516172
|
/mapmunchies_site/stories/models.py
|
01e3e66824087a39cdcf3bd60cc7c98592e35c6e
|
[] |
no_license
|
xiaopies/MapMunchies
|
985654f66d1fa4165fb0e158eee3052f342aeef7
|
e97bb492d27460b412b4e389cc7900b1e9e2862d
|
refs/heads/main
| 2023-08-17T04:22:59.697462
| 2021-09-27T02:18:11
| 2021-09-27T02:18:11
| 398,145,210
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,706
|
py
|
from django.db import models
from django.utils import timezone
from django.core.validators import MinLengthValidator, MinValueValidator
from django.contrib.auth.models import User
from django.db.models.expressions import RawSQL
def get_borough_list():
    list_of_boroughs = {
        1: 'brooklyn',
        2: 'manhattan',
        3: 'queens',
        4: 'bronx',
        5: 'staten island',
    }
    return list_of_boroughs
class restaurants_Manager(models.Manager):
def isFloatNum(self, targetString):
print(targetString)
try:
float(targetString)
return(True)
except:
print("Not a float")
return(False)
# request = { latitude:float, longitude:float, nelat:float, nelon:float, swlat:float, swlon:float }
def search(self, request):
print(request)
x = True
for value in request.values():
if not self.isFloatNum(value):
x = False
# x = self.isFloatNum(request.latitude) and self.isFloatNum(request.longitude)
# x = x and self.isFloatNum(request.nelat) and self.isFloatNum(request.nelon) and self.isFloatNum(request.swlat) and self.isFloatNum(swlon)
if (x):
# Great circle distance formula => Returning value in kms
gcd_formula = "6371 * acos(min(max(\
cos(radians(%s)) * cos(radians(lat)) \
* cos(radians(lon) - radians(%s)) + \
sin(radians(%s)) * sin(radians(lat)) \
, -1), 1))"
distance_raw_sql = RawSQL(
gcd_formula,
(request["centerlat"], request["centerlng"], request["centerlat"])
)
qs = self.get_queryset()
#get biz in the viewable space
qs = qs.filter(lat__lt = request["nelat"], lat__gt = request["swlat"], lon__lt = request["nelon"], lon__gt = request["swlon"])
qs = qs.annotate(distance=distance_raw_sql)
qs = qs.order_by('distance')
# .values_list("placeID", flat=True)
# qs = qs[:10] # take only the first 10
listOfPlaceIDs = []
for place in qs.iterator():
# get wait time average
# listOfPlaceIDs.append([place, place.distance]) # testing
listOfPlaceIDs.append([place.name, place.lat, place.lon])
# data = serialize("json", [ qs, ])
print('qs: ' + str(listOfPlaceIDs))
return listOfPlaceIDs
return('bad inputs') #escape out
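# Illustrative pure-Python mirror of the RawSQL great-circle expression used in
# search() above (added for clarity; the manager itself only runs the SQL
# version). Returns the distance in km, clamping the acos input like the SQL.
import math
def _great_circle_km(center_lat, center_lng, lat, lon):
    x = (math.cos(math.radians(center_lat)) * math.cos(math.radians(lat))
         * math.cos(math.radians(lon) - math.radians(center_lng))
         + math.sin(math.radians(center_lat)) * math.sin(math.radians(lat)))
    return 6371 * math.acos(min(max(x, -1), 1))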
# Create your models here.
class restaurants(models.Model):
time = models.TimeField(default=timezone.now)
name = models.CharField(max_length = 20, validators = [MinLengthValidator(1)])
borough = models.IntegerField(validators=[MinValueValidator(0, message="must be a value from 1-5")])
lat = models.FloatField()
lon = models.FloatField()
REQUIRED_FIELDS = [time, name, borough,lat, lon]
    # has to be a NYC borough for now
    # future: could use coords with the google geolocator api
def get_borough(self):
        list_of_boroughs = get_borough_list()
        return list_of_boroughs[int(self.borough)]
def __str__(self):
return str(self.name)
objects = restaurants_Manager()
class story(models.Model):
time = models.TimeField(default=timezone.now)
restaurant = models.ForeignKey(restaurants, on_delete=models.CASCADE)
storytext = models.TextField(null=False, validators=[MinLengthValidator(1)])
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
)
REQUIRED_FIELDS = [time, restaurant, storytext]
def __str__(self):
return str(self.time) + ' ' + str(self.restaurant)
|
[
"jasonjiangny@gmail.com"
] |
jasonjiangny@gmail.com
|
a239599b70d3afef7e329805a9d8116e6a53cb82
|
231bd95d5e4a67a5aea60c81c2a46f672885f93d
|
/src/copy_files.py
|
c161ec8f3c0ec77d323a2c5035a5dc3a58358384
|
[] |
no_license
|
CharmSpace/DTC-Logo-Recognition
|
f2d0d87fbff48d69e178506e81d9f8c8b029fe07
|
dd4f5c90af48caa3829a05febb7d01eeb716ec2c
|
refs/heads/master
| 2022-11-11T12:09:05.617918
| 2020-06-30T06:36:32
| 2020-06-30T06:36:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,296
|
py
|
import os
import argparse
#from multiprocessing import ThreadPool
import shutil
import time
from itertools import zip_longest
def copy_jpg_files(args):
"""Retrieve files in folder and write them to new folder"""
in_parent_folder = args.in_dir
out_parent_folder = args.out_dir
# walk through each folder and retrieve valid jpeg images
# and save in out_parent_folder
folders = [folder for folder in os.listdir(
in_parent_folder) if os.path.isdir(os.path.join(in_parent_folder, folder))]
#new_path = os.path.join(out_parent_folder, folder)
#if not os.path.exists(new_path):
# os.makedirs(new_path)
for folder in folders:
start = time.time()
print(f'>>> Copying files from {folder} folder ...', end='')
if folder == '0samples': # copy entire 0samples cos it has no issues
#new_path = os.path.join(out_parent_folder, folder)
#os.makedirs(new_path)
#shutil.copytree(os.path.join(in_parent_folder,folder),new_path)
continue
else:
files = [file for file in os.listdir(
os.path.join(in_parent_folder, folder)) if os.path.isfile(os.path.join(in_parent_folder, folder, file)) and file.endswith('.jpg')]
#print(f'>>> Total files {sum(files)}')
try:
for file in files:
shutil.copy2(os.path.join(
in_parent_folder, folder, file), out_parent_folder)
except Exception as error:
print(f'>>> An error occurred: {error}')
        print(f'>>> Copying {folder} folder done in {time.time()-start:.2f}s')
if __name__ == '__main__':
# get a list of the subdirs and process them using processes
parser = argparse.ArgumentParser(
description='a program to move jpg files to a new folder for annotation')
parser.add_argument('--in_dir', type=str, default=os.path.abspath('data/LogosInTheWild-v2/clean_data/voc_format'),
help='path to source folder to copy the JPG files')
parser.add_argument('--out_dir', type=str, default=os.path.abspath('data/litw_annotations'),
help='path to the destination folder for annotation')
args = parser.parse_args()
copy_jpg_files(args)
|
[
"mellitus4u@gmail.com"
] |
mellitus4u@gmail.com
|
e495ada4039944f53250977ba85cb229b3f625cc
|
7947a9f764722686a2802e393a7e27008690a118
|
/StyleAnalyser/initdb.py
|
dd92356e82e5051ac31440eae59191c00c67c3bc
|
[] |
no_license
|
NILGroup/TFG-1920-CarlosMoreno
|
e5766d11eee76a3f82e6af2872cc34acff7b16a4
|
de580fddae4387ac4db4cf9d1a7ad602c5cba584
|
refs/heads/master
| 2022-11-08T13:49:46.639450
| 2020-06-26T11:14:31
| 2020-06-26T11:14:31
| 196,052,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 17:50:25 2020
@author: Carlos Moreno Morera
"""
import mongoengine
def init_db():
"""
Inits the connection with the MongoDB.
Returns
-------
None.
"""
mongoengine.register_connection(alias='core', name='analysis')
mongoengine.connect('analysis', alias='default')
|
[
"carmor06@ucm.es"
] |
carmor06@ucm.es
|
e032f991668a174940ca3888d349666a1ad96a4e
|
af6af3615070b37d2b70634e4a2d1f4f4d4a5c29
|
/tcsdiscordbot.py
|
f504d79159d16edc334159f86a1d462ff6e6350b
|
[
"MIT"
] |
permissive
|
Jurredr/TCSDiscordBot
|
1eccdc94b2d5aed16aa97fa9832b7887aa8b99ba
|
24fcef8c673eb29eff28b070b588f22c5c391605
|
refs/heads/main
| 2023-04-24T12:26:05.126093
| 2021-03-16T13:46:39
| 2021-03-16T13:46:39
| 347,976,445
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,271
|
py
|
import discord
from discord.ext import commands
from docstring_parser import Style
from docstring_parser import parse as parse_docstring
from cogs.utils.smth import *
from globalfuncs import *
async def prefix(_, message):
return commands.when_mentioned_or('?')(_, message)
class HelpCommand(commands.DefaultHelpCommand):
def __init__(self, **options):
super().__init__(
**options,
show_hidden=False,
command_attrs={
'cooldown': commands.Cooldown(2, 5.0, commands.BucketType.user)
}
)
def construct_embed(self, title: str = None, description: str = None):
return discord.Embed(
title=title,
description=description,
colour=discord.Colour.teal()
)
def get_docs(self, command):
if docs := command.help:
if docs := parse_docstring(docs, style=Style.numpydoc):
return docs
def get_prefixed_command(self, command, bold=True):
return ('**%s%s**' if bold else '%s%s') % (self.clean_prefix, command.qualified_name)
def get_command_signature(self, command):
return ('**%s%s** %s' % (self.clean_prefix, command.qualified_name, '*' + command.signature + '*' if command.signature else '')).strip()
def add_indented_commands(self, commands, *, heading=None, max_size=None):
if not commands:
return
entries = list()
for command in commands:
entries.append(
f'• {self.get_command_signature(command)}' + (f' — {command.short_doc}' if command.short_doc else '')
)
return entries
async def send_bot_help(self, mapping):
embed = self.construct_embed(title="Help")
for cog, commands in mapping.items():
filtered = await self.filter_commands(commands, sort=True)
command_signatures = [self.get_command_signature(c) for c in filtered]
if command_signatures:
cog_name = getattr(cog, "qualified_name", "No category")
embed.add_field(name=cog_name, value='\n'.join(self.add_indented_commands(filtered)), inline=False)
await self.get_destination().send(embed=embed)
async def send_group_help(self, group):
filtered = await self.filter_commands(group.commands, sort=self.sort_commands)
self.add_indented_commands(filtered, heading=self.commands_heading)
embed = self.construct_embed(
title='Group: ' + self.get_prefixed_command(group),
description='\n'.join(self.add_indented_commands(filtered))
)
if help_text := group.help:
embed.add_field(name="Description", value=help_text)
if alias := group.aliases:
embed.add_field(name="Aliases", value=backtick('`, `'.join(alias)))
await self.get_destination().send(embed=embed)
async def send_command_help(self, command):
embed = self.construct_embed(title='Command ' + self.get_prefixed_command(command))
if docs := self.get_docs(command):
embed.add_field(
name='Description',
value=docs.long_description if docs.long_description else docs.short_description
)
if cooldown := command._buckets._cooldown:
embed.add_field(
name='Cooldown',
value=f"{cooldown.rate} command per {int(cooldown.per)} seconds"
)
embed.add_field(
name='Usage',
value=self.get_command_signature(command),
inline=False
)
        if docs and docs.params:
embed.add_field(
name='Arguments',
value='\n'.join([f"• **{param.arg_name}** — {param.description}" for param in docs.params]),
inline=False
)
        if docs and docs.meta != docs.params:
            meta = docs.meta
            if meta and meta[-1].args[0] == 'examples':
examples = meta[-1]
embed.add_field(
name='Examples',
value='\n'.join([
'%s %s' % (self.get_prefixed_command(command, bold=False), example)
for example in examples.description.splitlines()
])
)
alias = command.aliases
if alias:
embed.add_field(name='Aliases', value=backtick("`, `".join(alias)), inline=False)
channel = self.get_destination()
await channel.send(embed=embed)
async def send_cog_help(self, cog):
return
bot = commands.Bot(
command_prefix=prefix,
case_insensitive=True,
activity=discord.Game(name='?help'),
help_command=HelpCommand(),
owner_ids=MY_ID
)
@bot.event
async def on_ready():
print(log_time() + 'Bot started')
print('Connected as: ' + str(bot.user))
print('Connected servers:', *bot.guilds, sep='\n\t', end='\n')
bot.load_extension('cogs.bgtasks')
bot.load_extension('cogs.commands')
bot.load_extension('cogs.quotes')
bot.load_extension('cogs.fun')
print(log_time() + 'Bot is starting')
bot.run(BOT_TOKEN)
|
[
"noreply@github.com"
] |
Jurredr.noreply@github.com
|
3a651703ee75f1c6ffe569daa5bc28b4a275eef6
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/120_design_patterns/009_decorator/examples/4-python-design-patterns-building-more-m4-exercise-files/Decorator/decorators/red.py
|
e0ed3028f3471e49fd42bc27d43d86d7acf6f24e
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 235
|
py
|
from .abs_decorator import AbsDecorator
class Red(AbsDecorator):
@property
def description(self):
        return self.car.description + ', Ferrari red'
@property
def cost(self):
return self.car.cost + 1200.00
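# A minimal usage sketch (hypothetical; assumes a concrete car class such as a
# `BasicCar` implementing the abstract interface elsewhere in the package):
#
#   car = Red(BasicCar())
#   print(car.description)  # "<base description>, Ferrari red"
#   print(car.cost)         # base cost + 1200.00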
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
e1794bb2df20a06d8c588241f48122e0f78e04b5
|
f6dd4a402e329f112ee14bd25544ec122e6c4173
|
/setup.py
|
8d84ea9ca14bfabf8fa202184bac336174e82224
|
[] |
no_license
|
diegompin/code_complenet2020
|
68231cdfe18417c20dfac9cfde385aa3a0b84bf1
|
ab6070ad8a52c55371b9678735931ca5cb5eda52
|
refs/heads/master
| 2022-07-06T19:23:52.589159
| 2019-11-17T08:17:27
| 2019-11-17T08:17:27
| 222,216,280
| 2
| 0
| null | 2022-06-21T23:28:12
| 2019-11-17T08:05:33
|
Python
|
UTF-8
|
Python
| false
| false
| 716
|
py
|
from setuptools import setup
import os
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
extra_files = package_files('mhs')
setup(
name='mhs',
version='1.0.0',
packages=['mhs'],
package_data={'': extra_files},
url='',
license='',
author='Diego Pinheiro',
author_email='',
description='Mapping the Health System'
)
#
# setup(
# name='mhs',
# version='1.0.0',
# packages=['mhs'],
# url='',
# license='',
# author='Diego Pinheiro',
# author_email='',
# description=''
# )
|
[
"2016bestworld"
] |
2016bestworld
|
81d5c0a0c3768e21c0cce5ca5137131e974b601f
|
8e02857c8d7c862652007cbbe147c6c363b094ef
|
/iroha_files.py
|
8ddefd3f5a11a845e4ca20e9962ba45d316c1633
|
[] |
permissive
|
nlsynth/iroha
|
fb1d503c05f50c09dc928167e2fb84f8ff03565d
|
7fc5da7408b7ffb449d28737ab01fe0feea885c1
|
refs/heads/master
| 2021-07-16T15:11:56.684866
| 2021-07-09T13:42:27
| 2021-07-09T13:42:27
| 47,931,688
| 37
| 5
|
BSD-3-Clause
| 2021-07-09T13:42:28
| 2015-12-13T18:46:24
|
C++
|
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
# Script to create tar.gz package. This will replace automake based "make dist".
import os
PACKAGE="iroha"
VERSION="0.1.0"
ARCHIVE=PACKAGE + "-" + VERSION
EXTRA = ["NEWS",
"configure", "lib/gen_instantiation.py", "Makefile",
"iroha_files.py", "src/iroha.gyp", "config.mk"]
DOCS = ["docs/glossary.md", "docs/iroha.md", "docs/resource_class.md", "docs/structure.md"]
EXTRA += DOCS
def GetGypFileList(gyp):
gypdir = os.path.dirname(gyp) + "/"
d = eval(open(gyp).read())
targets = d['targets']
files = []
for t in targets:
for s in t['sources']:
files.append(gypdir + s)
return files
def GetExtraFileList(base):
b = os.path.dirname(base) + "/"
files = []
for e in EXTRA:
files.append(b + e)
return files
def CopyFiles(archive, files):
os.system("mkdir " + archive)
pdir = archive + "/"
dirs = {}
for fn in files:
d = pdir + os.path.dirname(fn)
if not d in dirs:
dirs[d] = True
os.system("mkdir -p " + d)
os.system("cp -p " + fn + " " + pdir + fn)
def MakeTarBall(archive, files):
os.system("rm -rf " + archive)
CopyFiles(archive, files)
os.system("tar cvzf " + archive + ".tar.gz " + archive)
os.system("rm -rf " + archive)
if __name__ == '__main__':
files = GetGypFileList("src/iroha.gyp") + GetExtraFileList("./")
MakeTarBall(ARCHIVE, files)
|
[
"tabata.yusuke@gmail.com"
] |
tabata.yusuke@gmail.com
|
935397969bf0ea3e29fd10298ceb1172ffcceb61
|
9b722ca41671eb2cea19bac5126d0920639261bd
|
/.history/app_20201124111415.py
|
2b976e9cf211bde93d3e1eb0e991fcec45a0bc6d
|
[] |
no_license
|
thawalk/db_flask_server
|
7928fd481f99d30bdccc60d97f02db78324cfdbe
|
cd55f1c9bf84c734457ee02d9f64a6833e295fad
|
refs/heads/master
| 2023-01-25T02:40:19.097457
| 2020-12-06T07:45:50
| 2020-12-06T07:45:50
| 314,229,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,373
|
py
|
import json
from flask import Flask, jsonify, url_for, request, redirect,Response,Request
# from flask_pymongo import PyMongo
import pymongo
from bson.json_util import dumps
import mysql.connector
from werkzeug.serving import run_simple
import os
from dotenv import load_dotenv
import datetime
import time
app = Flask(__name__)
test_collection='test_collection'
# sample='user_collection'
mongo = pymongo.MongoClient('mongodb://54.83.130.150:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false')
mongo_db = pymongo.database.Database(mongo, 'test')
metadata_col = pymongo.collection.Collection(mongo_db, 'test_collection')
db = mysql.connector.connect(
    host='3.84.158.241',
    user='root',
    password='',
    database='reviews'
)
cur = db.cursor()
# cur.execute("SELECT asin from kindle_reviews group by asin order by avg(overall) desc limit 9 ")
# print(cur.fetchall())
# print("above fetch all")
@app.route('/',methods=["GET"])
def api_root():
data = {
'message': 'Welcome to our website. Where reviews are our number one priority'
}
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
return response
#returns list of categories
@app.route('/categories', methods = ['GET'])
def get_categories():
categories = []
    js = json.dumps(categories)
response = Response(js, status=200, mimetype='application/json')
return response
@app.route('/search', methods=['GET'])  # for now this only searches by title; the mongo metadata has no author field
def search_book():
data = request.json
try:
title = data["title"]
result = metadata_col.find({"title":title})
result_array = dumps(list(result))
print(result_array)
js = json.dumps(result_array)
response = Response(js, status=200, mimetype='application/json')
return response
except:
errMsg = "Please include title."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
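# Example request (illustrative): a GET to /search with the JSON body
# {"title": "Some Book"} returns the matching metadata documents as JSON.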
# @app.route('/review', methods=['POST'])
# def add_review():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
# txt = "INSERT INTO 'kindle_reviews' ('id', 'asin', 'overall', 'reviewText', 'reviewTime', 'reviewerID', 'reviewerName', 'summary', 'unixReviewTime') VALUES (%s)"
# values = (None, request.json['asin'], request.json['overall'], request.json['reviewText'], request.json['reviewTime'], request.json['reviewerID'], request.json['reviewerName'], request.json['summary'], request.json['unixReviewTime'])
# cur.execute(txt, values)
# return 'successfully uploaded new review', 200
@app.route('/addBook',methods= ['POST'])
def add_book():
try:
data = request.json
title = data['title']
asin = data['asin']
description = data['description']
price = data['price']
categories = data['categories']
message = "Book added successfully"
        metadata_col.insert_one({"title": title, "asin": asin, "description": description, "price": price, "categories": categories})
js = json.dumps(message)
response = Response(js, status=201, mimetype='application/json')
return response
except:
errMsg = "Please include title, asin, description, price and categories."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
@app.route('/addReview', methods=['POST'])  # TODO: add review INTO sql part
def add_review():
    pass  # not implemented yet
@app.route('/sortByGenres', methods=['GET'])  # TODO: sort by genres from mongo metadata categories
def sort_by_genres():
pass
if __name__ == '__main__':
# app.run(host="0.0.0.0", port=80)
app.run(debug=True)
|
[
"akmal_hakim_teo@hotmail.com"
] |
akmal_hakim_teo@hotmail.com
|
0573cfcea8c49ecfb5e2c912d40802836435360d
|
6b74f4f5b2c28bea4ba003fc198dc3f109372869
|
/human_detector.py
|
4a55e416b909f859264158deea7c5f929bbc84ef
|
[
"MIT"
] |
permissive
|
kei1107/human_detection_with_aterm_usb_camera
|
30b42097802a81e6e8e2e6777f16900924e0c39c
|
95bb8e67c8be428b562b190d789c7d49985b3cd3
|
refs/heads/master
| 2020-12-07T09:01:28.586693
| 2020-07-17T02:17:40
| 2020-07-17T02:17:40
| 232,690,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,025
|
py
|
import base64
import os
import shutil
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import src
import sys
import time
from datetime import datetime
import pytz
from io import BytesIO
from PIL import Image
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.preprocessing import image
import numpy as np
import tensorflow as tf
from ssd import SSD300
from ssd_utils import BBoxUtility
def save_image(src, file_save_path):
    # Decode and save a Base64-encoded image.
if "base64," in src:
with open(file_save_path, "wb") as f:
f.write(base64.b64decode(src.split(",")[1]))
    # Otherwise, download the image from its URL and save it.
else:
res = requests.get(src, stream=True)
with open(file_save_path, "wb") as f:
shutil.copyfileobj(res.raw, f)
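# Example (illustrative): save_image("data:image/png;base64,....", "frame.png")
# decodes an inline data URI, while save_image("http://host/cam.jpg", "frame.jpg")
# downloads the image over HTTP.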
# Main
# logger setting
logger = src.Setup_Logger.Setup_Logger()
np.set_printoptions(suppress=True)
# SSD setting
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
set_session(tf.Session(config=config))
voc_classes = ['Aeroplane', 'Bicycle', 'Bird', 'Boat', 'Bottle',
'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',
'Dog', 'Horse', 'Motorbike', 'Person', 'Pottedplant',
'Sheep', 'Sofa', 'Train', 'Tvmonitor']
NUM_CLASSES = len(voc_classes) + 1
input_shape = (300, 300, 3)
model = SSD300(input_shape, num_classes=NUM_CLASSES)
model.load_weights('weights/weights_SSD300.hdf5', by_name=True)
bbox_util = BBoxUtility(NUM_CLASSES)
# Chronium setting
user, pw, ip = src.Setup_Config.Setup_Config(logger=logger)
main_url = 'http://' + user + ':' + pw + '@' + ip + ':15790'
options = Options()
options.binary_location = None
if os.name == 'posix':
options.binary_location = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
elif os.name == 'nt':
options.binary_location = 'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe'
if options.binary_location is None:
logger.info('Support : Windows , OSX')
sys.exit()
try:
options.add_argument('--headless')
driver = webdriver.Chrome(options=options)
driver.get(main_url)
logger.info('Accessing')
start_button = driver.find_element_by_id('VMG_PRE_START_BTN')
start_button.click()
print("Start Detector")
while True:
try:
BUF_UVCCAM2 = driver.find_element_by_id('BUF_UVCCAM2')
img_url = BUF_UVCCAM2.get_attribute('src')
img_response = requests.get(img_url)
output_img = Image.open(BytesIO(img_response.content))
img = output_img.copy()
img = img.resize((300, 300))
img = image.img_to_array(img)
inputs = []
inputs.append(img.copy())
except Exception as e:
continue
inputs = preprocess_input(np.array(inputs))
preds = model.predict(inputs, batch_size=1)
results = bbox_util.detection_out(preds)
# Parse the outputs.
det_label = results[0][:, 0]
det_conf = results[0][:, 1]
cand_size = len(det_label)
isPerson = False
for i in range(cand_size):
if det_conf[i] < 0.9:
continue
else:
# Person is 15
if int(det_label[i]) == 15:
isPerson = True
break
if isPerson:
            # save with a timestamped filename
now_time_str = datetime.now(pytz.timezone('Asia/Tokyo')).strftime('%Y%m%d_%H%M%S%f')[:-3]
# print("Person detect :",now_time_str)
try:
image.save_img("./output/" + now_time_str + '.jpg', output_img)
except Exception as e:
continue
time.sleep(0.5)
except Exception as e:
logger.info('Web driver Error occured!')
logger.exception(e)
sys.exit()
|
[
"clavis1107@gmail.com"
] |
clavis1107@gmail.com
|
793ddda057a87a3282347556bd109bb246a5a62b
|
e6eaff0a201f1a297cbd008ef4634768b9e7bcb2
|
/portfolio/views.py
|
de3914653ddf7591910819919c4bbcec40afddee
|
[] |
no_license
|
rishik-verma/django3-personal-portfolio
|
de019827a1aeb6e65e8f129bff052e432b6b625e
|
698b58a17a417d614128e82cc6d94ea763084f76
|
refs/heads/master
| 2022-08-19T13:58:52.315905
| 2020-05-27T17:27:06
| 2020-05-27T17:27:06
| 267,383,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
from django.shortcuts import render
from .models import Project
# Create your views here.
def home(request):
projects=Project.objects.all() #fetch all objects from database
return render(request,'portfolio/home.html',{'projects':projects})
|
[
"rishikvb81@gmail.com"
] |
rishikvb81@gmail.com
|
2bdb5fdf674de49ac64042e627006dc6a4d961db
|
076bfcf8377df4453c281483309d0f933345c737
|
/sadf.py
|
e9b55d0cc95eca1a71b6b925b383f4e0ba9fe18f
|
[] |
no_license
|
yaping03/CourseMall
|
a6776d7ac36d2e3f94c6b65c375f06953d164a83
|
ca730d53275c1ea073e4a3394f122ba8d43f67a6
|
refs/heads/master
| 2020-05-07T17:04:33.539287
| 2019-04-11T11:59:21
| 2019-04-11T11:59:21
| 180,712,705
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,748
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import datetime
v = [' File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/threading.py", line 882, in _bootstrap\n self._bootstrap_inner()\n', ' File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/threading.py", line 914, in _bootstrap_inner\n self.run()\n', ' File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/threading.py", line 862, in run\n self._target(*self._args, **self._kwargs)\n', ' File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/socketserver.py", line 628, in process_request_thread\n self.finish_request(request, client_address)\n', ' File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/socketserver.py", line 357, in finish_request\n self.RequestHandlerClass(request, client_address, self)\n', ' File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/socketserver.py", line 684, in __init__\n self.handle()\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/servers/basehttp.py", line 155, in handle\n handler.run(self.server.get_app())\n', ' File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/wsgiref/handlers.py", line 137, in run\n self.result = application(self.environ, self.start_response)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/contrib/staticfiles/handlers.py", line 63, in __call__\n return self.application(environ, start_response)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/wsgi.py", line 157, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/base.py", line 124, in get_response\n response = self._middleware_chain(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/utils/deprecation.py", line 140, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/utils/deprecation.py", line 140, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/utils/deprecation.py", line 140, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/utils/deprecation.py", line 140, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/utils/deprecation.py", line 140, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/utils/deprecation.py", line 140, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/utils/deprecation.py", line 140, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/PycharmProjects/luffycity/api/middlewares/base.py", line 14, in __call__\n response = self.get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/exception.py", line 41, in inner\n response = get_response(request)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/core/handlers/base.py", line 185, in _get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view\n return view_func(*args, **kwargs)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/django/views/generic/base.py", line 68, in view\n return self.dispatch(request, *args, **kwargs)\n', ' File "/Users/wupeiqi/py_virtual_env/luffyenv/lib/python3.5/site-packages/rest_framework/views.py", line 488, in dispatch\n response = handler(request, *args, **kwargs)\n', ' File "/Users/wupeiqi/PycharmProjects/luffycity/api/views/order.py", line 296, in post\n print(traceback.format_stack())\n']
for item in v:
print(item)
|
[
"820974538@qq.com"
] |
820974538@qq.com
|
9af51cdf56cdd30fe398278a39e82cb6c3e5b41d
|
49f0dab00cb3c47d1d7836391d2a6d95ea6f346b
|
/src/agentdlg.py
|
204b00581f87ad577009544ca050ee5aed97a53a
|
[] |
no_license
|
alonso9v9/AI_Maze_Solver
|
46b55451d3a152f61665c0099f523bb2ccd3795a
|
aa96ecc9ef5ffcafd33dfe2d7dd358bde2186a3f
|
refs/heads/master
| 2023-02-04T11:17:35.922832
| 2020-12-25T19:33:15
| 2020-12-25T19:33:15
| 324,421,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,865
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'agent.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import math
from PyQt5 import QtCore, QtWidgets
import agent
class AgentDialog(QtCore.QObject):
""" Agent Configuration Dialog """
applySignal = QtCore.pyqtSignal()
def __init__(self, parent=None):
super(QtCore.QObject, self).__init__(parent)
self.maze = None
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(380, 185)
self.gridLayout = QtWidgets.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Apply |
QtWidgets.QDialogButtonBox.Cancel |
QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 3, 0, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.line_2 = QtWidgets.QFrame(Dialog)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout.addWidget(self.line_2)
self.sensorsCheck = QtWidgets.QCheckBox(Dialog)
self.sensorsCheck.setObjectName("sensorsCheck")
self.verticalLayout.addWidget(self.sensorsCheck)
self.sensorsLayout = QtWidgets.QHBoxLayout()
self.sensorsLayout.setObjectName("sensorsLayout")
self.apertureLabel = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.apertureLabel.sizePolicy()
.hasHeightForWidth())
self.apertureLabel.setSizePolicy(sizePolicy)
self.apertureLabel.setMaximumSize(QtCore.QSize(91, 16777215))
self.apertureLabel.setObjectName("apertureLabel")
self.sensorsLayout.addWidget(self.apertureLabel)
self.apertureSpin = QtWidgets.QSpinBox(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.apertureSpin.sizePolicy()
.hasHeightForWidth())
self.apertureSpin.setSizePolicy(sizePolicy)
self.apertureSpin.setMaximum(360)
self.apertureSpin.setSingleStep(15)
self.apertureSpin.setProperty("value", 180)
self.apertureSpin.setObjectName("apertureSpin")
self.sensorsLayout.addWidget(self.apertureSpin)
self.nsensorsLabel = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.nsensorsLabel.sizePolicy().
hasHeightForWidth())
self.nsensorsLabel.setSizePolicy(sizePolicy)
self.nsensorsLabel.setObjectName("nsensorsLabel")
self.sensorsLayout.addWidget(self.nsensorsLabel)
self.numSensorsSpin = QtWidgets.QSpinBox(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.numSensorsSpin.sizePolicy()
.hasHeightForWidth())
self.numSensorsSpin.setSizePolicy(sizePolicy)
self.numSensorsSpin.setMaximum(24)
self.numSensorsSpin.setProperty("value", 3)
self.numSensorsSpin.setObjectName("numSensorsSpin")
self.sensorsLayout.addWidget(self.numSensorsSpin)
self.verticalLayout.addLayout(self.sensorsLayout)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_2 = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy()
.hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.maxDistSpin = QtWidgets.QDoubleSpinBox(Dialog)
self.maxDistSpin.setDecimals(0)
self.maxDistSpin.setMaximum(5000.0)
self.maxDistSpin.setProperty("value", 20.0)
self.maxDistSpin.setObjectName("maxDistSpin")
self.horizontalLayout.addWidget(self.maxDistSpin)
self.verticalLayout.addLayout(self.horizontalLayout)
self.line = QtWidgets.QFrame(Dialog)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.noiseLayout = QtWidgets.QHBoxLayout()
self.noiseLayout.setObjectName("noiseLayout")
self.tnoiseLabel = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tnoiseLabel.sizePolicy()
.hasHeightForWidth())
self.tnoiseLabel.setSizePolicy(sizePolicy)
self.tnoiseLabel.setObjectName("tnoiseLabel")
self.noiseLayout.addWidget(self.tnoiseLabel)
self.tnoiseSpin = QtWidgets.QDoubleSpinBox(Dialog)
self.tnoiseSpin.setDecimals(1)
self.tnoiseSpin.setMaximum(100.0)
self.tnoiseSpin.setSingleStep(0.1)
self.tnoiseSpin.setStepType(QtWidgets.QAbstractSpinBox
.AdaptiveDecimalStepType)
self.tnoiseSpin.setProperty("value", 1.0)
self.tnoiseSpin.setObjectName("tnoiseSpin")
self.noiseLayout.addWidget(self.tnoiseSpin)
self.rnoiseLabel = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rnoiseLabel.sizePolicy()
.hasHeightForWidth())
self.rnoiseLabel.setSizePolicy(sizePolicy)
self.rnoiseLabel.setObjectName("rnoiseLabel")
self.noiseLayout.addWidget(self.rnoiseLabel)
self.rnoiseSpin = QtWidgets.QDoubleSpinBox(Dialog)
self.rnoiseSpin.setMaximum(30.0)
self.rnoiseSpin.setSingleStep(0.1)
self.rnoiseSpin.setStepType(QtWidgets.QAbstractSpinBox
.AdaptiveDecimalStepType)
self.rnoiseSpin.setProperty("value", 1.0)
self.rnoiseSpin.setObjectName("rnoiseSpin")
self.noiseLayout.addWidget(self.rnoiseSpin)
self.verticalLayout.addLayout(self.noiseLayout)
self.geometryLayout = QtWidgets.QHBoxLayout()
self.geometryLayout.setObjectName("geometryLayout")
self.label = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy()
.hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.geometryLayout.addWidget(self.label)
self.radiusSpin = QtWidgets.QDoubleSpinBox(Dialog)
self.radiusSpin.setDecimals(1)
self.radiusSpin.setMinimum(1.0)
self.radiusSpin.setMaximum(45.0)
self.radiusSpin.setSingleStep(0.5)
self.radiusSpin.setProperty("value", 2.5)
self.radiusSpin.setObjectName("radiusSpin")
self.geometryLayout.addWidget(self.radiusSpin)
self.verticalLayout.addLayout(self.geometryLayout)
self.line_3 = QtWidgets.QFrame(Dialog)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.verticalLayout.addWidget(self.line_3)
self.gridLayout.addLayout(self.verticalLayout, 2, 0, 1, 1)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
self.buttonBox.button(QtWidgets.QDialogButtonBox.Apply).clicked.\
connect(self.applyChanges)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Agent Properties"))
self.sensorsCheck.setText(_translate("Dialog",
"Activate agent\'s sensor array"))
self.apertureLabel.setText(_translate("Dialog", "Aperture"))
self.apertureSpin.setSuffix(_translate("Dialog", "°"))
self.nsensorsLabel.setText(_translate("Dialog", "Num Sensors"))
self.label_2.setText(_translate("Dialog", "Max distance"))
self.tnoiseLabel.setText(_translate("Dialog", "Translation noise"))
self.tnoiseSpin.setSuffix(_translate("Dialog", "%"))
self.rnoiseLabel.setText(_translate("Dialog", "Rotation noise"))
self.rnoiseSpin.setSuffix(_translate("Dialog", "°"))
self.label.setText(_translate("Dialog", "Agent\'s radius"))
def exportValues(self):
""" Extract the values from the GUI elements and store them into the
agent
"""
self.agent.radius = self.radiusSpin.value()
self.agent.sensors = self.sensorsCheck.isChecked()
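        # Spread the sensors evenly across the aperture, centred on the agent's
        # heading (e.g. aperture=180 with 3 sensors -> angles [-90, 0, 90]).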
a = self.apertureSpin.value()
if a > 0:
s = max(1,self.numSensorsSpin.value())
if s > 1:
m = a/(s-1)
b = -a/2
sens = [float(i)*m+b for i in range(s)]
else:
sens = [0]
self.agent.sensorArray = sens
else:
self.agent.sensorArray = [0]
self.agent.maxDistance = self.maxDistSpin.value()
self.agent.translationNoiseFactor = self.tnoiseSpin.value()/100
self.agent.rotationNoise = self.rnoiseSpin.value()
def accept(self):
print("[DBG] Accepting new values")
self.exportValues()
self.parentAccept()
def applyChanges(self):
print("[DBG] Applying new values")
self.exportValues()
self.applySignal.emit()
def setValues(self, theAgent):
""" Copy the data from the agent into the dialog settings"""
print("[DBG] Setting current values into the dialog")
self.agent = theAgent
self.radiusSpin.setValue(self.agent.radius)
if self.agent.sensorArray:
numSensors = len(self.agent.sensorArray)
if numSensors > 0:
self.sensorsCheck.setChecked(self.agent.sensors)
aperture = self.agent.sensorArray[-1]-self.agent.sensorArray[0]
self.apertureSpin.setValue(aperture)
self.numSensorsSpin.setValue(numSensors)
else:
self.sensorsCheck.setChecked(False)
else:
self.sensorsCheck.setChecked(False)
self.apertureSpin.setValue(180) # Default values
            self.numSensorsSpin.setValue(3) # Default values
self.maxDistSpin.setValue(self.agent.maxDistance)
self.rnoiseSpin.setValue(self.agent.rotationNoise)
self.tnoiseSpin.setValue(self.agent.translationNoiseFactor*100)
|
[
"alonso9v9@gmail.com"
] |
alonso9v9@gmail.com
|
67a52b02acbbe45b3421fa32b3b7742da78b68db
|
a6d934791aa9069718a7a3090dc332b21a44870e
|
/validater/exceptions.py
|
6ec6d0476bb1d30414d13a2384c133e7c5b4c034
|
[
"MIT"
] |
permissive
|
ppproxy/validater
|
ce993b35f7ee26f4e29f038133e3036f599c2536
|
05923875c99ec85ce95e7ac3faa53d64f7b94436
|
refs/heads/master
| 2020-12-26T11:20:46.358456
| 2016-09-17T21:38:28
| 2016-09-17T21:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
class ValidaterError(ValueError):
"""Mark invalid position"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# marks' item: (is_key, index_or_key)
self.marks = []
def mark_index(self, index):
self.marks.insert(0, (False, index))
return self
def mark_key(self, key):
self.marks.insert(0, (True, key))
return self
@property
def position(self):
"""A string which represent the position of invalid.
For example:
{
"tags": ["ok", "invalid"], # tags[1]
"user": {
"name": "invalid", # user.name
"age": 500 # user.age
}
}
"""
text = ""
for is_key, index_or_key in self.marks:
if is_key:
text = "%s.%s" % (text, index_or_key)
else:
if index_or_key is None:
text = "%s[]" % text
else:
text = "%s[%d]" % (text, index_or_key)
if text and text[0] == '.':
text = text[1:]
return text
@property
def message(self):
"""Error message"""
if self.args:
return self.args[0]
else:
return None
def __str__(self):
position = self.position
if self.args:
if position:
return "%s in %s" % (self.args[0], position)
else:
return self.args[0]
else:
if position:
return "in %s" % position
else:
return super().__str__()
class Invalid(ValidaterError):
"""Data invalid"""
class SchemaError(ValidaterError):
"""Schema error"""
|
[
"guyskk@qq.com"
] |
guyskk@qq.com
|
92d8e815e14f4c9b30c1e936f4f134092378124f
|
fc1ed3d37b61c7fde55e667aca558062f052d208
|
/models/uncertainty_module.py
|
bc7cf2e8d7206b8ad7d178931e0b635bf8b9ccc9
|
[
"MIT"
] |
permissive
|
renjiechao88/ProbFace
|
2305e1fd68c3b566e1e0dd2c20b4c66cd4276e71
|
cd7ef37e6fe6f62f98af7d0cebfb05674dda6739
|
refs/heads/main
| 2023-06-04T08:38:08.695445
| 2021-06-21T07:04:59
| 2021-06-21T07:04:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,808
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
import tensorflow.contrib.slim as slim
batch_norm_params = {
'decay': 0.995,
'epsilon': 0.001,
'center': True,
'scale': True,
'updates_collections': None,
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
}
batch_norm_params_sigma = {
'decay': 0.995,
'epsilon': 0.001,
'center': False,
'scale': False,
'updates_collections': None,
'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],}
def scale_and_shift(x, gamma_init=1.0, beta_init=0.0):
num_channels = x.shape[-1].value
with tf.variable_scope('scale_and_shift'):
gamma = tf.get_variable('alpha', (),
initializer=tf.constant_initializer(gamma_init),
regularizer=slim.l2_regularizer(0.0),
dtype=tf.float32)
beta = tf.get_variable('gamma', (),
initializer=tf.constant_initializer(beta_init),
dtype=tf.float32)
x = gamma * x + beta
return x
def inference(inputs, embedding_size, phase_train,
weight_decay=5e-4, reuse=None, scope='UncertaintyModule'):
with slim.arg_scope([slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu):
with tf.variable_scope(scope, [inputs], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=phase_train):
print('UncertaintyModule input shape:', [dim.value for dim in inputs.shape])
net = slim.flatten(inputs)
fc1_size = 256
net = slim.fully_connected(net, fc1_size, scope='fc1',
normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params,
activation_fn=tf.nn.relu)
log_sigma_sq = slim.fully_connected(net, embedding_size, scope='fc_log_sigma_sq',
normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params_sigma,
activation_fn=None)
# Share the gamma and beta for all dimensions
if embedding_size == 1:
log_sigma_sq = scale_and_shift(log_sigma_sq, 1e-1, -1.0)
else:
log_sigma_sq = scale_and_shift(log_sigma_sq, 1e-1, -7.0)
# Add epsilon for sigma_sq for numerical stableness
log_sigma_sq = tf.log(1e-6 + tf.exp(log_sigma_sq))
# log_sigma_sq = tf.log(0.001 + tf.exp(log_sigma_sq))
return log_sigma_sq
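# Minimal usage sketch (the placeholder shape is an assumption for
# illustration, not taken from this repo):
#   feats = tf.placeholder(tf.float32, [None, 7, 7, 512])  # backbone feature map
#   log_sigma_sq = inference(feats, embedding_size=512, phase_train=True)
#   sigma_sq = tf.exp(log_sigma_sq)  # per-dimension uncertainty (variance)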
|
[
"kaenchan@163.com"
] |
kaenchan@163.com
|
8108d0914dda3fab0d0cb371dddf18eef4c7b557
|
c5d838598ee22b8c6125bd46f3f6078f4e28376e
|
/mixpanel_cli/auth.py
|
1148c1f1fd7a4bdcf2a63688636a77775a286d91
|
[
"MIT"
] |
permissive
|
atomic-labs/mixpanel-cli
|
a989578bd2dc259a17214bd442237091ca43ea4f
|
d271e512a35124d663f60e6742b500c99fcac1fb
|
refs/heads/master
| 2021-01-10T19:12:49.765428
| 2014-01-22T01:11:02
| 2014-01-22T01:11:02
| 15,955,794
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
import time
import hashlib
def authenticate_parameters(api_key, api_secret, parameters, expire=60):
params = {}
params.update(parameters)
params["expire"] = int(time.time()) + expire
params["api_key"] = api_key
sig_base_string = ""
for k in sorted(params.keys()):
sig_base_string += "%s=%s" % (k, params[k])
sig_base_string += api_secret
md5 = hashlib.md5()
md5.update(sig_base_string.encode("utf-8"))
params["sig"] = md5.hexdigest()
return params
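# Example with dummy credentials (illustrative only):
#   params = authenticate_parameters("API_KEY", "API_SECRET", {"event": "signup"})
#   # -> adds "api_key", an "expire" timestamp, and the md5 "sig" that
#   #    Mixpanel's legacy signed endpoints expect.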
|
[
"ben.pedrick@gmail.com"
] |
ben.pedrick@gmail.com
|
c469fe2dcb449ee3400c521f8eb1549e7ba6e21d
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/stupiding_kaggle_EEG/kaggle_EEG-master/models/len2048p4_resize3_bs_c1r4p5_f9n256r35p1_v67.py
|
1d5cadde7ff34bcec4455bb9ed7040d4500b3aad
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 15,902
|
py
|
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
from lasagne.layers import dnn
from batch_norm import BatchNormLayer
import metrics
import time
rs = T.shared_randomstreams.RandomStreams()
rs.seed(int(time.time()))
data_path = 'eeg_train.npy'
train_series = [0, 1, 2, 3, 4, 5]
valid_series = [6, 7]
test_series = [0, 1, 2, 3, 4, 5]
events = [0, 1, 2, 3, 4, 5]
num_events = len(events)
train_data_params = {'section': 'train',
'chunk_gen_fun': 'random_chunk_gen_fun',
'channels': 32,
'length': 2048,
'preprocess': 'per_sample_mean',
'chunk_size': 4096,
'num_chunks': 400,
'pos_ratio': 0.35,
'bootstrap': True,
'neg_pool_size': 81920,
'hard_ratio': 1,
'easy_mode': 'all',
'resize': [0.7, 1.3],
}
valid_data_params = {'section': 'valid',
'chunk_gen_fun': 'fixed_chunk_gen_fun',
'channels': 32,
'length': 2048,
'preprocess': 'per_sample_mean',
'chunk_size': 4096,
'pos_interval': 100,
'neg_interval': 100,
}
bs_data_params = {'section': 'bootstrap',
'chunk_gen_fun': 'fixed_chunk_gen_fun',
'channels': 32,
'length': 2048,
'preprocess': 'per_sample_mean',
'chunk_size': 4096,
'pos_interval': 100,
'neg_interval': 100,
}
test_valid_params = {'section': 'valid',
'chunk_gen_fun': 'test_valid_chunk_gen_fun',
'channels': 32,
'length': 2048,
'preprocess': 'per_sample_mean',
'chunk_size': 4096,
'test_lens': [2048],
'interval': 10,
}
test_data_params = {'section': 'test',
'chunk_gen_fun': 'sequence_chunk_gen_fun',
'channels': 32,
'length': 2048,
'preprocess': 'per_sample_mean',
'chunk_size': 4096,
'test_lens': [2048],
'test_valid': True,
}
batch_size = 64
momentum = 0.9
wc = 0.001
display_freq = 10
valid_freq = 20
bs_freq = 20
save_freq = 20
def lr_schedule(chunk_idx):
base = 0.1
if chunk_idx < 200:
return base
elif chunk_idx < 320:
return 0.1 * base
elif chunk_idx < 390:
return 0.01 * base
else:
return 0.001 * base
std = 0.02
p1 = 0
p2 = 0.1
p3 = 0.1
p4 = 0.1
metrics = [metrics.meanAccuracy, metrics.meanAUC]
metric_names = ['mean accuracy', 'areas under the ROC curve']
Conv2DLayer = dnn.Conv2DDNNLayer
Pool2DLayer = dnn.Pool2DDNNLayer
SumLayer = nn.layers.ElemwiseSumLayer
input_dims = (batch_size,
train_data_params['channels'],
1,
train_data_params['length'])
def build_model():
l_in = nn.layers.InputLayer(input_dims)
conv1 = Conv2DLayer(incoming = l_in, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std),
nonlinearity = None)
print 'conv1', nn.layers.get_output_shape(conv1)
bn1 = BatchNormLayer(incoming = conv1, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.very_leaky_rectify)
print 'bn1', nn.layers.get_output_shape(bn1)
pool1 = Pool2DLayer(incoming = bn1, pool_size = (1, 4), stride = (1, 4))
print 'pool1', nn.layers.get_output_shape(pool1)
drop1 = nn.layers.DropoutLayer(incoming = pool1, p = p1)
print 'drop1', nn.layers.get_output_shape(drop1)
conv2 = Conv2DLayer(incoming = drop1, num_filters = 256, filter_size = (1, 1),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std),
nonlinearity = None)
print 'conv2', nn.layers.get_output_shape(conv2)
bn2 = BatchNormLayer(incoming = conv2, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.very_leaky_rectify)
print 'bn2', nn.layers.get_output_shape(bn2)
conv2a = Conv2DLayer(incoming = bn2, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std), b = None,
nonlinearity = None)
print 'conv2a', nn.layers.get_output_shape(conv2a)
sum2a = SumLayer(incomings = [conv2, conv2a], coeffs = 1)
print 'sum2a', nn.layers.get_output_shape(sum2a)
bn2a = BatchNormLayer(incoming = sum2a, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn2a', nn.layers.get_output_shape(bn2a)
conv2b = Conv2DLayer(incoming = bn2a, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = conv2a.W, b = None,
nonlinearity = None)
print 'conv2b', nn.layers.get_output_shape(conv2b)
sum2b = SumLayer(incomings = [conv2, conv2b], coeffs = 1)
print 'sum2b', nn.layers.get_output_shape(sum2b)
bn2b = BatchNormLayer(incoming = sum2b, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn2b', nn.layers.get_output_shape(bn2b)
conv2c = Conv2DLayer(incoming = bn2b, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = conv2a.W, b = None,
nonlinearity = None)
print 'conv2c', nn.layers.get_output_shape(conv2c)
sum2c = SumLayer(incomings = [conv2, conv2c], coeffs = 1)
print 'sum2c', nn.layers.get_output_shape(sum2c)
bn2c = BatchNormLayer(incoming = sum2c, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn2c', nn.layers.get_output_shape(bn2c)
pool2 = Pool2DLayer(incoming = bn2c, pool_size = (1, 4), stride = (1, 4))
print 'pool2', nn.layers.get_output_shape(pool2)
drop2 = nn.layers.DropoutLayer(incoming = pool2, p = p2)
print 'drop2', nn.layers.get_output_shape(drop2)
conv3 = Conv2DLayer(incoming = drop2, num_filters = 256, filter_size = (1, 1),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std),
nonlinearity = None)
print 'conv3', nn.layers.get_output_shape(conv3)
bn3 = BatchNormLayer(incoming = conv3, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.very_leaky_rectify)
print 'bn3', nn.layers.get_output_shape(bn3)
conv3a = Conv2DLayer(incoming = bn3, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std), b = None,
nonlinearity = None)
print 'conv3a', nn.layers.get_output_shape(conv3a)
sum3a = SumLayer(incomings = [conv3, conv3a], coeffs = 1)
print 'sum3a', nn.layers.get_output_shape(sum3a)
bn3a = BatchNormLayer(incoming = sum3a, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn3a', nn.layers.get_output_shape(bn3a)
conv3b = Conv2DLayer(incoming = bn3a, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = conv3a.W, b = None,
nonlinearity = None)
print 'conv3b', nn.layers.get_output_shape(conv3b)
sum3b = SumLayer(incomings = [conv3, conv3b], coeffs = 1)
print 'sum3b', nn.layers.get_output_shape(sum3b)
bn3b = BatchNormLayer(incoming = sum3b, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn3b', nn.layers.get_output_shape(bn3b)
conv3c = Conv2DLayer(incoming = bn3b, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = conv3a.W, b = None,
nonlinearity = None)
print 'conv3c', nn.layers.get_output_shape(conv3c)
sum3c = SumLayer(incomings = [conv3, conv3c], coeffs = 1)
print 'sum3c', nn.layers.get_output_shape(sum3c)
bn3c = BatchNormLayer(incoming = sum3c, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn3c', nn.layers.get_output_shape(bn3c)
pool3 = Pool2DLayer(incoming = bn3c, pool_size = (1, 4), stride = (1, 4))
print 'pool3', nn.layers.get_output_shape(pool3)
drop3 = nn.layers.DropoutLayer(incoming = pool3, p = p3)
print 'drop3', nn.layers.get_output_shape(drop3)
conv4 = Conv2DLayer(incoming = drop3, num_filters = 256, filter_size = (1, 1),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std),
nonlinearity = None)
print 'conv4', nn.layers.get_output_shape(conv4)
bn4 = BatchNormLayer(incoming = conv4, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.very_leaky_rectify)
print 'bn4', nn.layers.get_output_shape(bn4)
conv4a = Conv2DLayer(incoming = bn4, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std), b = None,
nonlinearity = None)
print 'conv4a', nn.layers.get_output_shape(conv4a)
sum4a = SumLayer(incomings = [conv4, conv4a], coeffs = 1)
print 'sum4a', nn.layers.get_output_shape(sum4a)
bn4a = BatchNormLayer(incoming = sum4a, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn4a', nn.layers.get_output_shape(bn4a)
conv4b = Conv2DLayer(incoming = bn4a, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = conv4a.W, b = None,
nonlinearity = None)
print 'conv4b', nn.layers.get_output_shape(conv4b)
sum4b = SumLayer(incomings = [conv4, conv4b], coeffs = 1)
print 'sum4b', nn.layers.get_output_shape(sum4b)
bn4b = BatchNormLayer(incoming = sum4b, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn4b', nn.layers.get_output_shape(bn4b)
conv4c = Conv2DLayer(incoming = bn4b, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = conv4a.W, b = None,
nonlinearity = None)
print 'conv4c', nn.layers.get_output_shape(conv4c)
sum4c = SumLayer(incomings = [conv4, conv4c], coeffs = 1)
print 'sum4c', nn.layers.get_output_shape(sum4c)
bn4c = BatchNormLayer(incoming = sum4c, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn4c', nn.layers.get_output_shape(bn4c)
pool4 = Pool2DLayer(incoming = bn4c, pool_size = (1, 2), stride = (1, 2))
print 'pool4', nn.layers.get_output_shape(pool4)
drop4 = nn.layers.DropoutLayer(incoming = pool4, p = p4)
print 'drop4', nn.layers.get_output_shape(drop4)
conv5 = Conv2DLayer(incoming = drop4, num_filters = 256, filter_size = (1, 1),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std),
nonlinearity = None)
print 'conv5', nn.layers.get_output_shape(conv5)
bn5 = BatchNormLayer(incoming = conv5, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.very_leaky_rectify)
print 'bn5', nn.layers.get_output_shape(bn5)
conv5a = Conv2DLayer(incoming = bn5, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = nn.init.Normal(std = std), b = None,
nonlinearity = None)
print 'conv5a', nn.layers.get_output_shape(conv5a)
sum5a = SumLayer(incomings = [conv5, conv5a], coeffs = 1)
print 'sum5a', nn.layers.get_output_shape(sum5a)
bn5a = BatchNormLayer(incoming = sum5a, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn5a', nn.layers.get_output_shape(bn5a)
conv5b = Conv2DLayer(incoming = bn5a, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = conv5a.W, b = None,
nonlinearity = None)
print 'conv5b', nn.layers.get_output_shape(conv5b)
sum5b = SumLayer(incomings = [conv5, conv5b], coeffs = 1)
print 'sum5b', nn.layers.get_output_shape(sum5b)
bn5b = BatchNormLayer(incoming = sum5b, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn5b', nn.layers.get_output_shape(bn5b)
conv5c = Conv2DLayer(incoming = bn5b, num_filters = 256, filter_size = (1, 9),
stride = 1, border_mode = 'same',
W = conv5a.W, b = None,
nonlinearity = None)
print 'conv5c', nn.layers.get_output_shape(conv5c)
sum5c = SumLayer(incomings = [conv5, conv5c], coeffs = 1)
print 'sum5c', nn.layers.get_output_shape(sum5c)
bn5c = BatchNormLayer(incoming = sum5c, epsilon = 0.0000000001,
nonlinearity = nn.nonlinearities.rectify)
print 'bn5c', nn.layers.get_output_shape(bn5c)
pool5 = Pool2DLayer(incoming = bn5c, pool_size = (1, 4), stride = (1, 4))
print 'pool5', nn.layers.get_output_shape(pool5)
l_out = nn.layers.DenseLayer(incoming = pool5, num_units = num_events,
W = nn.init.Normal(std = std),
nonlinearity = nn.nonlinearities.sigmoid)
print 'l_out', nn.layers.get_output_shape(l_out)
return l_out
def build_train_valid(l_out):
params = nn.layers.get_all_params(l_out, regularizable = True)
wc_term = 0.5 * sum(T.sum(param ** 2) for param in params)
x_batch = T.tensor4('x', theano.config.floatX)
y_batch = T.matrix('y', 'int32')
train_output = nn.layers.get_output(l_out, x_batch)
train_loss = nn.objectives.binary_crossentropy(train_output, y_batch)
train_loss = nn.objectives.aggregate(train_loss, mode = 'mean')
train_loss += wc * wc_term
params = nn.layers.get_all_params(l_out, trainable = True)
valid_output = nn.layers.get_output(l_out, x_batch, deterministic = True)
lr = theano.shared(np.float32(lr_schedule(0)))
updates = nn.updates.nesterov_momentum(train_loss, params, lr, momentum)
x_shared = nn.utils.shared_empty(dim = len(input_dims))
y_shared = nn.utils.shared_empty(dim = 2, dtype = 'int32')
idx = T.scalar('idx', 'int32')
givens = {x_batch: x_shared[idx * batch_size:(idx + 1) * batch_size],
y_batch: y_shared[idx * batch_size:(idx + 1) * batch_size]}
iter_train = theano.function([idx], [train_loss, train_output],
givens = givens,
updates = updates)
givens = {x_batch: x_shared[idx * batch_size:(idx + 1) * batch_size]}
iter_valid = theano.function([idx], valid_output, givens = givens)
return x_shared, y_shared, idx, lr, iter_train, iter_valid
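# Sketch of the training driver this config plugs into (a guess for context;
# the chunk generators named in *_data_params live elsewhere in this repo):
#   l_out = build_model()
#   x_shared, y_shared, idx, lr, iter_train, iter_valid = build_train_valid(l_out)
#   for chunk_idx in xrange(train_data_params['num_chunks']):
#       lr.set_value(np.float32(lr_schedule(chunk_idx)))
#       # fill x_shared/y_shared with a chunk, then call iter_train over batches via idx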
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
da97e1667044cb0a40ecdee0bd59859fdebfe826
|
0a7d49300a547eecc823b78a891057f1017db1b2
|
/rabbitmq/topic_send.py
|
888d59edb662dad9fd96ea9cd0090a8f9c8e439b
|
[] |
no_license
|
PeterZhangxing/codewars
|
f315b2ce610207e84a2f0927bc47b4b1dd89bee4
|
8e4dfaaeae782a37f6baca4c024b1c2a1dc83cba
|
refs/heads/master
| 2020-09-22T12:09:59.419919
| 2020-03-02T12:52:55
| 2020-03-02T12:52:55
| 224,330,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
import pika,sys
credentials = pika.PlainCredentials('zx2005', 'redhat')
# Connect to the remote queue server using the username/password defined above
connection = pika.BlockingConnection(pika.ConnectionParameters(
"10.1.11.128",
credentials=credentials
))
# On top of the TCP connection, establish the RabbitMQ (AMQP) protocol connection
channel = connection.channel()
# Declare a topic exchange, which routes each message to the queues whose
# binding pattern matches its routing key
channel.exchange_declare(exchange='topic_logs', exchange_type='topic')  # recent pika uses 'exchange_type' (older releases used 'type')
routing_key = sys.argv[1] if len(sys.argv) > 1 else 'anonymous.info'
message = ' '.join(sys.argv[2:]) or 'Hello World!'
channel.basic_publish(
exchange='topic_logs',
routing_key=routing_key,
body=message)
print(" [x] Sent %r:%r" % (routing_key, message))
connection.close()
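# Example invocation (a matching topic consumer must be bound to 'topic_logs'):
#   python topic_send.py kern.critical "A critical kernel error"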
|
[
"964725349@qq.com"
] |
964725349@qq.com
|
e65847855532814dff90be0f26da0a9d74bd64ad
|
449d5f4bdc83fc3a30c730e443c66b99ee0beaa8
|
/explore.py
|
cd9f6908f274e1d017fc4b746a76b97ed486c950
|
[] |
no_license
|
LinhQuach13/regression-exercises
|
32309b136afeba78ed334fa9911e7237d84e17ea
|
fb9a4cb007c6b6d5ad045ce6d3ada1560ecd5f02
|
refs/heads/master
| 2023-05-30T13:36:12.488143
| 2021-06-10T21:43:39
| 2021-06-10T21:43:39
| 373,301,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,913
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from env import host, user, password
# visualize
import seaborn as sns
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(11, 9))
plt.rc('font', size=13)
# turn off pink warning boxes
import warnings
warnings.filterwarnings("ignore")
import os
os.path.isfile('telco_df.csv')
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
import sklearn.preprocessing
from sklearn.preprocessing import QuantileTransformer
################################ Telco Data ###########################################################################################
def plot_variable_pairs(ds):
'''
This function takes in the telco train dataset and returns 2 lmplots (scatterplots with a regression line).
The first plot shows the relationship between tenure and total_charges.
The second plot shows the relationship between monthly_charges and total_charges.
'''
#lmplot of tenure with total_charges with tenure
sns.lmplot(x="tenure", y="total_charges", data=ds, line_kws={'color': 'purple'})
plt.show()
#lmplot of tenure with total_charges with monthly_charges
sns.lmplot(x="monthly_charges", y="total_charges", data=ds, line_kws={'color': 'purple'})
plt.show();
def plot_variable_pairs2(ds):
    '''
    This function takes in the telco train dataset and returns a pairplot
    (scatterplots with regression lines) showing the pairwise relationships
    between tenure, monthly_charges, and total_charges.
    - arguments:
        - ds: dataset or dataframe
    '''
    sns.pairplot(ds[['tenure', 'monthly_charges', 'total_charges']], corner=True, kind= 'reg', plot_kws={'line_kws':{'color':'purple'}})
    plt.show();
def plot_quant(ds, cont_vars=None):
'''
This function takes in the train dataset, and continuous variable column list
    and outputs the list as a plot.
arguments:
- ds= dataset you want to input (typically the train dataset)
- cont_vars= continuous variable list of columns
'''
    #default list of continuous variables (used when none is passed in)
    if cont_vars is None:
        cont_vars = ['monthly_charges', 'total_charges', 'tenure', 'tenure_years']
for col in list(ds.columns):
if col in cont_vars:
sns.barplot(data = ds, y = col)
plt.show()
def plot_cat(ds, cat_vars=None):
'''
This function takes in the train dataset, and categorical variable column list
    and outputs the list as a plot.
arguments:
- ds= dataset you want to input (typically the train dataset)
- cat_vars= categorical variable list of columns
'''
    #default list of categorical variables (used when none is passed in)
    if cat_vars is None:
        cat_vars = ['payment_type_id', 'internet_service_type_id', 'contract_type_id', 'gender', 'senior_citizen', 'partner', 'dependents', 'phone_service', 'multiple_lines', 'online_security', 'online_backup','device_protection', 'tech_support', 'streaming_tv', 'streaming_movies', 'paperless_billing', 'churn', 'contract_type', 'internet_service_type', 'payment_type']
for col in list(ds.columns):
if col in cat_vars:
sns.countplot(ds[col])
plt.show()
def plot_categorical_and_continuous_vars(ds, cat_vars, cont_vars):
'''
This function takes in the train dataset, categorical variable column list, and continuous variable column list
    and outputs the lists as plots.
arguments:
- ds= dataset you want to input (typically the train dataset)
- cat_vars= categorical variable list of columns
- cont_vars= continuous variable list of columns
'''
plot_cat(ds, cat_vars)
plot_quant(ds, cont_vars);
def months_to_years(ds):
ds['tenure_years'] = ds.tenure / 12;
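# Example flow (assuming `train` is a prepared telco dataframe):
#   months_to_years(train)
#   plot_variable_pairs(train)
#   plot_variable_pairs2(train)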
|
[
"Linh.M.Quach1@gmail.com"
] |
Linh.M.Quach1@gmail.com
|
29eb3d63fc6b0b5a8feec92dd42aaccb8c3f3c16
|
d855e8ce6e43bdefb75df6c382f78a336e2f6559
|
/LeCeption.py
|
f40e425902d1e5d390a0a44e8781bb4985094408
|
[] |
no_license
|
TheInventorMan/Traffic-Sign-Classifier
|
ff0df1bbe35b543e2e2c6813291539522df4623c
|
c2cfb0d2a005be98dacbbc7f63767611642ebc12
|
refs/heads/master
| 2020-06-17T11:37:27.448105
| 2019-07-09T03:05:50
| 2019-07-09T03:05:50
| 195,912,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,383
|
py
|
import tensorflow as tf
from tensorflow.contrib.layers import flatten
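# NOTE: `keep_prob` (dropout keep probability) and `n_classes` (43 for the
# German Traffic Sign benchmark) are assumed to be defined by the enclosing
# training script; LeCeption uses them as free variables below.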
def LeCeption(x):
mu = 0
sigma = 0.1
# Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
conv1_b = tf.Variable(tf.zeros(6))
conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
# Activation.
conv1 = tf.nn.leaky_relu(conv1)
# Pooling. Input = 28x28x6. Output = 14x14x6.
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# Layer 2_1: Convolutional (Inception 1). Input = 14x14x6. Output = 10x10x16.
conv21_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
conv21_b = tf.Variable(tf.zeros(16))
conv21 = tf.nn.conv2d(conv1, conv21_W, strides=[1, 1, 1, 1], padding='VALID') + conv21_b
# Layer 2_2: Convolutional (Inception 2). Input = 14x14x6. Output = 12x12x16.
conv22_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 6, 16), mean = mu, stddev = sigma))
conv22_b = tf.Variable(tf.zeros(16))
conv22 = tf.nn.conv2d(conv1, conv22_W, strides=[1, 1, 1, 1], padding='VALID') + conv22_b
# Layer 2_2: Double Max Pool. Input = 12x12x16. Output = 10x10x16
conv22 = tf.nn.max_pool(conv22, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
conv22 = tf.nn.max_pool(conv22, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
# Inception Stack. Output = 10x10x32
conv2 = tf.concat((conv21, conv22), 3)
# Activation.
conv2 = tf.nn.leaky_relu(conv2)
# Pooling. Input = 10x10x32. Output = 5x5x32.
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# Branch off bypass. Input = 5x5x32. Output = 800.
bypBranch = flatten(conv2)
# Main feedforward path
# Layer 3: Convolutional. Input = 5x5x32. Output = 1x1x800.
conv3_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 32, 800), mean = mu, stddev = sigma))
conv3_b = tf.Variable(tf.zeros(800))
conv3 = tf.nn.conv2d(conv2, conv3_W, strides=[1, 1, 1, 1], padding='VALID') + conv3_b
# Activation
conv3 = tf.nn.leaky_relu(conv3)
# Merge branches. Output = 1600.
mainBranch = flatten(conv3)
fc0 = tf.concat([mainBranch, bypBranch], 1)
fc0 = tf.nn.dropout(fc0, keep_prob)
# Layer 4: Fully Connected. Input = 1600. Output = 400.
fc1_W = tf.Variable(tf.truncated_normal(shape=(1600, 400), mean = mu, stddev = sigma))
fc1_b = tf.Variable(tf.zeros(400))
fc1 = tf.matmul(fc0, fc1_W) + fc1_b
# Activation.
fc1 = tf.nn.leaky_relu(fc1)
fc1 = tf.nn.dropout(fc1, keep_prob)
# Layer 5: Fully Connected. Input = 400. Output = 84.
fc2_W = tf.Variable(tf.truncated_normal(shape=(400, 84), mean = mu, stddev = sigma))
fc2_b = tf.Variable(tf.zeros(84))
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
# Activation.
fc2 = tf.nn.leaky_relu(fc2)
fc2 = tf.nn.dropout(fc2, keep_prob)
# Layer 6: Fully Connected. Input = 84. Output = 43.
fc3_W = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean = mu, stddev = sigma))
fc3_b = tf.Variable(tf.zeros(n_classes))
logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
|
[
"mnasir@mit.edu"
] |
mnasir@mit.edu
|
b1b1056e1f4c10ad5a9611023811b6a1e72a169d
|
6d57e245add80e6c77bd3032913f98eaee130c98
|
/code/orbit_pv.py
|
a0a2a93e7d437f1a43c1cc5b0926ff8bd1ce3fb9
|
[] |
no_license
|
keflavich/atca_gc_h2co
|
a306de6205a3f1d2bae0c5d80e355dc2fbe33672
|
4ec085d1f0aa6d63d4f0c769d57f5803cfc3a24a
|
refs/heads/master
| 2023-01-23T21:22:49.527921
| 2015-09-27T08:02:16
| 2015-09-27T08:02:16
| 42,525,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,408
|
py
|
import os
import numpy as np
import pvextractor
from pvextractor.geometry.poly_slices import extract_poly_slice
import spectral_cube
from spectral_cube import SpectralCube, BooleanArrayMask, Projection
import aplpy
import pylab as pl
import matplotlib
import copy
from astropy import units as u
from astropy import coordinates
from astropy.io import ascii, fits
from astropy import log
from astropy.wcs import WCS
from astropy import wcs
import matplotlib
matplotlib.rc_file('pubfiguresrc')
table = ascii.read(('orbit_K14_2.dat'), format='basic', comment="#", guess=False)
coords = coordinates.SkyCoord(table['l']*u.deg, table['b']*u.deg, frame='galactic')
P = pvextractor.Path(coords, width=300*u.arcsec)
dl = (table['l'][1:]-table['l'][:-1])
db = (table['b'][1:]-table['b'][:-1])
dist = (dl**2+db**2)**0.5
cdist = np.zeros(dist.size+1)
cdist[1:] = dist.cumsum()
reftime = -2
bricktime = 0.3
time = table['t']
# how much time per pixel?
dtdx = (table['t'].max() - table['t'].min()) / cdist.max()
figsize=(20,10)
def offset_to_point(glon, glat):
"""
Determine the offset along the orbit to the nearest point on an orbit to
the specified point
"""
import shapely.geometry as geom
line = geom.LineString(table['l','b'])
point = geom.Point(glon, glat)
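    # shapely's LineString.project returns the distance along the line to the
    # closest approach of `point`, i.e. the offset coordinate of the PV diagram.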
return line.project(point)
cmap = copy.copy(pl.cm.RdYlBu_r)
cmap.set_under((0.9,0.9,0.9,0.5))
molecules = ('H2CO11', )
filenames = ('../data/h2comosaic_contsub_clean_min.fits',)
for molecule,fn in zip(molecules,filenames):
log.info(molecule)
cube = spectral_cube.SpectralCube.read(fn)
if 'smooth' in fn:
cmap.set_bad((1.0,)*3)
else:
cmap.set_bad((0.9,0.9,0.9,0.5))
pvfilename = ('orbits/KDL2014_orbit_on_{0}.fits'.format(molecule))
if os.path.exists(pvfilename):
pv = fits.open(pvfilename)[0]
else:
# respect_nan = False so that the background is zeros where there is data
# and nan where there is not data
# But not respecting nan results in data getting averaged with nan, so we
# need to respect it and then manually flag (in a rather unreliable
# fashion!)
#pv = pvextractor.extract_pv_slice(cube, P, respect_nan=False)
pv = pvextractor.extract_pv_slice(cube, P, respect_nan=True)
if not os.path.isdir(os.path.dirname(pvfilename)):
os.mkdir(os.path.dirname(pvfilename))
pv.writeto(pvfilename)
bad_cols = np.isnan(np.nanmax(pv.data, axis=0))
nandata = np.isnan(pv.data)
pv.data[nandata & ~bad_cols] = 0
ok = ~nandata & ~bad_cols
fig1 = pl.figure(1, figsize=figsize)
fig1.clf()
ax = fig1.gca()
mywcs = WCS(pv.header)
xext, = mywcs.sub([1]).wcs_pix2world((0,pv.shape[1]), 0)
print "xext: ",xext
yext, = mywcs.sub([2]).wcs_pix2world((0,pv.shape[0]), 0)
yext /= 1e3
dy = yext[1]-yext[0]
dx = xext[1]-xext[0]
#F = aplpy.FITSFigure(pv, figure=fig1)
#actual_aspect = pv.shape[0]/float(pv.shape[1])
#F.show_grayscale(aspect=0.5/actual_aspect)
im = ax.imshow(pv.data, extent=[xext[0], xext[1], yext[0], yext[1]],
aspect=0.6*dx/dy, cmap=cmap, vmin=-0.1, vmax=0.01)
ax2 = ax.twiny()
ax2.xaxis.set_label_position('top')
ax2.xaxis.set_ticklabels(["{0:0.2f}".format(x) for x in
np.interp(ax2.xaxis.get_ticklocs(),
np.linspace(0,1,time.size),
time-reftime)])
ax2.set_xlabel("Time since 1$^\\mathrm{st}$ pericenter passage [Myr]",
size=24, labelpad=10)
ax.set_xlabel("Offset (degrees)")
ax.set_ylabel("$V_{LSR}$ $(\mathrm{km\ s}^{-1})$")
for color, segment in zip(('red','green','blue','black','purple'),
('abcde')):
selection = table['segment'] == segment
# Connect the previous to the next segment - force continuity
if np.argmax(selection) > 0:
selection[np.argmax(selection)-1] = True
ax.plot(np.array(cdist[selection]),
table["v'los"][selection], zorder=1000,
color=color, linewidth=3, alpha=0.25)
#F.show_lines(np.array([[cdist[selection],
# table["v'los"][selection]*1e3]]), zorder=1000,
# color=color, linewidth=3, alpha=0.25)
#F.show_markers(cdist[selection], table["v'los"][selection]*1e3, zorder=1000,
# color=color, marker='+')
#F.recenter(x=4.5/2., y=20., width=4.5, height=240000)
#F.show_markers([offset_to_point(0.47,-0.01)], [30.404e3], color=['r'])
#F.show_markers([offset_to_point(0.38,+0.04)], [39.195e3], color=['b'])
#F.show_markers([offset_to_point(0.47, -0.01)],[30.404e3], edgecolor='r', marker='x')
#F.show_markers([offset_to_point(0.38, 0.04)], [39.195e3], edgecolor='b', marker='x')
#F.show_markers([offset_to_point(0.253, 0.016)], [36.5e3], edgecolor='purple', marker='x')
ax.plot(offset_to_point(0.47, -0.01),30.404, color='r', marker='x', markersize=25)
ax.plot(offset_to_point(0.38, 0.04), 39.195, color='b', marker='x', markersize=25)
ax.plot(offset_to_point(0.253, 0.016), 36.5, color='purple', marker='x', markersize=25)
#F.refresh()
#F._ax1.set_ylabel("$V_{LSR} (\mathrm{km\ s}^{-1})$")
#F._ax1.set_yticklabels([(str(int(x.get_text())/1000)) for x in F._ax1.get_yticklabels()])
#F.refresh()
ax.axis([xext[0], xext[1], yext[0], yext[1]])
fig1.savefig(('orbits/KDL2014_orbit_on_{0}.pdf'.format(molecule)),
bbox_inches='tight')
fig2 = pl.figure(2, figsize=figsize)
pl.clf()
img = cube.mean(axis=0).hdu
ok = ~np.isnan(img.data)
img.data[np.isnan(img.data)] = 0
F2 = aplpy.FITSFigure(img, convention='calabretta', figure=fig2)
F2.show_grayscale()
patches = P.to_patches(1, ec='gray', fc='none',
#transform=ax.transData,
clip_on=True, #clip_box=ax.bbox,
wcs=cube.wcs)
for patch in patches:
patch.set_linewidth(0.5)
patch.set_alpha(0.2)
patch.zorder = 50
patches[0].set_edgecolor('green')
patches[0].set_alpha(1)
patches[0].set_linewidth(1)
patches[0].zorder += 1
patchcoll = matplotlib.collections.PatchCollection(patches, match_original=True)
patchcoll.zorder=10
c = F2._ax1.add_collection(patchcoll)
F2._rectangle_counter += 1
rectangle_set_name = 'rectangle_set_' + str(F2._rectangle_counter)
for color, segment in zip(('red','green','blue','black','purple'),
('abcde')):
selection = table['segment'] == segment
# Connect the previous to the next segment - force continuity
if np.argmax(selection) > 0:
selection[np.argmax(selection)-1] = True
F2.show_lines(np.array([[table['l'][selection],
table["b"][selection]]]), zorder=1000,
color=color, linewidth=3, alpha=0.5)
#F2.show_markers(table['l'][selection], table["b"][selection], zorder=1000,
# color=color, marker='+', alpha=0.5)
F2._layers[rectangle_set_name] = c
F2.recenter(0, -0.03, width=1.8, height=0.3)
F2.set_tick_labels_format('d.dd','d.dd')
F2.show_markers([0.47], [-0.01], edgecolor='r', marker='x', s=100, zorder=1500)
F2.show_markers([0.38], [0.04], edgecolor='b', marker='x', s=100, zorder=1500)
F2.show_markers([0.253], [0.016], edgecolor='purple', marker='x', s=100, zorder=1500)
F2.save(('orbits/KDL2014_orbitpath_on_{0}.pdf'.format(molecule)))
# Compute the temperature as a function of time in a ppv tube
offset = np.linspace(0, cdist.max(), pv.shape[1])
time = np.interp(offset, cdist, table['t'])
vel = np.interp(time, table['t'], table["v'los"])
y,x = np.indices(pv.data.shape)
p,v = WCS(pv.header).wcs_pix2world(x,y, 0)
vdiff = 15
velsel = (v > (vel-vdiff)*1e3) & (v < (vel+vdiff)*1e3)
pv.data[nandata] = np.nan
pv.data[pv.data==0] = np.nan
pv.data[~velsel] = np.nan
mean_tem = np.nanmean(pv.data, axis=0)
min_tem = np.nanmin(pv.data, axis=0)
max_tem = np.nanmax(pv.data, axis=0)
std_tem = np.nanstd(pv.data, axis=0)
# errorbar version: ugly
#eb = ax3.errorbar(time, mean_tem, yerr=[min_tem, max_tem],
# linestyle='none', capsize=0, color='r', errorevery=20)
#eb[-1][0].set_linestyle('--')
#ax3.fill_between(time, mean_tem-std_tem, mean_tem+std_tem,
# color='b', alpha=0.2)
#ax3.plot(time, mean_tem, color='b', alpha=0.2)
fig3 = pl.figure(3, figsize=figsize)
fig3.clf()
ax3 = fig3.gca()
ax3.plot(time-reftime, pv.data.T, 'k.', alpha=0.5, markersize=3)
ax3.set_xlabel("Time since 1$^\\mathrm{st}$ pericenter passage [Myr]", size=24, labelpad=10)
if 'Temperature' in molecule:
ax3.set_ylim(0,vmax)
ax3.set_ylabel("Temperature [K]", size=24, labelpad=10)
ytext = 180
elif 'Ratio' in fn:
ax3.set_ylim(0,0.5)
ax3.set_ylabel("Ratio $R_1$", size=24, labelpad=10)
ytext = 0.5*14./15.
else:
ax3.set_ylabel("$T_A^*$ [K]", size=24, labelpad=10)
ytext = ax3.get_ylim()[1]*(14./15.)
ax3.text(bricktime, ytext, "Brick", verticalalignment='center',
horizontalalignment='center', rotation='vertical', color='k', weight='bold')
ax3.text(bricktime+0.43, ytext, "Sgr B2", verticalalignment='center',
horizontalalignment='center', rotation='vertical', color='k', weight='bold')
ax3.text(bricktime+3.58, ytext*135./140., "20 km s$^{-1}$", verticalalignment='center',
horizontalalignment='center', rotation='vertical', color='k', weight='bold')
ax3.text(bricktime+3.66, ytext*135./140., "50 km s$^{-1}$", verticalalignment='center',
horizontalalignment='center', rotation='vertical', color='k', weight='bold')
ax3.text(bricktime+3.28, ytext, "Sgr C", verticalalignment='center',
horizontalalignment='center', rotation='vertical', color='k', weight='bold')
pl.setp(ax3.get_xticklabels(), fontsize=20)
pl.setp(ax3.get_yticklabels(), fontsize=20)
ax3.set_xlim(-0.1,4.6)
fig3.savefig(('orbits/KDL2014_{0}_vs_time.pdf'.format(molecule)),
bbox_inches='tight')
|
[
"keflavich@gmail.com"
] |
keflavich@gmail.com
|
ea6a89ee9ef07d139f1b15d21613f28a0e4a44a6
|
ffbe68f6b32cb47fa24d88cdf12f489ea03b58aa
|
/identityoperator.py
|
99f72258ae220ec04375a95a2afe7aec867c6443
|
[] |
no_license
|
Vipretha/Vipretha
|
3fe27f986cdb7f6d675753adf7d79d5eacb52b02
|
ccded940e59115602399cdfe89a304eb75c951bd
|
refs/heads/main
| 2023-08-17T00:36:29.719078
| 2021-10-04T05:18:53
| 2021-10-04T05:18:53
| 402,299,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
a=70
b=38
if(a is b):
print ("a and b are same")
else:
print("a and b are not same")
b=70
if(b is a):
print("b and a are same")
else:
print("b and a are not same")
|
[
"noreply@github.com"
] |
Vipretha.noreply@github.com
|
afd569127414d4de2f6c0cf86dbb1fa6c09b1535
|
56fc8fe58ec8d576ec857f19a8adc43b49e19125
|
/DjangoDrf/DjangoDrf/settings.py
|
b6ac5e68c4de3e0f5473a6352e333c8f3d869f45
|
[] |
no_license
|
Qpigzhu/Drf
|
53ae3dfd7d2715ea49bbfca02ada1a9239cb25a2
|
e4faa165a81abe8e641b992b6f86cc46cb01ac16
|
refs/heads/master
| 2022-12-13T16:30:33.868771
| 2018-12-12T02:34:11
| 2018-12-12T02:34:11
| 161,421,986
| 0
| 0
| null | 2022-12-08T01:20:24
| 2018-12-12T02:32:20
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,141
|
py
|
"""
Django settings for DjangoDrf project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,BASE_DIR)
sys.path.insert(0,os.path.join(BASE_DIR,'apps'))
sys.path.insert(0,os.path.join(BASE_DIR, 'extra_apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm6aywf_!9n)3j&&##x#_&-_=d=hfq&4yo9!@-f)3pc82&5_=3s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = "users.UserProfile" # Override the default User model so the custom one takes effect
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users.apps.UsersConfig',
'goods.apps.GoodsConfig',
'trade.apps.TradeConfig',
'user_operation.apps.UserOperationConfig',
'DjangoUeditor',
'xadmin',
'crispy_forms',
'rest_framework',
'django_filters',
'corsheaders',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'DjangoDrf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DjangoDrf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'vue_shop',
'USER':'root',
'PASSWORD':'root',
'HOST':'127.0.0.1',
        'PORT':'3307', # port
        # MySQL offers two main storage engines: InnoDB and MyISAM.
        # Third-party login libraries require InnoDB, otherwise migrations fail.
        "OPTIONS":{"init_command":"SET default_storage_engine=INNODB;"},
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans' # Chinese support; 'zh-hans' since Django 1.8 ('zh-cn' before 1.8)
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Override the login logic so users can sign in with a mobile number
AUTHENTICATION_BACKENDS = (
'users.views.CustomBackend',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Access paths for uploaded files and images
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# REST_FRAMEWORK pagination settings
# REST_FRAMEWORK = {
# 'PAGE_SIZE': 10,
# }
# Default authentication classes for login verification
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
# JWT settings
import datetime
JWT_AUTH = {
    'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7), # how long a token stays valid
    'JWT_AUTH_HEADER_PREFIX': 'JWT', # prefix expected when parsing the Authorization header
}
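# Clients then authenticate with a header of the form:  Authorization: JWT <token>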
# Regular expression for (mainland China) mobile phone numbers
REGEX_MOBILE = r"^1[358]\d{9}$|^147\d{8}$|^176\d{8}$"
# Yunpian (SMS provider) API key
APIKEY = 'c3f3f6e838aebdcd4cbbf02575104989'
|
[
"331103418@qq.com"
] |
331103418@qq.com
|
70639d45671e5396a8a8c8e6ca9b2f500e141e72
|
575f91e81238eeba0a114d2360a7a727440649ba
|
/cifar_models/densenet.py
|
47ac6ec0233f40a9a9d1e6f4029535f3508a3b27
|
[] |
no_license
|
Lappuccino/SimulatorAttack
|
34075c2f5c6f424062a2e06ff32379c17e31f619
|
94d91639c55d594f1b9ee4212c3b99b1f0941a9c
|
refs/heads/master
| 2023-06-09T09:35:51.018553
| 2021-06-18T03:45:00
| 2021-06-18T03:45:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,688
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['densenet']
class Bottleneck(nn.Module):
def __init__(self, inplanes, expansion=4, growthRate=12, dropRate=0):
super(Bottleneck, self).__init__()
planes = expansion * growthRate
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, growthRate, kernel_size=3,
padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.dropRate = dropRate
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, training=self.training)
out = torch.cat((x, out), 1)
return out
class BasicBlock(nn.Module):
def __init__(self, inplanes, expansion=1, growthRate=12, dropRate=0):
super(BasicBlock, self).__init__()
planes = expansion * growthRate
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, growthRate, kernel_size=3,
padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.dropRate = dropRate
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, training=self.training)
out = torch.cat((x, out), 1)
return out
class Transition(nn.Module):
def __init__(self, inplanes, outplanes):
super(Transition, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, outplanes, kernel_size=1,
bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, depth=22, block=Bottleneck,
dropRate=0, num_classes=10, growthRate=12, compressionRate=2):
super(DenseNet, self).__init__()
assert (depth - 4) % 3 == 0, 'depth should be 3n+4'
        n = (depth - 4) // 3 if block == BasicBlock else (depth - 4) // 6  # blocks per dense stage (integer division)
self.growthRate = growthRate
self.dropRate = dropRate
# self.inplanes is a global variable used across multiple
# helper functions
self.inplanes = growthRate * 2
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, padding=1,
bias=False)
self.dense1 = self._make_denseblock(block, n)
self.trans1 = self._make_transition(compressionRate)
self.dense2 = self._make_denseblock(block, n)
self.trans2 = self._make_transition(compressionRate)
self.dense3 = self._make_denseblock(block, n)
self.bn = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(self.inplanes, num_classes)
# Weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_denseblock(self, block, blocks):
layers = []
for i in range(blocks):
# Currently we fix the expansion ratio as the default value
layers.append(block(self.inplanes, growthRate=self.growthRate, dropRate=self.dropRate))
self.inplanes += self.growthRate
return nn.Sequential(*layers)
def _make_transition(self, compressionRate):
inplanes = self.inplanes
outplanes = int(math.floor(self.inplanes // compressionRate))
self.inplanes = outplanes
return Transition(inplanes, outplanes)
def forward(self, x):
x = self.conv1(x)
x = self.trans1(self.dense1(x))
x = self.trans2(self.dense2(x))
x = self.dense3(x)
x = self.bn(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def densenet(**kwargs):
"""
Constructs a ResNet model.
"""
return DenseNet(**kwargs)
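# Example (CIFAR-sized input; the values are illustrative):
#   model = densenet(depth=100, growthRate=12, num_classes=10)
#   logits = model(torch.randn(2, 3, 32, 32))  # -> shape (2, 10)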
|
[
"sharpstill@163.com"
] |
sharpstill@163.com
|
822c99d79857911dcf6e2c367f13cab24f178bd6
|
971f88cd52eb97407ce7c65e66e48b641c0f5973
|
/Linear Equations Calculator.py
|
c7c8d1bca7041edcedd364e9dca88b6e218df0eb
|
[] |
no_license
|
IanBotashev/Python-Programs
|
127a2b6098c7a8c728b9eb04568525ed4a90649a
|
5e4ac7892d5bfab90cbd1bfc704493e91943262d
|
refs/heads/master
| 2020-09-06T09:26:47.863046
| 2019-11-08T05:34:45
| 2019-11-08T05:34:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,506
|
py
|
#Some code from Python.org
#Some code from StackOverflow.com
#This is a work-in-progress
#First variables:
mainvar = 1
tasdfasfdhjasdhfkjashdfk = 2
while tasdfasfdhjasdhfkjashdfk == 2:
#Variables called in this loop:
typee = 0
x = 1
n = 2.1
rwethereyet = 1
wait = 1
tingytwo = 1
mrb = 1
choice = 0
#Imports modules
from fractions import Fraction
from decimal import Decimal, ROUND_UP
import random
import sys
#Intro:
print("Welcome to Linear Equation Calculator! Made by MSGUY in Python 3.")
input("(Enter)")
print("Formula Finder: Instead of doing all the math to turn a line into an equation, just choose an equation and know two points on the line.")
input("(Enter)")
print("Or, you can choose converter, and convert one equation type to another type of equation. Information inputs very depending on the type of equation.")
input("(Enter)")
print("Fullscreen mode works best.")
input("(Enter)")
print("In some cases, the program will print things that should've been simplified, like 4/2 to 2. I am working on fixing this, but for now you will have to simplify on your own.")
input("(Enter)")
#Type of calculator choice
while mainvar == 1:
wait = 1
typee = 0
x = 1
n = 2.1
rwethereyet = 1
wait = 1
tingytwo = 1
mrb = 1
tingytwo = 1
while wait == 1:
choice = input("Please choose a type of calculator. c for converter and f for formula finder. ")
if choice == "c" or choice == "f":
wait = 2
#Formula finder start
if choice == "f":
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Welcome to Python Linear Equation Formula Finder!")
input("(Enter)")
print("Note: If you do not answer in numbers for certain questions, the program will not work.")
input("(Enter)")
print("Let M represent the slope of the equation, and let B represent the y-intercept.")
input("(Enter)")
#Equation choice
while x == 1:
y =input("Which kind of equation? 1 for slope intercept (y = mx + b), 2 for point slope (y - y1 = m(x - x1)), or 3 for standard (Ax + By = C). ")
if y == "1":
typee = 1
x = 2
print("y = mx + b")
if y == "2":
typee = 2
x = 2
print("y - y1 = m(x - x1)")
if y == "3":
typee = 3
x = 2
("Ax + By = C")
#Integer choice- are numbers not decimals?
while rwethereyet == 1:
integer = input("Are all your coordinate points integers (no decimal places)? Please answer 1(yes) or 2(no). ")
if integer == "1":
print((str(integer)))
rwethereyet = 2
integer = 1
elif integer == "2":
print((str(integer)))
integer = 2
rwethereyet = 2
else:
rwethereyet = 1
#Enter line coordinates
if integer == 1:
xone = int(input("What is the first X value? "))
yone = int(input("What is the first Y value? "))
xtwo = int(input("What is the second X value? "))
ytwo = int(input("What is the second Y value? "))
finalmy = int(ytwo) - int(yone)
finalmx = int(xtwo) - int(xone)
elif integer == 2:
xone = float(input("What is the first X value? "))
yone = float(input("What is the first Y value? "))
xtwo = float(input("What is the second X value? "))
ytwo = float(input("What is the second Y value? "))
finalmy = float(ytwo) - float(yone)
finalmx = float(xtwo) - float(xone)
            #Normalize signs so a negative slope shows its sign in the numerator
            if finalmy < 0 and finalmx < 0:
                finalmy = abs(finalmy)
                finalmx = abs(finalmx)
            elif finalmx < 0:
                finalmx = abs(finalmx)
                finalmy = -finalmy
            if finalmx == 0:
                print("The line is vertical, so its slope is undefined.")
                sys.exit()
            m = finalmy / finalmx
            if m == int(m):
                mtwo = int(m)
            else:
                mtwo = (str(finalmy)) + "/" + (str(finalmx))
#Prints slope
print("Slope: " + (str(mtwo)))
#Determines Y intercept (B)
            b = yone - m * xone  # y-intercept from a point and the slope: b = y1 - m*x1 (holds for any slope, including 0)
            #Determines X intercept (set y = 0, so x = -b/m)
            if m != 0:
                xi = -b / m
            #Prints Y and X intercept
            print("the y intercept is (0," + str(b) + ")")
            if m == 0:
                print("the line is horizontal, so there is no x intercept")
            elif integer == 1:
                print("the x intercept is (" + str(int(xi)) + ",0)")
            else:
                print("the x intercept is (" + str(float(xi)) + ",0)")
print("Equation: ")
#Determines equation for slope intercept
if typee == 1:
if b > 0:
if m > 1:
print("y = " + (str(mtwo)) + "x + " + (str(b)))
if m < 1:
print("y = " + (str(mtwo)) + "x + " + (str(b)))
if m == 1:
print("y = x + " + (str(b)))
if b < 0:
if m > 1:
print("y = " + (str(mtwo)) + "x " + (str(b)))
if m < 1:
print("y = " + (str(mtwo)) + "x " + (str(b)))
if m == 1:
print("y = x " + (str(b)))
if b == 0:
if m > 1:
print("y = " + (str(mtwo)) + "x")
if m < 1:
print("y = " + (str(mtwo)) + "x")
if m == 1:
print("y = x")
#Determines equation for point slope
if typee == 2:
if yone > 0:
if xone > 0:
print("y - " + (str(yone)) + " = " + (str(mtwo)) + "(x - " + (str(xone)) + ")")
if xone < 0:
print("y - " + (str(yone)) + " = " + (str(mtwo)) + "(x + " + (str(abs(xone))) + ")")
if xone == 0:
print("y - " + (str(yone)) + " = " + (str(mtwo)) + "(x)")
if yone < 0:
if xone > 0:
print("y + " + (str(abs(yone))) + " = " + (str(mtwo)) + "(x - " + (str(xone)) + ")")
if xone < 0:
print("y + " + (str(abs(yone))) + " = " + (str(mtwo)) + "(x + " + (str(abs(xone))) + ")")
if xone == 0:
print("y + " + (str(abs(yone))) + " = " + (str(mtwo)) + "(x)")
if yone == 0:
if xone > 0:
print("y = " + (str(mtwo)) + "(x - " + (str(xone)) + ")")
if xone < 0:
print("y = " + (str(mtwo)) + "(x + " + (str(abs(xone))) + ")")
if xone == 0:
print("y = " + (str(mtwo)) + "(x)")
#Determines equation for point slope
if typee == 3:
if b > 0:
if m > 1:
print((str(mtwo)) + "x + y = " + (str(b)))
if m < 1:
print((str(mtwo)) + "x + y = " + (str(b)))
if m == 1:
print("x + y = " + (str(b)))
if b < 0:
if m > 1:
print((str(mtwo)) + "x + y = " + (str(b)))
if m < 1:
print((str(mtwo)) + "x + y = " + (str(b)))
if m == 1:
print("x + y = " + (str(b)))
if b == 0:
if m > 1:
print((str(mtwo)) + "x + y = 0")
if m < 1:
print((str(mtwo)) + "x + y = 0")
if m == 1:
print("x + y = 0")
#Ends program or starts it over.
while mrb == 1:
end = input("Type 'yes' for another conversion/calculation, and type 'no' to end the program. ")
if end == "no":
tasdfasfdhjasdhfkjashdfk = 1
mainvar = 2
mrb = 2
print("Successfully ended.")
sys.exit()
elif end == "yes":
choice = 0
mainvar = 1
mrb = 0
input("(Enter to continue)")
mrb = 0
mrb = 0
else:
mrb = 1
if choice == "c":
#SI: Slope Intercept
#PS: Point Slope
#S: Standard
#Conversion list:
#SI->PS->DONE!
#SI->S
#PS->S->DONE!
#PS->SI->DONE!
#S->SI
#S->PS
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
#Slope intercept -> point slope
def converter_sips():
tingy = 1
tingytwo = 1
print("Step one: Enter information. An easy way to answer the X and Y value questions is to use the Y intercept (y = mx + (B)).")
xthing = float(input("Enter an X value "))
ything = float(input("Enter a Y value "))
mslope = float(input("Enter the slope's numerator. "))
mslopeythingy = float(input("Enter the slope's denominator. If the slope is an integer, just enter 1. "))
print("Step two: Are all the numbers integers (no decimal places)?")
while tingy == 1:
thingchoice = input("y for yes, n for no ")
if thingchoice == "y":
tingy = 2
xthing = int(xthing)
ything = int(ything)
mslope = int(mslope)
mslopeythingy = int(mslopeythingy)
elif thingchoice == "n":
tingy = 2
xthing = float(xthing)
ything = float(ything)
mslope = float(mslope)
mslopeythingy = float(mslopeythingy)
else:
tingy = 1
if mslopeythingy != 1:
mslope = ((str(mslope)) + "/" + (str(mslopeythingy)))
if xthing == 0 and ything != 0:
if ything > 0:
print("y - " + (str(ything)) + " = " + (str(mslope)) + "(x)")
if ything < 0:
print("y + " + (str(abs(ything))) + " = " + (str(mslope)) + "(x)")
if ything == 0 and xthing != 0:
if xthing > 0:
print("y = " + (str(mslope)) + "(x - " + (str(xthing)) + ")")
if xthing < 0:
print("y = " + (str(mslope)) + "(x + " + (str(abs(xthing))) + ")")
if xthing == 0 and ything == 0:
print("y = " + (str(mslope)) + "(x)")
if xthing != 0 and ything !=0:
if xthing > 0 and ything > 0:
print("y - " + (str(ything)) + " = " + (str(mslope)) + "(x - " + (str(xthing)) + ")")
if xthing > 0 and ything < 0:
print("y + " + (str(abs(ything))) + " = " + (str(mslope)) + "(x - " + (str(xthing)) + ")")
if xthing < 0 and ything > 0:
print("y - " + (str(ything)) + " = " + (str(mslope)) + "(x + " + (str(abs(xthing))) + ")")
if xthing < 0 and ything < 0:
print("y + " + (str(abs(ything))) + " = " + (str(mslope)) + "(x + " + (str(abs(xthing))) + ")")
#Slope intercept -> standard
def converter_sis():
tingy = 1
tingytwo = 1
print("Step one: Enter information.")
xthing = float(input("Enter an X value "))
ything = float(input("Enter a Y value "))
mslope = float(input("Enter the slope's numerator. "))
mslopeythingy = float(input("Enter the slope's denominator. If the slope is an integer, just enter 1. "))
mslopeactuall = mslope / mslopeythingy
bbb = float(input("What is the y-intercept of the equation? "))
print("Step two: Are all the numbers integers (no decimal places)?")
while tingy == 1:
thingchoice = input("y for yes, n for no ")
if thingchoice == "y":
tingy = 2
xthing = int(xthing)
ything = int(ything)
mslope = int(mslope)
mslopeythingy = int(mslopeythingy)
bbb = int(bbb)
elif thingchoice == "n":
tingy = 2
xthing = float(xthing)
ything = float(ything)
mslope = float(mslope)
mslopeythingy = float(mslopeythingy)
bbb = float(bbb)
else:
tingy = 1
if mslopeythingy != 1:
mslope = ((str(mslope)) + "/" + (str(mslopeythingy)))
if mslopeactuall >= 0:
finalFINAL = ("y - " + str(mslope) + "x = " + str(bbb))
else:
finalFINAL = ("y + " + (str(mslope)).lstrip("-") + "x = " + str(bbb))
print("Conversion: " + finalFINAL)
#Point slope -> standard
def converter_pss():
#Information entry:
tingythree = 1
print("Step one: Enter information")
xthing1 = float(input("Enter variable X1 "))
ything1 = float(input("Enter variable Y1 "))
mslopeynum = float(input("Enter slope numerator "))
mslopeyden = float(input("Enter slope denominator. If the slope is a whole number, enter 1. "))
print("Step two: Are all these numbers integers (No decimal points)? ")
#y - y1 = m(x - x1)
#y = mx + b
while tingythree == 1:
#integer choice:
thingchoice1 = input("y for yes, n for no ")
if thingchoice1 == "y":
xthing1 = int(xthing1)
ything1 = int(ything1)
mslopeynum = int(mslopeynum)
mslopeyden = int(mslopeyden)
tingythree = 2
elif thingchoice1 == "n":
tingythree = 2
else:
tingythree = 1
#Finds Y intercept directly: b = y1 - (rise/run) * x1
bb = ything1 - (mslopeynum / mslopeyden) * xthing1
if thingchoice1 == "y" and float(bb).is_integer():
bb = int(bb)
print(bb)
mslopeyy = mslopeynum / mslopeyden
if mslopeyden == 1:
mslopeyFinal4 = mslopeynum
elif mslopeyy.is_integer():
mslopeyFinal4 = int(mslopeyy)
else:
mslopeyFinal4 = str(mslopeynum) + "/" + str(mslopeyden)
if mslopeyy >= 0:
finalThreeTwo = ("y - " + str(mslopeyFinal4) + "x = " + str(bb))
else:
finalThreeTwo = ("y + " + (str(mslopeyFinal4)).lstrip("-") + "x = " + str(bb))
print("Conversion: " + finalThreeTwo)
#Point slope -> slope intercept
def converter_pssi():
#Information entry:
tingythree = 1
print("Step one: Enter information")
xthing1 = float(input("Enter variable X1 "))
ything1 = float(input("Enter variable Y1 "))
mslopeynum = float(input("Enter slope numerator "))
mslopeyden = float(input("Enter slope denominator. If the slope is a whole number, enter 1. "))
print("Step two: Are all these numbers integers (No decimal points)? ")
#y - y1 = m(x - x1)
#y = mx + b
while tingythree == 1:
#integer choice:
thingchoice1 = input("y for yes, n for no ")
if thingchoice1 == "y":
xthing1 = int(xthing1)
ything1 = int(ything1)
mslopeynum = int(mslopeynum)
mslopeyden = int(mslopeyden)
tingythree = 2
elif thingchoice1 == "n":
tingythree = 2
else:
tingythree = 1
#Finds Y intercept directly: b = y1 - (rise/run) * x1
bb = ything1 - (mslopeynum / mslopeyden) * xthing1
if thingchoice1 == "y" and float(bb).is_integer():
bb = int(bb)
print(bb)
mslopeyy = mslopeynum / mslopeyden
if mslopeyden == 1:
mslopeyFinal4 = mslopeynum
elif mslopeyy.is_integer():
mslopeyFinal4 = int(mslopeyy)
else:
mslopeyFinal4 = str(mslopeynum) + "/" + str(mslopeyden)
finalFour = "y = " + str(mslopeyFinal4) + "x + " + str(bb)
print("Conversion: " + finalFour)
#Standard -> slope intercept
def converter_ssi():
i = 0
A = float(input("Enter value x is multiplied by (example: (2)x + By = C) "))
B = float(input("Enter value y is multiplied by (example: Ax + (2)y = C) "))
C = float(input("Enter the constraint at the end of the equation "))
while i == 0:
integer = input("Are these numbers integers? (y/n) ")
if integer == "y":
i = 1
A = int(A)
B = int(B)
C = int(C)
elif integer == "n":
i = 1
A = float(A)
B = float(B)
C = float(C)
else:
i = 0
#Ax + By = C  ->  y = (-A/B)x + (C/B)
if B == 0:
print("B is 0, so the line is vertical and has no slope intercept form.")
else:
num = -A
den = B
if den < 0:
num = -num
den = -den
slope_val = num / den
if slope_val.is_integer():
slope_str = str(int(slope_val))
else:
slope_str = str(num) + "/" + str(den)
intercept = C / B
if float(intercept).is_integer():
intercept = int(intercept)
if intercept >= 0:
abcTWO = "y = " + slope_str + "x + " + str(intercept)
else:
abcTWO = "y = " + slope_str + "x - " + str(abs(intercept))
print("Conversion: " + abcTWO)
#Standard -> point slope
def converter_sps():
i = 0
A = float(input("Enter value x is multiplied by (example: (2)x + By = C) "))
B = float(input("Enter value y is multiplied by (example: Ax + (2)y = C) "))
C = float(input("Enter the constraint at the end of the equation "))
while i == 0:
integer = input("Are these numbers integers? (y/n) ")
if integer == "y":
i = 1
A = int(A)
B = int(B)
C = int(C)
elif integer == "n":
i = 1
A = float(A)
B = float(B)
C = float(C)
else:
i = 0
#Ax + By = C has slope -A/B, and the point (0, C/B) lies on the line
if B == 0:
print("B is 0, so the line is vertical and cannot be written in point slope form.")
else:
num = -A
den = B
if den < 0:
num = -num
den = -den
slope_val = num / den
if slope_val.is_integer():
mslope = int(slope_val)
else:
mslope = str(num) + "/" + str(den)
ything = C / B
if float(ything).is_integer():
ything = int(ything)
if ything > 0:
print("y - " + (str(ything)) + " = " + (str(mslope)) + "(x)")
elif ything < 0:
print("y + " + (str(abs(ything))) + " = " + (str(mslope)) + "(x)")
else:
print("y = " + (str(mslope)) + "(x)")
#Asks for decision and executes function
print("What would you like to convert?")
print("1: slope intercept to point slope")
print("2: slope intercept to standard")
print("3: point slope to standard")
print("4: point slope to slope intercept")
print("5: standard to slope intercept")
print("6: standard to point slope")
tingytwo = 1
while tingytwo == 1:
choicee = input("Type the number of your conversion here. ")
if choicee == "1":
tingytwo = 2
converter_sips()
elif choicee == "2":
tingytwo = 2
converter_sis()
elif choicee == "3":
tingytwo = 2
converter_pss()
elif choicee == "4":
tingytwo = 2
converter_pssi()
elif choicee == "5":
tingytwo = 2
converter_ssi()
elif choicee == "6":
tingytwo = 2
converter_sps()
else:
tingytwo = 1
#Ends program, or starts program over.
while mrb == 1:
end = input("Type 'yes' for another conversion/calculation, and type 'no' to end the program. ")
if end == "no":
mainvar = 2
mrb = 2
print("Successfully ended.")
sys.exit()
elif end == "yes":
choice = 0
mainvar = 1
mrb = 0
input("(Enter to continue)")
else:
mrb = 1
|
[
"noreply@github.com"
] |
IanBotashev.noreply@github.com
|
53b118fb80ab4e424b51bac2cc1c9c19ec9a21c5
|
07aaf101828a3662c6ca4c5eed90619a676aa56e
|
/demos/021_linked_lists/ll_answers.py
|
0e5bd1084f17164b488c7aaec4400c6f74e463ab
|
[] |
no_license
|
hillarysim/YeetingOurWayInto61a
|
c7c8836ab660dd6844cc87fdd1d448d4a5742af4
|
c05a539c8c3f8bd9d146cb3067a9ef9011358d36
|
refs/heads/master
| 2022-11-21T14:38:15.791580
| 2020-07-20T00:05:49
| 2020-07-20T00:05:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
x = Foolz("dino")
# prints dino
print(x)
# Division Error?
x
# 1/0.1
str(x)
# "Division Error?"
def nonMutativeLink(l, func):
if l is Link.empty:
return Link.empty
else:
return Link(func(l.first), nonMutativeLink(l.rest, func))
def mutativeLink(l, func):
if l is Link.empty:
return l
else:
l.first = func(l.first)
mutativeLink(l.rest, func)
return l
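# A minimal Link class sketch, assumed from the course context (the real
# definition is not included in this file), so the functions above can run:
class Link:
empty = ()
def __init__(self, first, rest=empty):
self.first = first
self.rest = rest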
|
[
"ctwong958@berkeley.edu"
] |
ctwong958@berkeley.edu
|
4e9111ec60477725635a3a1c5dc41784e9dd1e57
|
0c5a631622648b3cdf8534c2daf7569a1e17ff9a
|
/tests/benchmarks/constructs/InplaceOperationIntegerMul.py
|
f67fc5a5c13c35d2a1fdc02eda16ef077d8fa56b
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
wweiradio/Nuitka
|
9c5b7cf8dcde41dc43e3b17e8427ed5af77a4930
|
1914597d47585f3c899d76490fa6d173eca966d0
|
refs/heads/master
| 2021-01-11T22:39:25.276905
| 2016-12-10T11:27:30
| 2016-12-10T11:27:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module_value1 = 5
module_value2 = 3
def calledRepeatedly():
# Force frame and eliminate forward propagation (currently).
module_value1
# Make sure we have a local variable x anyway
s = 2
local_value = module_value1
s *= module_value1
# construct_begin
s *= 1000
# construct_end
s *= module_value2
return s
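# Portability sketch (an addition, not part of the original benchmark):
# `xrange` exists only on Python 2, so guard it for Python 3 runs.
try:
xrange
except NameError:
xrange = range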
for x in xrange(50000):
calledRepeatedly()
print("OK.")
|
[
"kay.hayen@gmail.com"
] |
kay.hayen@gmail.com
|
88ff6e12447e7c702d0ed437b186edb4560eebfe
|
9c2e0c84f9beac4c942cf05ac65a15253058f8ea
|
/tests/test_cffi.py
|
eddf079fd50532c9f0e80111a79a1aa5ba07c311
|
[
"BSD-3-Clause"
] |
permissive
|
smile-luobin/python-zstandard
|
881af6eda22254dbaa22eef9b9f930ddfb97c6e0
|
ca0199a2e29145111cee8af60385e24d776413cc
|
refs/heads/master
| 2021-01-11T09:08:41.664687
| 2016-12-19T17:10:25
| 2016-12-19T17:10:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
import io
try:
import unittest2 as unittest
except ImportError:
import unittest
import zstd
try:
import zstd_cffi
except ImportError:
raise unittest.SkipTest('cffi version of zstd not available')
class TestCFFIWriteToToCDecompressor(unittest.TestCase):
def test_simple(self):
orig = io.BytesIO()
orig.write(b'foo')
orig.write(b'bar')
orig.write(b'foobar' * 16384)
dest = io.BytesIO()
cctx = zstd_cffi.ZstdCompressor()
with cctx.write_to(dest) as compressor:
compressor.write(orig.getvalue())
uncompressed = io.BytesIO()
dctx = zstd.ZstdDecompressor()
with dctx.write_to(uncompressed) as decompressor:
decompressor.write(dest.getvalue())
self.assertEqual(uncompressed.getvalue(), orig.getvalue())
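# Aside, a one-shot sketch of the same round trip (assuming the simple
# compress()/decompress() methods rather than the streaming writers):
# data = zstd_cffi.ZstdCompressor().compress(b'foobar')
# assert zstd.ZstdDecompressor().decompress(data) == b'foobar'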
|
[
"gregory.szorc@gmail.com"
] |
gregory.szorc@gmail.com
|
54be96328bf4d1ff35e0f3ba31e9309c38bc8a35
|
839a2babf0f74f5ab45aedfedb527613935ae19a
|
/content/blog/2022-09-15-data-structures-and-algorithms-cheatsheet/py/heap_mine_test.py
|
cbd63f828addc063872a438e35f686d86a64d0a3
|
[
"MIT"
] |
permissive
|
9oelM/9oelM.github.io
|
84eb41e7c748620a943a742ed7d7d192b006a75e
|
f001ace931d2963a39045c980d192fd7b8f7dd91
|
refs/heads/dev
| 2023-08-30T20:02:11.473984
| 2023-08-22T02:35:41
| 2023-08-22T02:35:41
| 105,492,752
| 4
| 1
|
MIT
| 2023-02-09T00:45:01
| 2017-10-02T02:56:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
import copy
import heapq
from heap_mine import MinHeap
my_heap = MinHeap()
def compare_and_check(a, b):
if a != b:
raise Exception(f"not same: {a} and {b}")
def test():
for arr in [
[9,8,7,6,5,4,3,214,124,51,516,7,48,8,345869,8,16,6176,16,78,71],
[1],
[2,1],
[1,1],
[1,2],
[1,2,3],
[3,2,1,2,3,4],
[3,2,1,2,3,4,-1,-100,15,161,16666,1,-555,-1],
[1],
[-1],
[5,3,4,2,3,6],
[-1,-2,-3,-4,-5,-10,-15,1,2,4,7,9,1,3,2,1,2,3,4,-1,-100,15,161,16666,1,-555,-1]
]:
test_arr = copy.deepcopy(arr)
my_heap.heapify(test_arr)
heapq.heapify(arr)
compare_and_check(arr, test_arr)
for num in [0, 100, 500, -5, -100,15,16,734,1651,617,89,9,1612738,3789]:
my_heap.push(test_arr, num)
heapq.heappush(arr, num)
compare_and_check(arr, test_arr)
compare_and_check(heapq.nsmallest(1, arr)[0], my_heap.peek(test_arr))
for _ in range(10):
a = heapq.heappop(arr)
b = my_heap.pop(test_arr)
compare_and_check(a, b)
compare_and_check(arr, test_arr)
compare_and_check(heapq.nsmallest(1, arr)[0], my_heap.peek(test_arr))
print("Pass")
test()
|
[
"hj923@hotmail.com"
] |
hj923@hotmail.com
|
2b17d9d8ff94ed08700f722f676a4dba21eb7587
|
15afc6a3270d9b42cc84a788853ce46456be01f2
|
/section_ii/project_a/chapter_12/example_5/game_functions.py
|
b4fef3ea991ce9d343994785e415a8b4da160af9
|
[] |
no_license
|
xieqing0428/python_helloworld
|
161c90564638dc49e3a82a00607a762b36a39212
|
e08f63616aabe609ff1ac53b8e0ab32eaf2a472b
|
refs/heads/master
| 2020-04-16T11:01:37.918248
| 2019-02-14T07:19:09
| 2019-02-14T07:19:09
| 165,521,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,787
|
py
|
# -*- coding:utf-8 -*-
"""
@author: Alessa0
@file: game_functions.py
@time: 2019-01-19 17:29
"""
import sys
import pygame
from python_helloworld.section_ii.project_a.chapter_12.example_5.bullet \
import Bullet
def check_events(ufo_settings, screen, ufo, bullets):
"""检查按键事件"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ufo_settings, screen, ufo, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ufo)
def fire_bullet(ufo_settings, screen, ufo, bullets):
"""开火"""
if len(bullets) < ufo_settings.bullet_allowed:
new_bullet = Bullet(ufo_settings, screen, ufo)
bullets.add(new_bullet)
def check_keydown_events(event, ufo_settings, screen, ufo, bullets):
"""按键"""
if event.key == pygame.K_UP:
ufo.moving_up = True
if event.key == pygame.K_DOWN:
ufo.moving_down = True
if event.key == pygame.K_SPACE:
fire_bullet(ufo_settings, screen, ufo, bullets)
def check_keyup_events(event, ufo):
"""起键"""
if event.key == pygame.K_UP:
ufo.moving_up = False
if event.key == pygame.K_DOWN:
ufo.moving_down = False
def update_screen(ufo_settings, screen, ufo, bullets):
"""更新屏幕"""
screen.fill(ufo_settings.bg_color)
# Update bullet positions
for bullet in bullets.sprites():
bullet.draw_bullet()
ufo.blitme()
# Draw the screen
pygame.display.flip()
def update_bullet(ufo, bullets):
"""过界子弹消失"""
bullets.update()
for bullet in bullets.copy():
if bullet.rect.left >= ufo.screen_rect.right:
bullets.remove(bullet)
|
[
"849565690@qq.com"
] |
849565690@qq.com
|
a81d92c2056b58b1664cc2b35d3227bbf00f6719
|
d64b15f01374254c6628abaa0535186738dca6d3
|
/SSVI/SSVI_TF_d.py
|
2f08fc4b902b68f0fe0d012754c5086f82776c80
|
[] |
no_license
|
dnguyen1196/SSVI-tensor-factorization
|
b3fa82c8967eb5a415c9d29dc2e290535ee8f41f
|
e371094de77bfa866d411bd84a7e65463e5a96f9
|
refs/heads/master
| 2020-03-15T14:37:25.008000
| 2018-05-17T16:02:06
| 2018-05-17T16:02:06
| 132,193,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,751
|
py
|
import Probability.ProbFun as probs
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
import time
"""
SSVI_TF.py
SSVI algorithm to learn the hidden matrix factors behind tensor
factorization
"""
class H_SSVI_TF_2d():
def __init__(self, model, tensor, rank, rho_cov, k1=1, k2=10, scheme="adagrad", batch_size=5):
"""
:param model:
:param tensor:
:param rank:
:param rho_cov:
:param k1:
:param k2:
:param scheme:
"""
self.model = model
self.tensor = tensor
self.D = rank
self.report = 1
self.size_per_dim = tensor.dims # dimension of the tensors
self.order = len(tensor.dims) # number of dimensions
self.rho_cov = rho_cov
# Get prior mean and covariance
# self.pmu, self.pSigma = self.model.p_prior.find(0, 0)
# self.pmu = [np.ones((self.D, )) for _ in len(self.size_per_dim)]
self.pmu = np.ones((self.D,))
self.pSigma = [1 for _ in self.size_per_dim]
self.likelihood_type = model.p_likelihood.type
if self.likelihood_type == "normal":
self.link_fun = lambda m : m
elif self.likelihood_type == "bernoulli":
self.link_fun = lambda m : 1. /(1 + np.exp(-m))
elif self.likelihood_type == "poisson":
self.link_fun = lambda m : np.log(1 + np.exp(-m))
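# Note (an added observation): this poisson link is softplus applied to -m,
# i.e. log(1 + exp(-m)), which decreases in m; the more common choice is
# the increasing softplus, lambda m: np.log(1 + np.exp(m)).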
# optimization scheme
self.opt_scheme = scheme
# Stochastic optimization parameters
self.batch_size = batch_size
self.iterations = 5000
self.k1 = k1
self.k2 = k2
# Optimization scheme
if scheme == "adagrad":
# adagrad parameters
self.offset = 0.0001
self.ada_acc_grad = [np.zeros((self.D, s)) for s in self.size_per_dim]
self.eta = 1
elif scheme == "schaul":
# schaul-like update window width
self.window_size = 5
self.eta = 1
self.recent_gradients\
= [[np.zeros((self.D, self.window_size)) for _ in range(s)] for s in self.size_per_dim]
self.recent_gradients_sum \
= [[np.zeros((self.D, self.window_size)) for _ in range(s)] for s in self.size_per_dim]
self.cur_gradient_pos = [[0 for _ in range(s)] for s in self.size_per_dim]
elif scheme == "adadelta":
# adadelta parameters
self.gamma = 0.1
self.offset = 0.0001
self.alpha = 1
self.g_t = [[np.zeros((self.D,)) for _ in range(s)] for s in self.size_per_dim]
self.s_t = [[np.zeros((self.D,)) for _ in range(s)] for s in self.size_per_dim]
# nesterov accelerated gradient
self.momentum = 0.9
self.delta_theta_t = [[np.zeros((self.D,)) for _ in range(s)] for s in self.size_per_dim]
def factorize(self):
"""
factorize
:return: None
Doing round robin updates for each column of the
hidden matrices
"""
update_column_pointer = [0] * self.order
start = time.time()
# while self.check_stop_cond():
for iteration in range(self.iterations):
current = time.time()
if iteration != 0 and iteration % self.report == 0:
print ("iteration: ", iteration, " - test error: ", \
self.evaluate_test_error(), " - train error: ", self.evaluate_train_error(), " - time: ", current - start)
for dim in range(self.order):
col = update_column_pointer[dim]
# Update the natural params of the col-th factor
# in the dim-th dimension
self.update_natural_params(dim, col)
self.update_hyper_parameter(dim)
# Move on to the next column of the hidden matrices
for dim in range(self.order):
update_column_pointer[dim] = (update_column_pointer[dim] + 1) \
% self.size_per_dim[dim]
def update_natural_params(self, dim, i):
"""
:param i:
:param dim:
:return:
"""
observed_i = self.tensor.find_observed_ui(dim, i)
# print(observed_i)
if len(observed_i) > self.batch_size:
observed_idx = np.random.choice(len(observed_i), self.batch_size, replace=False)
observed_i = np.take(observed_i, observed_idx, axis=0)
M = len(observed_i)
(m, S) = self.model.q_posterior.find(dim, i)
Di_acc = np.zeros((self.D, self.D))
di_acc = np.zeros((self.D, ))
for entry in observed_i:
coord = entry[0]
y = entry[1]
(di_acc_update, Di_acc_update) = self.estimate_di_Di(dim, i, coord, y, m, S)
Di_acc += Di_acc_update
di_acc += di_acc_update
# Update covariance parameter
rhoS = self.rho_cov
covGrad = (1./self.pSigma[dim] * np.eye(self.D) - 2 * Di_acc)
S = inv((1-rhoS) * inv(S) + rhoS * covGrad)
# Update mean parameter
meanGrad = (np.inner(1./self.pSigma[dim] * np.eye(self.D), self.pmu - m) + di_acc)
update = self.compute_update_mean_param(dim, i, m, meanGrad)
m = np.add(update, m)
self.model.q_posterior.update(dim, i, (m, S))
def compute_update_mean_param(self, dim, i, m, mGrad):
if self.opt_scheme == "adagrad":
self.ada_acc_grad[dim][:, i] += np.multiply(mGrad, mGrad)
return self.eta / np.sqrt(self.ada_acc_grad[dim][:, i]) * mGrad
elif self.opt_scheme == "schaul":
current_grad_pos = self.cur_gradient_pos[dim][i]
self.recent_gradients[dim][i][: , current_grad_pos] = mGrad
self.cur_gradient_pos[dim][i] = (self.cur_gradient_pos[dim][i] + 1) % self.window_size
recent_gradients = self.recent_gradients[dim][i]
recent_gradients_squared = np.square(recent_gradients)
recent_gradients_sum = np.sum(recent_gradients, 1)
expected_squares_gradient = np.sum(recent_gradients_squared, 1)
return self.eta / np.sqrt(expected_squares_gradient) * mGrad
elif self.opt_scheme == "adadelta":
g_0 = self.g_t[dim][i]
s_0 = self.s_t[dim][i]
# Update g_t
g_t = (1. - self.gamma) * np.square(mGrad) + self.gamma * g_0
self.g_t[dim][i] = g_t
# Compute gradient update
delta_theta_t = self.alpha * \
np.divide(np.sqrt(np.add(s_0, self.offset)), \
np.sqrt(np.add(g_t, self.offset))) * mGrad
# Update s_t
self.s_t[dim][i] = (1 - self.gamma) * np.square(delta_theta_t) + self.gamma * s_0
# Update deltaTheta_t
self.delta_theta_t[dim][i] = delta_theta_t
return delta_theta_t
def estimate_di_Di(self, dim, i, coord, y, m, S):
"""
:param dim:
:param i:
:param coord:
:param y:
:return:
"""
othercols = coord[: dim]
othercols.extend(coord[dim + 1 :])
alldims = list(range(self.order))
otherdims = alldims[:dim]
otherdims.extend(alldims[dim + 1 : ])
di = np.zeros((self.D, ))
Di = np.zeros((self.D, self.D))
for k1 in range(self.k1):
ui = self.sample_uis(othercols, otherdims)
meanf = np.dot(ui, m)
covS = np.dot(ui, np.inner(S, ui))
Expected_fst_derivative, Expected_snd_derivative = \
self.compute_expected_first_snd_derivative(y, meanf, covS)
di += ui * Expected_fst_derivative/self.k1 # Update di
Di += np.outer(ui, ui) * Expected_snd_derivative/(2*self.k1) # Update Di
return di, Di
def update_hyper_parameter(self, dim):
"""
:param dim:
:return:
"""
sigma = 0.0
M = self.size_per_dim[dim]
for j in range(M):
m, S = self.model.q_posterior.find(dim, j)
sigma += np.trace(S) + np.dot(m, m)
self.pSigma[dim] = sigma/(M*self.D)
def compute_expected_first_snd_derivative(self, y, meanf, covS):
first_derivative = 0.0
snd_derivative = 0.0
s = self.model.p_likelihood.params
for k2 in range(self.k2):
f = probs.sample(self.likelihood_type, (meanf, covS))
snd_derivative += probs.snd_derivative(self.likelihood_type, (y, f, s))
first_derivative += probs.fst_derivative(self.likelihood_type, (y, f, s))
return first_derivative/self.k2, snd_derivative/self.k2
def compute_expected_uis(self, othercols, otherdims):
uis = np.ones((self.D,))
for dim, col in enumerate(othercols):
# Sample from the approximate posterior
(mi, Si) = self.model.q_posterior.find(otherdims[dim], col)
uis = np.multiply(uis, mi)
return uis
def sample_uis(self, othercols, otherdims):
uis = np.ones((self.D,))
for dim, col in enumerate(othercols):
# Sample from the approximate posterior
(mi, Si) = self.model.q_posterior.find(otherdims[dim], col)
uj_sample = probs.sample("multivariate_normal", (mi, Si))
uis = np.multiply(uis, uj_sample)
return uis
def check_stop_cond(self):
"""
:return: boolean
Check for stopping condition
"""
return
def evaluate_train_error(self):
"""
:return:
"""
error = 0.0
for i, entry in enumerate(self.tensor.train_entries):
predict = self.predict_entry(entry)
correct = self.tensor.train_vals[i]
if self.likelihood_type == "normal":
error += np.abs(predict - correct)/abs(correct)
elif self.likelihood_type == "bernoulli":
error += 1 if predict != correct else 0
else:
return 0
return error/len(self.tensor.train_vals)
def evaluate_test_error(self):
"""
:return:
"""
error = 0.0
for i, entry in enumerate(self.tensor.test_entries):
predict = self.predict_entry(entry)
correct = self.tensor.test_vals[i]
if self.likelihood_type == "normal":
error += np.abs(predict - correct)/abs(correct)
elif self.likelihood_type == "bernoulli":
error += 1 if predict != correct else 0
else:
return 0
return error/len(self.tensor.test_vals)
def predict_y_given_m(self, m):
if self.likelihood_type == "normal":
return m
elif self.likelihood_type == "poisson":
f = self.link_fun(m)
#TODO: implement
return 1
elif self.likelihood_type == "bernoulli":
# print(m)
f = self.link_fun(m)
return 1 if f >= 0.5 else -1
else:
raise Exception("Unidentified likelihood type")
def compute_expected_count(self, hermite_weights):
return
def compute_gauss_hermite(self, f, n):
return
def predict_entry(self, entry):
u = np.ones((self.D,))
for dim, col in enumerate(entry):
m, S = self.model.q_posterior.find(dim, col)
u = np.multiply(u, m)
m = np.sum(u)
return self.predict_y_given_m(m)
|
[
"ducnguyenmanh96@gmail.com"
] |
ducnguyenmanh96@gmail.com
|
3f45d74e68ce58dcac1bcf428cac778127060fec
|
886005f0847c094dec01b41769475e1e966b8b7f
|
/leetcode/1051. Height Checker.py
|
117394356b153ba84d8dd3c2e53166a902caf255
|
[] |
no_license
|
alex-radchenko-github/codewars-and-leetcode
|
e4d55c5e6c5279a2cada5eb9e04ad1183ed36087
|
20a56eb4753a9ead190a7c529f2c06c99d64f325
|
refs/heads/main
| 2023-01-02T22:45:38.878716
| 2020-11-03T04:49:59
| 2020-11-03T04:49:59
| 307,790,531
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
def heightChecker(heights):
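# Count the positions at which heights differs from its sorted copy.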
return len(list(filter(lambda x: x[0]!=x[1], list(zip(heights, sorted(heights))))))
# return list(zip(heights, sorted(heights)))
print(heightChecker([1,1,4,2,1,3]))
print(heightChecker([5,1,2,3,4]))
|
[
"48209253+radchenko511@users.noreply.github.com"
] |
48209253+radchenko511@users.noreply.github.com
|
97b29febcb2be2ac2dafb2215f884fec355564c1
|
747b5ad3163d745d2ebe72b473e1346554a94045
|
/utilities/Augmentation.py
|
4414fa70e3b7795dc2f922074e1d4dd7af8efad4
|
[] |
no_license
|
bibliotecadebabel/EvAI
|
d5227db96341cf8bdf44d2c23ae6b2987aab6a5f
|
2beeaf68e28c86aedb9fb6d3cb80753676d85933
|
refs/heads/master
| 2023-04-09T17:14:55.039442
| 2021-04-15T20:44:32
| 2021-04-15T20:44:32
| 208,825,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,733
|
py
|
import numpy as np
import torch
ENABLE_EXTRA = False
class Ricap():
def __init__(self, beta):
self.__beta = beta
self.__w = {}
self.__c = {}
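# beta parameterizes the Beta distribution that picks the patch boundary;
# values below 1 push the crop sizes toward the extremes.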
# Author = 4ui_iurz1 (2019)
# GitHub repository = https://github.com/4uiiurz1/pytorch-ricap
def doRicap(self, inputs, target, cuda=True):
I_x, I_y = inputs.size()[2:]
w = int(np.round(I_x * np.random.beta(self.__beta, self.__beta)))
h = int(np.round(I_y * np.random.beta(self.__beta, self.__beta)))
w_ = [w, I_x - w, w, I_x - w]
h_ = [h, h, I_y - h, I_y - h]
cropped_images = {}
c_ = {}
W_ = {}
for k in range(4):
idx = torch.randperm(inputs.size(0))
x_k = np.random.randint(0, I_x - w_[k] + 1)
y_k = np.random.randint(0, I_y - h_[k] + 1)
cropped_images[k] = inputs[idx][:, :, x_k:x_k + w_[k], y_k:y_k + h_[k]]
if cuda == True:
c_[k] = target[idx].cuda()
else:
c_[k] = target[idx]
W_[k] = w_[k] * h_[k] / (I_x * I_y)
self.__c = c_
self.__w = W_
patched_images = torch.cat(
(torch.cat((cropped_images[0], cropped_images[1]), 2),
torch.cat((cropped_images[2], cropped_images[3]), 2)),
3)
return patched_images
# Author = 4ui_iurz1 (2019)
# GitHub repository = https://github.com/4uiiurz1/pytorch-ricap
def generateLoss(self, layer):
parent_layer = layer.node.parents[0].objects[0]
output = parent_layer.value
loss = sum([self.__w[k] * layer.object(output, self.__c[k]) for k in range(4)])
return loss
|
[
"felixmorales@github.com"
] |
felixmorales@github.com
|
c0c172189cdab047727594e3f9156a35dd596250
|
1d825df911d2b0a4cc36b26da9a8883eed9de8ec
|
/lib/utilities.py
|
2605f7d86b54724839568903680e797c0ca2b32c
|
[] |
no_license
|
sinofeng/wpi-svm
|
74793e91ac5330a3665caa71d44b36931fc59f0e
|
5ad078a9a3475eb6a3fe3efd12e8fca39e4857ef
|
refs/heads/master
| 2021-03-17T22:02:54.675868
| 2012-10-27T13:44:37
| 2012-10-27T13:44:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
# -*- coding: utf-8 -*-
import re
class Logger(object):
def __init__(self, file_path):
self.log_h = open(file_path, 'wb')
def log(self, line):
print line
self.log_h.write(line+'\n')
def close(self):
self.log_h.close()
def parse_field(dict, key):
""" Simple dict wrapper
dict: name of dict object
key: name of key
Return: dict[key] or None
"""
try:
value = dict[key]
except KeyError:
value = None
return value
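# Note: `dict` shadows the built-in type name here; dict.get(key) would be
# the idiomatic one-liner for this try/except.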
def remove_url_prefix(url):
""" Remove the prefix of url
url: input url string
"""
url_regex = re.compile(r"^(\w+:?//)?(.*)$", re.IGNORECASE)
url_match = url_regex.match(url)
if url_match:
url = url_match.group(2)
return url
def search_url(turl, url_list):
turl2 = remove_url_prefix(turl)
for url in url_list:
if turl2 == remove_url_prefix(url):
return True
return False
def cmp_url(u1, u2, mode = 'strict'):
if mode == 'strict':
if remove_url_prefix(u1) == remove_url_prefix(u2):
return True
return False
elif mode == 'loose':
urlre = re.compile(r'^(\w+://)?([^#\?]+)#?\??')
match_u1 = urlre.match(u1)
match_u2 = urlre.match(u2)
if match_u1 and match_u2:
if match_u1.group(2) == match_u2.group(2):
return True
return False
else:
return cmp_url(u1, u2, 'strict')
def main():
pass
if __name__ == '__main__':
main()
|
[
"chen_xm@sjtu.edu.cn"
] |
chen_xm@sjtu.edu.cn
|
73b427dd65269dfe448cdf4dd9b71647982cd4cd
|
14289b4de03edaf950fdae17536c1b04f9d0874d
|
/python-course/day-05/code/10.读取文件的位置.py
|
8889ca90e85e764c1d4fa23ac436405328940e45
|
[] |
no_license
|
piggy1024/python-learn
|
cc03c1302a707c48d616c9a837d9b3d49e07bf15
|
2d54e56dfe7b5c2ca90ae1dc846d8ea4774a7629
|
refs/heads/main
| 2023-01-02T00:27:09.822020
| 2020-10-24T01:50:38
| 2020-10-24T01:50:38
| 306,784,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# 10. Getting the current read position in a file
# seek(offset, whence): offset is where to move the file pointer to
# whence:
# 0 - from the start of the file (the default)
# 1 - from the current position
# 2 - from the end of the file
# one Chinese character takes three bytes (in UTF-8)
with open(r'../demo.txt','rb') as file_obj:
print(file_obj.seek(-10,2))
print(file_obj.read())
print('Current read position:', file_obj.tell())
|
[
"1293266846@qq.com"
] |
1293266846@qq.com
|
1b5b8f50a106043c4e3d113b9d076523c1060bb4
|
356bdbebb47c1acc7122a8275ebb3f01771de785
|
/mcb.pyw
|
9f04d67abea79c6c8e4bc94bace3f6825816da08
|
[] |
no_license
|
rawbsrn/Automating-Python
|
e178b6a33f29e0332aa25605134b439da1089748
|
e4635aa0609d1fd07154e9cfad6ed574c2bdb9df
|
refs/heads/main
| 2023-07-30T07:39:38.641260
| 2021-09-17T00:55:24
| 2021-09-17T00:55:24
| 399,982,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
pyw
|
#! python3
#mcb.pyw - Save & load pieces of text to the clipboard
# Usage: py.exe mcb.pyw save <keyword> - Saves clipboard to keyword.
# py.exe mcb.pyw <keyword> - Loads keyword to clipboard.
# py.exe mcb.pyw list - Loads all keywords to clipboard.
# py.exe mcb.pyw delete <keyword> - delete specific keyword from list
import shelve, pyperclip, sys
mcbShelf = shelve.open('mcb')
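# shelve persists the data next to the script (e.g. mcb.db or mcb.dat,
# depending on the platform's dbm backend).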
#Save clipboard content
if len(sys.argv) == 3 and sys.argv[1].lower() == 'save':
mcbShelf[sys.argv[2]] = pyperclip.paste()
elif len(sys.argv) == 3 and sys.argv[1].lower() == 'delete':
del mcbShelf[sys.argv[2]]
elif len(sys.argv) == 2:
if sys.argv[1].lower() == 'list':
pyperclip.copy(str(list(mcbShelf.keys())))
elif sys.argv[1].lower() == 'delete':
for key in list(mcbShelf.keys()):
del mcbShelf[key]
elif sys.argv[1] in mcbShelf:
pyperclip.copy(mcbShelf[sys.argv[1]])
mcbShelf.close()
|
[
"89554888+rawbsrn@users.noreply.github.com"
] |
89554888+rawbsrn@users.noreply.github.com
|
412e62dd7a5a5818adb3d0a2a10997b21864d00a
|
40efb62bc628ad3de7534051feccb97b6427e53a
|
/Homework/Python/ILPLarge/Q1.py
|
7247d04b51c3fa9c1d57729f3255704a51ff1488
|
[] |
no_license
|
nguyntyler/DigitalCrafts-Class
|
f1de5865fcc7dd0ed26429bd87f84ebc07797a97
|
ec275667c6a4f8823951fa1f69b596dea0981bd6
|
refs/heads/master
| 2023-02-14T18:48:11.607893
| 2021-01-10T00:16:44
| 2021-01-10T00:16:44
| 303,833,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
x = range(1, 101)
for i in x:
print(i * (i + 1) // 2)
# This is true or should be
|
[
"nguyn.tyler@gmail.com"
] |
nguyn.tyler@gmail.com
|
5fd75c72f34d71f069aca48047fd059fac481dc1
|
ab3abc31e6c4c292088d390355844f04f46b9141
|
/src/detection_video.py
|
be5b6268ab6d577327b2a557b693c76953954ff1
|
[] |
no_license
|
zhikiat62/face-mask-detect
|
98a860147f6af9e7165ac7c732cb842e054cdf89
|
1ae132b38c4e24c2ed2033fdf341ea96dab7d89c
|
refs/heads/main
| 2023-06-02T13:59:23.174843
| 2021-06-18T10:02:58
| 2021-06-18T10:02:58
| 373,211,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,179
|
py
|
# import the necessary packages
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
import os
def detect_and_predict_mask(frame, faceNet, maskNet, args):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > args["confidence"]:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
# only make a predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=35)
# return a 2-tuple of the face locations and their corresponding
# locations
return (locs, preds)
def main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", type=str,
default="face_detect_model",
help="path to face detector model directory")
ap.add_argument("-m", "--model", type=str,
default="mask_detect_model",
help="path to trained face mask detector model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
#change current working directory to the previous one
os.chdir("..")
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.join(os.getcwd(), args["face"] , "deploy.prototxt")
weightsPath = os.path.join(os.getcwd(), args["face"] ,"res10_300x300_ssd_iter_140000.caffemodel")
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskModelPath = os.path.join(os.getcwd(), args["model"] , "mask-detector-model.model")
maskNet = load_model(maskModelPath)
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=400)
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet, args)
# loop over the detected face locations and their corresponding
# locations
for (box, pred) in zip(locs, preds):
# unpack the bounding box and predictions
(startX, startY, endX, endY) = box
(withMask, withMaskIncorrect, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
label = "Mask" if withMask > withoutMask and withMask > withMaskIncorrect else "Incorrect Mask" if withMaskIncorrect > withMask and withMaskIncorrect > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 255, 255) if label == "Incorrect Mask" else (0,0,255)
# include the probability in the label
label = "{}: {:.2f}%".format(label, max(withMask, withoutMask, withMaskIncorrect) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(frame, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
zhikiat62.noreply@github.com
|
469da1f8198f033b07ed962c63dd9cb8ed6ffa3d
|
3c73527b7553869700e43877ff4c16260d4a0b69
|
/algorithm/binary_tree.py
|
0ab38b84ed516097aab123b2ee4cd61a6ce23138
|
[] |
no_license
|
jeklen/programmingExercise
|
09dd5b4ff0733b1994fc6d3a79ca6ea2902c2629
|
50d9472d8b817653c7af94cd74dec25e99ae4de0
|
refs/heads/master
| 2021-06-05T12:18:26.758662
| 2016-10-24T08:00:54
| 2016-10-24T08:00:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,506
|
py
|
class Node:
"""
Tree node: left and right child + data which can be any object
"""
def __init__(self, data):
"""
Node constructor
@param data node data object
"""
self.left = None
self.right = None
self.data = data
def insert(self, data):
"""
Insert new node with data
@param data node data object to insert
"""
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
elif data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
def lookup(self, data, parent=None):
"""
Lookup node containing data
@param data node data object to look up
@param parent node's parent
@returns node and node's parent if found or None, None
"""
if data < self.data:
if self.left is None:
return None, None
return self.left.lookup(data, self)
elif data > self.data:
if self.right is None:
return None, None
return self.right.lookup(data, self)
else:
return self, parent
def delete(self, data):
"""Delete node containing data
@param data node's content to delete
"""
# get node containing data
node, parent = self.lookup(data)
if node is not None:
children_count = node.children_count()
if children_count == 0:
if parent:
if parent.left is node:
parent.left = None
else:
parent.right = None
del node
else:
self.data = None
elif children_count == 1:
# if children has 1 child
# replace node with its child
if node.left:
n = node.left
else:
n = node.right
if parent:
if parent.left is node:
parent.left = n
if parent.right is node:
parent.right = n
del node
else:
self.left = n.left
self.right = n.right
self.data = n.data
else:
# if node has 2 children
# find its successor
parent = node
successor = node.right
while successor.left:
parent = successor
successor = successor.left
# replace node data by its successor data
node.data = successor.data
# fix successor's parent's child
if parent.left == successor:
parent.left = successor.right
else:
parent.right = successor.right
def children_count(self):
"""
Returns the number of children
@returns number of children: 0, 1, 2
"""
cnt = 0
if self.left:
cnt += 1
if self.right:
cnt += 1
return cnt
def print_tree(self):
"""
Print tree content inorder
"""
if self.left:
self.left.print_tree()
print self.data,
if self.right:
self.right.print_tree()
def compare_trees(self, node):
"""
Compare 2 trees
@param node tree's root node to compare to
@returns True if the tree passed is identical to this tree
"""
if node is None:
return False
if self.data != node.data:
return False
res = True
if self.left is None:
if node.left:
return False
else:
res = self.left.compare_trees(node.left)
if res is False:
return False
if self.right is None:
if node.right:
return False
else:
res = self.right.compare_trees(node.right)
return res
def tree_data(self):
"""
Generator to get tree nodes data
"""
# we use a stack to traverse the tree in a non-recursive way
stack = []
node = self
while stack or node:
if node:
stack.append(node)
node = node.left
else:
node = stack.pop()
yield node.data
node = node.right
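# Minimal usage sketch (hypothetical values, added for illustration):
# root = Node(8)
# root.insert(3)
# root.insert(10)
# print [d for d in root.tree_data()]   # prints the data inorder: 3, 8, 10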
|
[
"zhql0907@outlook.com"
] |
zhql0907@outlook.com
|
af52d3bbef2942a8ee05414948f62f44ad230416
|
a9c29d5e0eb3593d4d02ee92317721bf0f8416c4
|
/keyboards/inline/__init__.py
|
f158e699b1f87b2790dad87e24d7d4a72cf81e25
|
[] |
no_license
|
Basilo142/aiobot
|
25c11b7a16fbea644dce1b3c1aff384dd91d0673
|
2506d83eab308afb03eaa7f133871fa1f90b3969
|
refs/heads/master
| 2023-08-24T07:12:01.579463
| 2021-11-05T16:34:41
| 2021-11-05T16:34:41
| 407,207,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
from .key_inline import inline_buttons, drugaya_key
|
[
"diadiun@bitbucket.org"
] |
diadiun@bitbucket.org
|
827f9e02239c8b99b08743da6e5b2535b09bcdf9
|
0c5abd937d2e9bce0a5cd5b437b1e0bfc244ceb6
|
/common/utils.py
|
a82588d7c61fe3e3080dfa3962ec3d044270c734
|
[] |
no_license
|
pda-gb/Client-server_applications_2.BD_and_PyQT
|
2787000094b027978cd359cbc59acdf2a9f3d4c6
|
681c7bdaf83fa4905a6167d10d2aace774088a3b
|
refs/heads/master
| 2023-02-22T21:47:21.265299
| 2021-01-22T01:39:42
| 2021-01-22T01:39:42
| 331,393,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
"""Утилиты"""
import json
from common.variables import MAX_PACKAGE_LENGTH, ENCODING
def get_message(_sock):
"""
Receive a message and decode it from bytes; raise an error if it is invalid
"""
response_as_byte = _sock.recv(MAX_PACKAGE_LENGTH)
if response_as_byte != b'':
if isinstance(response_as_byte, bytes):
response_as_json = response_as_byte.decode(ENCODING)
response = json.loads(response_as_json)
if isinstance(response, dict):
return response
raise ValueError
raise ValueError
else:
# A client in listen mode receives '' once the user's message has been
# consumed, which would otherwise surface as a JSONDecodeError
pass
def send_message(_sock, _message_dict):
"""Кодирует в байты и отправляет сообщение"""
message_as_json = json.dumps(_message_dict)
message_as_byte = message_as_json.encode(ENCODING)
_sock.send(message_as_byte)
|
[
"pda.prostor@gmail.com"
] |
pda.prostor@gmail.com
|
3f3c6507befaaa43634a91d4c7e360ccb848abf3
|
0166fe426b2dc36e974a26f8b406094c67550f46
|
/Broswer-Interaction/Executables/login_enabled_CT.command
|
796bf1000a7cfe5ea6f6393ac135c307d5ef47cb
|
[] |
no_license
|
DreadArceus/Automation
|
693ff18c2f47c85741ed1bc2b1337a93bdde150e
|
98736cc933de0c9e498c32b0e4ff320affaf70cc
|
refs/heads/master
| 2023-03-31T00:22:41.400544
| 2021-04-09T03:21:01
| 2021-04-09T03:21:01
| 291,393,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
command
|
#!/usr/bin/env python3
import time
import webbrowser
from selenium import webdriver
from bs4 import BeautifulSoup
driver = webdriver.Safari()
driver.get('https://erp.lnmiit.ac.in/ugadm/Dashboard.aspx')
soupI = BeautifulSoup()
while True:
if driver.current_url != 'https://erp.lnmiit.ac.in/ugadm/Dashboard.aspx':
login = driver.find_elements_by_class_name('form-control')
login[0].send_keys('LNMLZVLC')
login[1].send_keys('MPPYLBR7UA')
button = driver.find_element_by_name('Button1')
button.click()
print('login successful')
soupN = BeautifulSoup(driver.page_source, 'lxml')
if soupI == BeautifulSoup():
soupI = soupN
if str(soupI).find('Second') != str(soupN).find('Second'):
print('IT\'S CHANGED')
webbrowser.open(driver.current_url)
soupI = soupN
time.sleep(30)
driver.refresh()
driver.quit()
|
[
"dcmtalwar@gmail.com"
] |
dcmtalwar@gmail.com
|
66153ba19dfeb3194214bb436dfa73f2c1b4926d
|
84197b3cb75d2c2ea8ba40252e146375941b077a
|
/flask_app/__init__.py
|
8259943fd7d955ba3c6b6a07d549483f6c19bb66
|
[] |
no_license
|
bkhurley/beat_the_crowd
|
aa5451cb5fddffc904ad211b5fe7168656a42664
|
f291038e77dbd158dcfb156e01655f2ddfecd436
|
refs/heads/master
| 2021-05-11T21:43:04.294702
| 2018-05-17T17:36:46
| 2018-05-17T17:36:46
| 117,475,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
from flask import Flask
app = Flask(__name__)
from flask_app import views2
|
[
"hurley.brian@gmail.com"
] |
hurley.brian@gmail.com
|
c14f7e2a3991057490ce8edbd390f6aea7335c0d
|
a1c6b5467a9c1dddeffd66ab3f415b9dbdcb56e9
|
/COVIDIC/manage.py
|
ceec6756d6103201d803e41c219c41dcc03f79dd
|
[] |
no_license
|
hanpikeu/Covidic-Frontends
|
9569267d09866db106d38f6121c75d00e5157df3
|
a565a1713bcbafa9542851ea9b622bd29acc98fc
|
refs/heads/master
| 2021-04-01T07:57:44.627392
| 2020-03-18T16:58:01
| 2020-03-18T16:58:01
| 248,170,555
| 0
| 0
| null | 2020-03-18T07:57:01
| 2020-03-18T07:57:00
| null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'COVIDIC.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"viasta20311@gmail.com"
] |
viasta20311@gmail.com
|
c5a8b2ac1773693bce5629c33839801ad745a20c
|
4888923eb948d61b54d87750c9e4810e5b6dc7df
|
/backend/venv/bin/isort
|
eb426718aef2de9b32464aea804666524a5eb79c
|
[] |
no_license
|
EricRobertCampbell/auth0-poc
|
5305f8bee8408d84e174c9b593053581f555901c
|
0d5718760d47777ccabca2fdc67e689cd37ba81b
|
refs/heads/master
| 2022-12-30T02:52:03.962759
| 2020-10-16T23:04:05
| 2020-10-16T23:04:05
| 304,756,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
#!/home/eric/documents/auth0-poc/backend/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"eric.robert.campbell@gmail.com"
] |
eric.robert.campbell@gmail.com
|
|
1652435f5d485a9de09c979c03c31ba7d17307fc
|
ab4f0df599159b2c3929c24b12e2766718efdacb
|
/cubepy/index.py
|
3787946118858decc7656993b76f715643b3cacf
|
[
"MIT"
] |
permissive
|
jorgedouglas71/pyplan-ide
|
f01ec438f727ee0dea01b0d265155b49a26ccdb8
|
5ad0e4a2592b5f2716ff680018f717c65de140f5
|
refs/heads/master
| 2020-09-25T03:10:01.201707
| 2019-12-04T15:39:55
| 2019-12-04T15:39:55
| 225,904,173
| 0
| 0
|
MIT
| 2019-12-04T15:57:06
| 2019-12-04T15:57:05
| null |
UTF-8
|
Python
| false
| false
| 2,793
|
py
|
import numpy as np
from cubepy.axis import Axis
class Index(Axis):
"""A named sequence of unique indexed values. Can be used as indexable axis in Cube.
Name is a string. Values are stored in one-dimensional numpy array.
"""
def __init__(self, name, values):
"""Initialize a new Index object. The values must be unique, otherwise ValueError is raised.
:param name: str
:param values: sequence of values (must be unique),
sequence of cubepy.Index
:raise: ValueError if there are duplicate values
"""
if isinstance(values,list) and len(values)>0 and isinstance(values[0],self.__class__):
nn=0
for idx in values:
if nn==0:
fullValues = np.copy(idx.values)
else:
fullValues = np.concatenate( (fullValues ,idx.values), axis=0)
nn+=1
values = np.unique(fullValues)
super(Index, self).__init__(name, values)
# create dictionary
self._indices = {x: i for i, x in enumerate(self._values)}
# values must not be change once the index has been created
self._values.flags.writeable = False
if len(self._indices) != len(self._values):
raise ValueError('Index cannot have duplicate values')
self._vectorized_index = np.vectorize(self._indices.__getitem__, otypes=[np.int])
self._vectorized_contains = np.vectorize(self._indices.__contains__, otypes=[np.bool])
def __contains__(self, item):
"""Implementation of 'in' operator.
:param item: a value to be looked up whether exists
:return: bool
"""
return item in self._indices
def contains(self, item):
"""Tests whether item or items exist among values.
If item is single value, then return a single boolean value.
If item is a sequence, then return numpy array of booleans.
:param item: a single value or a sequence of values
:return: bool or numpy array of bools
"""
v = self._vectorized_contains(item)
if v.ndim > 0:
return v
return v.item()
def indexof(self, item):
"""If item is single value, then return a single integer value.
If item is a sequence, then return numpy array of integers.
:param item: a single value or a sequence of values
:return: int or numpy array of ints
:raise: KeyError if value does not exist
"""
v = self._vectorized_index(item)
if v.ndim > 0:
return v
return v.item()
@property
def pos(self):
from cubepy.cube import Cube
return Cube([self],range(len(self)))
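# Minimal usage sketch (hypothetical values, not from the original file):
# idx = Index('city', ['NY', 'LA'])
# idx.indexof('LA')  # -> 1
# 'NY' in idx        # -> True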
|
[
"fbrussa@novix.com"
] |
fbrussa@novix.com
|
175fb277a49966e29ff8712330bb24e13e1ad305
|
fa7c420ef42b33f9488e5e6b75462ebe7c7a3e54
|
/DataProcessingPipeline/pycharm/datamaking_real_time_data_pipeline/real_time_data_pipeline.py
|
18b84f209f2da83a820e82a864227b772dae1165
|
[] |
no_license
|
sravyapagadala1/DataProcessingPipelineGCP
|
0ca2d4b16f60afea4b5b9f07a24ce999d248f389
|
21cee99f51306583ac57be693a286216fc7750c4
|
refs/heads/master
| 2022-09-13T18:33:40.675803
| 2020-05-28T11:51:56
| 2020-05-28T11:51:56
| 267,575,666
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
KAFKA_TOPIC_NAME_CONS = "transmsg"
KAFKA_BOOTSTRAP_SERVERS_CONS = '35.238.42.190:9092'
if __name__ == "__main__":
print("Real-Time Data Pipeline Started ...")
spark = SparkSession \
.builder \
.appName("Real-Time Data Pipeline Demo") \
.master("local[*]") \
.config("spark.jars", "file:///C://Users//sravy//Documents//Projects//DataProcessingPipeline//jar_files//spark-sql-kafka-0-10_2.11-2.4.0.jar,file:///C://Users//sravy//Documents//Projects//DataProcessingPipeline//jar_files//kafka-clients-2.0.0.jar") \
.config("spark.executor.extraClassPath", "file:///C://Users//sravy//Documents//Projects//DataProcessingPipeline//jar_files//spark-sql-kafka-0-10_2.11-2.4.0.jar:file:///C://Users//sravy//Documents//Projects//DataProcessingPipeline//jar_files//kafka-clients-2.0.0.jar") \
.config("spark.executor.extraLibrary", "file:///C://Users//sravy//Documents//Projects//DataProcessingPipeline//jar_files//spark-sql-kafka-0-10_2.11-2.4.0.jar:file:///C://Users//sravy//Documents//Projects//DataProcessingPipeline//jar_files//kafka-clients-2.0.0.jar") \
.config("spark.driver.extraClassPath", "file:///C://Users//sravy//Documents//Projects//DataProcessingPipeline//jar_files//spark-sql-kafka-0-10_2.11-2.4.0.jar:file:///C://Users//sravy//Documents//Projects//DataProcessingPipeline//jar_files//kafka-clients-2.0.0.jar") \
.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
# Construct a streaming DataFrame that reads from transmsg
transaction_detail_df = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", KAFKA_BOOTSTRAP_SERVERS_CONS) \
.option("subscribe", KAFKA_TOPIC_NAME_CONS) \
.option("startingOffsets", "latest") \
.load()
print("Printing Schema of transaction_detail_df: ")
transaction_detail_df.printSchema()
# Write result dataframe into console for debugging purpose
trans_detail_write_stream = transaction_detail_df \
.writeStream \
.trigger(processingTime='5 seconds') \
.outputMode("update") \
.option("truncate", "false")\
.format("console") \
.start()
trans_detail_write_stream.awaitTermination()
print("Real-Time Data Pipeline Completed.")
|
[
"noreply@github.com"
] |
sravyapagadala1.noreply@github.com
|
daa036edc8eb665395f09338474ee089044a4872
|
59731d4fe8ba2778a25c4345dedcb4ad4e759fe0
|
/deepService/eventHandlers/UpdateJobHandler.py
|
9b14ccdac0dc3ba661c09b15d14e694bf55bae93
|
[] |
no_license
|
goodshark/kube-deep
|
cbf7d4ac2af001d7c504937b9fee0cf64b8b1e97
|
0c07366fa998fbf82df5a2a91f91be32031eb703
|
refs/heads/master
| 2020-03-27T08:30:05.556710
| 2018-12-14T08:56:19
| 2018-12-14T08:56:19
| 146,261,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,126
|
py
|
# coding: utf-8
from EventHandler import EventHandler
from util.ApiConfiger import ApiConfig
from util.RedisHelper import RedisHelper
import kubernetes
from kubernetes import client, config, watch
from kubernetes.client.rest import ApiException
import json
import re
import traceback
class UpdateJobHandler(EventHandler):
def removePs(self, psList):
print 'deleting ps list: ' + str(psList)
config.load_kube_config()
configuration = kubernetes.client.Configuration()
delJobInstance = kubernetes.client.BatchV1Api(kubernetes.client.ApiClient(configuration))
delSvcInstance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient(configuration))
body = kubernetes.client.V1DeleteOptions()
body.propagation_policy = 'Foreground'
namespace = ApiConfig().get("namespace", "tensorflow")
for ps in psList:
try:
delJobInstance.delete_namespaced_job(ps, namespace, body)
delSvcInstance.delete_namespaced_service(ps, namespace, body)
except:
traceback.print_exc()
'''
tf-3e8f3702-d10b-11e8-abe4-fa163ef8da8a-ps-0-1
{3e8f3702-d10b-11e8-abe4-fa163ef8da8a-1: 0}
tf-3e8f3702-d10b-11e8-abe4-fa163ef8da8a-worker-0-3
{3e8f3702-d10b-11e8-abe4-fa163ef8da8a-3: 0}
'''
def modifEvent(self, objName, eStatus):
print '*************** UpdateJobHandler modify event: ' + str(objName)
rc = RedisHelper().getRedis()
psPt = "tf-([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})-ps-([0-9].*)-([0-9].*)"
res = re.match(psPt, objName)
if res:
# ps may be shutdown itself through singal from worker
print 'ps modified'
#psKey = res.group(1)
#rc.hincrby(ApiConfig().get("event", "ps_key"), psKey, -1)
else:
workerPt = "tf-([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})-worker-([0-9].*)-([0-9].*)"
res = re.match(workerPt, objName)
if not res:
return
if eStatus.succeeded and eStatus.succeeded == 1:
workerKey = res.group(1)
curCount = rc.hincrby(ApiConfig().get("event", "worker_key"), workerKey, -1)
if (int(curCount) == 0):
print 'prepare delete ps ++++++++++++++++++++++++++++++'
psCnt = rc.hget(ApiConfig().get("event", "ps_key"), res.group(1))
allPs = ['tf-'+res.group(1)+'-ps-'+str(i)+'-'+psCnt for i in xrange(int(psCnt))]
allWorker = ['tf-'+res.group(1)+'-worker-'+str(i)+'-'+res.group(3) for i in xrange(int(res.group(3)))]
print 'all ps: ' + str(allPs)
print 'all worker: ' + str(allWorker)
tfInfo = {'ps': allPs, 'worker': allWorker}
rc.rpush(ApiConfig().get("event", "delete_queue"), json.dumps(tfInfo))
else:
print 'one tf worker done successfully ......'
else:
# TODO mark failed
pass
def delEvent(self, objName, eStatus):
print '************* UpdateJobHandler delete event: ' + str(objName)
rc = RedisHelper().getRedis()
psPt = "tf-([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})-ps-([0-9].*)-([0-9].*)"
res = re.match(psPt, objName)
if res:
print 'delete event matched'
psKey = res.group(1)
print 'delete event ps_key: ' + psKey
try:
psCurCount = rc.hincrby(ApiConfig().get("event", "ps_key"), psKey, -1)
except:
print 'got error'
traceback.print_exc()
print 'after hincrby ......'
print 'delete event ps cur count: ' + str(psCurCount)
if (int(psCurCount) == 0):
print ''
rc.hdel(ApiConfig().get("event", "ps_key"), psKey)
rc.hdel(ApiConfig().get("event", "worker_key"), psKey)
else:
print 'del event not matched'
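# A self-contained sketch of how the worker-name pattern above decomposes a
# job name; the sample name is hypothetical but follows the format the handler
# expects (tf-<uuid>-worker-<index>-<count>).
if __name__ == '__main__':
    samplePt = "tf-([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})-worker-([0-9].*)-([0-9].*)"
    m = re.match(samplePt, 'tf-3e8f3702-d10b-11e8-abe4-fa163ef8da8a-worker-0-3')
    print m.group(1)  # job uuid
    print m.group(2)  # worker index
    print m.group(3)  # total worker count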
|
[
"root@tf-1.novalocal"
] |
root@tf-1.novalocal
|
6926911d576e910dfcf6812e7dd08b64e45c509e
|
ed0a4b5ebe0a93054766b1d6948fe31d006a19d8
|
/GradeCalculator.py
|
61890e98deb52972643ef4ca341176b5c8a04bee
|
[] |
no_license
|
max-conroy/Python
|
5c19ce989d2f0bd942f2c23917b31c69ca81b4b6
|
d058de635005d05d1dc7fc45de98e7f571877de1
|
refs/heads/master
| 2021-11-24T19:59:04.068020
| 2016-01-29T01:24:46
| 2016-01-29T01:24:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,036
|
py
|
# Group #2 - Program 2 - Grade Averaging Program
# Andrew Weaver, Jordan Westfall, Max Conroy
# CSC 485 - Special Topics in Computer Science - Python
#Basic grade calculator program
# Command handler
def main():
printTitle()
promptInput()
# This function is used to display the title for the program
def printTitle():
print("-----------------------------------------------------------------------------------")
print(" Welcome to the Grade Calculator Program ")
print("-----------------------------------------------------------------------------------")
print(" - Enter a valid grade between 0 and 100 ")
print(" - Entering a grade outside of the range will terminate input")
print(" - After entering an invalid grade the program will calculate all of the grades")
print(" - If no valid grades are entered the program will terminate")
print("-----------------------------------------------------------------------------------")
# promptInput() is a loop that prompts the user for input as long as the grades are in a valid range
def promptInput():
programCounter = 0 # Program counter for the input
gradeInput = 0 # Variable used to store grade input
gradeList = [] # List that stores all of the grades input by the user
while gradeInput >= 0 and gradeInput <= 100: #Loop executes while the users input is within the valid range
gradeInput = raw_input("Enter a grade: ") #Prompt for grade input
if isNumber(gradeInput) == True: #Handle if the user entered a valid number and not a letter/word
gradeInput = float(gradeInput) #Converts the local variable to a float
if gradeInput < 0 or gradeInput > 100: #Checks to see if the grade is valid
                promptCalculation(gradeList) #Terminates input and calculates the totals
else:
gradeList.append(gradeInput) #Inserts the valid grade into the list at the address of the program counter
programCounter += 1 #Increment the program counter
else:
print("You did not enter a valid number, please enter valid number.") #Warning message to the user
gradeInput = 0 #Reset the input to meet loop conditions
# promptCalculation() accepts the list of grades and calls the calculation functions
def promptCalculation(gradesList):
numberElements = len(gradesList) # Retrieves the number of elements in a list
if numberElements > 0: # Handle if the user didn't enter any values
gradeSum = sum(gradesList) # Sums the elements in the list
averageGrades = gradeSum/numberElements # Divides the sum of the grades by the number of grades to get the average
letterGrade = calculateLetterGrade(averageGrades) # Determines the letter grade
printTotal(numberElements, gradeSum, averageGrades, letterGrade)
else:
print("Sorry, you did not enter any valid grades for calculation!")
print("Program Terminated, restart the program to enter valid grades.")
# This functions prints out the information to the user
def printTotal(numberOfElements, sumOfGrades, averageOfGrades, letterOfGrades):
print("Grade out of valid range totals now being calculated...")
print("-----------------------------------------------------------------------------------")
print("There were a total of "+ str(numberOfElements) +" grades entered that totaled up to "+ str(sumOfGrades))
print("The average of the grades entered was " + str("{:.2f}".format(averageOfGrades)) + " which is an average letter grade of: " + str(letterOfGrades))
# This function handles the letter grade using simple if based logic
def calculateLetterGrade(gradeAverage):
    # boundaries form a complete chain so every average maps to a letter
    if gradeAverage <= 60:
        letterGrade = "F"
    elif gradeAverage < 70:
        letterGrade = "D"
    elif gradeAverage < 80:
        letterGrade = "C"
    elif gradeAverage < 90:
        letterGrade = "B"
    else:
        letterGrade = "A"
    return letterGrade
# Checks to see if the number is a number or not
def isNumber(x):
try: #Try statement to handle exception if the string cannot be cast
x = float(x) #Casting statement
return True #If the cast is successful return true
except ValueError: #Catch the ValueError exception if the string cannot be converted to a float
return False #Return False back to the function
main()
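# A sketch of an equivalent table-driven lookup with bisect, for comparison
# with calculateLetterGrade above. It uses half-open intervals, so it agrees
# with the chain above everywhere except a score of exactly 60 (rounds up to D).
from bisect import bisect_right
def letterGradeViaTable(gradeAverage):
    cutoffs = [60, 70, 80, 90]            # upper bounds for F, D, C, B
    letters = ["F", "D", "C", "B", "A"]
    return letters[bisect_right(cutoffs, gradeAverage)]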
|
[
"conroy316@yahoo.com"
] |
conroy316@yahoo.com
|
4b6de09716039e8e42ef1b4f23ea314d2d2a8e8e
|
4e28a9fe973d7f6dc318aa191e25c44032b89f54
|
/PycharmProjects/demo0/描述符.py
|
677f10e086ca595b607deb7904fdd469e9da3656
|
[] |
no_license
|
octolittlefish/python
|
0f159cc85c92a4a5328fe31c988f1999480cbe11
|
ef7b08d658566baf81578d594d6d6c256c17017e
|
refs/heads/master
| 2021-01-22T20:55:13.867795
| 2017-05-14T05:14:39
| 2017-05-14T05:14:39
| 85,378,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# class MyDecriptor:
# def __get__(self, instance, owner):
# print("getting...",self,instance,owner)
# def __set__(self, instance, value):
# print("setting...",self,instance,value)
# def __delete__(self, instance):
# print("deleting...",self,instance)
#
# class Test:
# x = MyDecriptor()
#
# test = Test()
# test.x
# test.x="X-man"
# del test.x
# print(123)
class Celsius:
    # note: the value lives on the descriptor object itself, so it is shared
    # by every instance of the owning class
    def __init__(self, value=26.0):
        self.value = float(value)
def __get__(self, instance, owner):
return self.value
def __set__(self, instance, value):
self.value=float(value)
class Fahrenheit:
def __get__(self, instance, owner):
return instance.cel * 1.8 +32
def __set__(self, instance, value):
instance.cel=(float(value)-32)/1.8
class Temperature:
cel = Celsius()
fah = Fahrenheit()
temp = Temperature()
print("Initial Celsius temperature: ")
print(temp.cel)
temp.cel = 30
print("Celsius temperature set to:")
print(temp.cel)
print("Corresponding Fahrenheit temperature: ")
print(temp.fah)
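# A sketch of a per-instance variant: Celsius above keeps `value` on the
# descriptor object, so every Temperature instance shares one reading.
# Storing on the owner instance instead (assumes Python 3.6+ for __set_name__):
class CelsiusPerInstance:
    def __set_name__(self, owner, name):
        self._name = '_' + name
    def __get__(self, instance, owner):
        if instance is None:
            return self
        return getattr(instance, self._name, 26.0)
    def __set__(self, instance, value):
        setattr(instance, self._name, float(value))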
|
[
"278715853@qq.com"
] |
278715853@qq.com
|
6d5fe15c4c31ed1dfb107a67ad6c71c152d2c48b
|
d9272149dbf3b1157324291cefa55cacedad1082
|
/testing.py
|
501a3f585a6af61014c131fe59829f4af42d5deb
|
[] |
no_license
|
mymindcastadrift/gen-eigh
|
680147e625e9dcc5b0d9aecc5b58fec8f3693871
|
c33326655b05d24fef287fe2b449483181243139
|
refs/heads/master
| 2016-09-16T13:09:09.965067
| 2015-08-23T22:00:18
| 2015-08-23T22:00:18
| 39,964,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,934
|
py
|
import numpy as np
from scipy import linalg
import matrixgenerator as matgen
from choleskywilkinson import cholesky_wilkinson
from fixheiberger import fix_heiberger
# Error functions =====================================
def column_vect_norms(X):
norm =[]
for i in range(X.shape[1]):
x = linalg.norm(X[:,i])
norm.append(x)
return norm
def average_error(A, M, eigenval, eigenvect, n):
err_matrix = A.dot(eigenvect) - np.multiply(eigenval, M.dot(eigenvect))
norm = column_vect_norms(err_matrix)
return sum(norm[0:n])/n
def normalize(n, eigenvect):
for i in range(n):
eigenvect[:,i] = eigenvect[:,i]/linalg.norm(eigenvect[:,i])
# Basic Correctness Tests ==============================================
def run_test(A,M, r):
[fh_val, fh_vect] = fix_heiberger(A,M,r)
[cw_val, cw_vect] = cholesky_wilkinson(A,M)
[def_val, def_vect] = linalg.eigh(A,M)
normalize(len(def_val), def_vect)
	# fix_heiberger can return None when the tolerance is aggressive enough
	# to discard the whole problem, so bail out before comparing
	if fh_val is None:
		return
print "Fix-Heiberger: with error: ", average_error(A,M, fh_val, fh_vect, len(fh_val)), " on ", len(fh_val), fh_val#, fh_vect
print "Cholesky-Wilkinson: with error: ", average_error(A,M, cw_val, cw_vect, len(fh_val)), len(cw_val), cw_val#, cw_vect
print "Scipy: with error: ", average_error(A,M, def_val, def_vect, len(fh_val)), len(def_val), "\n", def_val, "\n"
def test_correct_1(n):
print "Testing with dimensions:", n
z = range(1,n+1)
A = matgen.rand_by_eigenval(n, z[::-1])
M = matgen.diag([1]*n)
run_test(A,M,0.01)
def test_correct_1a(n):
print "Testing with dimensions:", n
z = range(1,n+1)
Q = matgen.rand_unitary(n)
A = Q*matgen.diag(z[::-1])*Q.getH()
r = [1]*n
r[n-1:] = [10**-10]*1
M = Q*matgen.diag(r)*Q.getH()
run_test(A,M,0.01)
def test_correct_2(n):
print "Testing with dimensions:", n
A = matgen.rand_symm(n)
M = matgen.rand_by_eigenval(n, [1]*n)
run_test(A,M,0.01)
def test_correct_3(a,d,e):
print "Testing with alpha, delta, epsilon:", a, d, e
A = np.matrix([[1, a, 0, d],[a, 2, 0, 0], [0,0,3,0],[d,0,0,e]])
M = matgen.diag([1,1,e,e])
run_test(A,M,0.001)
def test_correct_3c(a,d,e):
print "[PATHOLOGICAL] Testing with alpha, delta, epsilon:", a, d, e
A = np.matrix([[1,a,0,0,0,d],[a,2,0,0,0,0],[0,0,3,0,0,0],[0,0,0,e,0,0],[0,0,0,0,e,0],[d,0,0,0,0,e]])
M = matgen.diag([1,1,e,e,e,e])
run_test(A,M,0.001)
def test_correct_4(d):
print "Testing with delta ", d
A = matgen.diag([6,5,4,3,2,1,0,0])
A[0,6] = A[1,7] = A[6,0] = A[7,1] = 1
M = matgen.diag([1,1,1,1,d,d,d,d])
run_test(A,M,0.000011)
def test_correct_5(n,w):
print "Testing with negative eigenvalues in A_22"
A_11 = matgen.rand_symm(n)
A_22 = matgen.rand_by_eigenval(w, matgen.rand_eigenval(w,-1000,100))
A_13 = matgen.rand_mat(n,w)
A = linalg.block_diag(A_11, A_22)
A[0:n, n:n+w] = A_13
A[n:n+w, 0:n] = A_13.getH()
M =linalg.block_diag( matgen.diag(matgen.rand_eigenval(n,100,1000)), matgen.diag([10**-10]*w))
run_test(A,M,0.01)
def test_correct_6(n,w, e, v=3):
print "[PATHOLOGICAL] Testing with near singular A with w = ", w
Q = matgen.rand_unitary(n+2*w)
A_11 = matgen.rand_symm(n)
A_22 = matgen.rand_by_eigenval(2*w, np.concatenate((matgen.rand_eigenval(w, 1000,10000), matgen.rand_eigenval(w, e, 10*e)), axis=1))
A = linalg.block_diag(A_11, A_22)
A_13 = np.matrix(np.diag(matgen.rand_eigenval(w, e, 10*e)))
A[0:w, n+w:n+2*w] = A_13
A[n+w:n+2*w, 0:w] = A_13.getH()
M = matgen.diag(np.concatenate((matgen.rand_eigenval(n,10000,100000), matgen.rand_eigenval(2*w, 0.0001, 0.001)), axis = 1))
A = Q * A * Q.getH()
M = Q * M * Q.getH()
run_test(A,M,0.01)
# Unit testing module =====================================================
if __name__ == "__main__":
'''print "\nTest 1: Identity M"
for i in [5,100]:
test_correct_1(i)
print "\nTest 1a: Near singular A,M"
for i in range(2,10):
test_correct_1a(i)
print "\nTest 2: Non-singular M"
for i in [5,100]:
test_correct_2(i)'''
'''print "\nTest 3a: Pg 86 test - Limiting values of epsilon"
for i in range(20,50):
test_correct_3(0.00001, 0.00005, 10**(-i))'''
'''print "\nTest 3b: Pg 86 test - Limiting values of delta"
for i in range(10,100):
test_correct_3(0.00001, 10**(-i), 0.00001)
# Note how the latter claims to be a pathological input for Fix-Heiberger due to the lack of condition (2.14)
# BUT THE RANK CONDITION STILL HOLDS!!! n_1 = 2, n_4 = 1
# The problem is in trying to solve for a A_13 with near zero singular values.
print "\nTest 3c: Pg 86 test - Limiting values of delta with modified matrix"
for i in range(50,60):
test_correct_3c(0.01, 10**(-i), 0.000001)
print "\nTest 4: Pg 87 test - Limiting values of delta"
for i in range(5, 10):
test_correct_4(10**(-i))'''
'''print "\nTest 5: A_22 with negative values"
for i in range(5, 10):
test_correct_5(20*i,10)'''
# Note that higher error for "less singular matrices" is expected since F-H assumes singularity for low eigenvalues.
'''print "\nTest 7: Near singular A"
for i in range(1,5):
test_correct_6(500-40*i,20*i, 10**-50)
#print "\nTest 8: Perturbation Test"'''
print "\nTest: Higher Dimensional Performance"
[A, M] = matgen.rand_pair(10)
run_test(A,M, 0.0001)
fp = open("testresult.csv", 'w')
print "Beginning Testing ... "
a = 0.00001
d = 0.00005
fh_output = [0]*3
num_output =[0]*3
for i in range(1,101):
print i
fp.write("{}".format(i*10))
#for j in range (10):
[A,M] = matgen.rand_pair(i*10)
[test_val, test_vect] = cholesky_wilkinson(A,M)
err_2 = average_error(A, M, test_val, test_vect, len(test_val))
fp.write(",{},".format(err_2))
for k in range(1,4):
[eigenval, eigenvect] = fix_heiberger(A,M, 10**(-k*2))
fh_output[k-1] = average_error(A, M , eigenval, eigenvect, len(eigenval))
num_output[k-1] = len(eigenval)
for k in range(1,4):
fp.write(",{}".format(fh_output[k-1]))
for k in range(1,4):
fp.write(",{}".format(num_output[k-1]))
fp.write("\n")
fp.close()
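# A self-contained sketch of the residual check behind average_error, run on a
# random well-conditioned pair (A symmetric, M positive definite); only
# numpy/scipy are assumed, matrixgenerator is not needed here.
def _standalone_residual_demo(n=20):
	rng = np.random.RandomState(0)
	A = rng.randn(n, n); A = (A + A.T) / 2                 # symmetric A
	B = rng.randn(n, n); M = B.dot(B.T) + n * np.eye(n)    # SPD M
	vals, vects = linalg.eigh(A, M)
	resid = A.dot(vects) - M.dot(vects) * vals             # A v - lambda M v
	print 'max residual column norm:', max(column_vect_norms(resid))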
|
[
"="
] |
=
|
1df1b029fa86fd9b6464ffa2209d9062ebf5005d
|
e82e417d06b41f3ff28bfd69b251595ed3865e5e
|
/BattleFire.py
|
54675e125de6457e02ed97b431ece5f3de65f56a
|
[] |
no_license
|
ameyabhamare/pokemon-xyz
|
540c0d0f94d7e5f91e514bb2b320057edbd110fe
|
eab61a58f4d5375e7997c3e5c71b99e28e9be893
|
refs/heads/master
| 2022-11-15T07:37:10.425054
| 2020-07-14T05:02:23
| 2020-07-14T05:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,684
|
py
|
import pygame
import random
WIDTH = 950
HEIGHT = 700
FPS = 15
pygame.font.init()
smallText=pygame.font.Font('freesansbold.ttf',15)
smallText1=pygame.font.Font('freesansbold.ttf',22)
def text_objects(text, font):
textSurface=font.render(text,True,BLACK)
return textSurface, textSurface.get_rect()
# colour definitions (so we don't have to look up RGB values)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
phealth=136
ehealth=252
# initialize pygame and create window(do not copy this part)
#make sure you have the same data names as i have used
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
photo1 = pygame.image.load('Xerneas.png')
photo2= pygame.image.load('Delphox.png')
pygame.display.set_caption("Pokemon X,Y and Z")
background_img = pygame.image.load('battle_ground.png').convert()
clock = pygame.time.Clock()
#now the actual sprites begin
#i have created a list of sprtes so that we can update everything at once
class Wall(pygame.sprite.Sprite):
""" Wall the player can run into. """
def __init__(self, x, y, width, height):
""" Constructor for the wall that the player can run into. """
# Call the parent's constructor
super(Wall,self).__init__()
# Make a blue wall, of the size specified in the parameters
self.image = pygame.Surface([width, height])
self.image.fill(BLUE)
self.image.set_colorkey(BLUE)
# Make our top-left corner the passed-in location.
self.rect = self.image.get_rect()
self.rect.y = y
self.rect.x = x
def Attack1():
global ehealth
global phealth
ehealth-=20
phealth-=random.randint(0,40)
def Attack2():
global ehealth
global phealth
ehealth-=50
phealth-=random.randint(0,40)
def Attack3():
global ehealth
global phealth
ehealth-=60
phealth-=random.randint(0,40)
def Attack4():
global ehealth
global phealth
ehealth-=30
phealth-=random.randint(0,40)
class Player(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image=pygame.transform.scale(photo2,(200,236))
self.rect=self.image.get_rect()
self.rect.centerx=WIDTH/2
self.rect.bottom=HEIGHT-2
self.speedx=0
self.speedy=0
self.walls=None
def update(self):
self.speedx=0
self.speedy=0
keystate=pygame.key.get_pressed()
if keystate[pygame.K_LEFT]:
self.speedx=-5
if keystate[pygame.K_RIGHT]:
self.speedx=5
if keystate[pygame.K_DOWN]:
self.speedy=5
if keystate[pygame.K_UP]:
self.speedy=-5
        # move left/right first, clamp to the screen, then resolve wall hits
        self.rect.x += self.speedx
        if self.rect.right > WIDTH:
            self.rect.right = WIDTH
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.top > HEIGHT:
            self.rect.top = HEIGHT
        if self.rect.bottom < 0:
            self.rect.bottom = 0
        block_hit_list = pygame.sprite.spritecollide(self, self.walls, False)
        for block in block_hit_list:
            # If we are moving right, set our right side to the left side of
            # the item we hit
            if self.speedx > 0:
                self.rect.right = block.rect.left
            else:
                # Otherwise if we are moving left, do the opposite.
                self.rect.left = block.rect.right
        # Move up/down (once per frame)
        self.rect.y += self.speedy
        # Check and see if we hit anything
        block_hit_list = pygame.sprite.spritecollide(self, self.walls, False)
        for block in block_hit_list:
            # Reset our position based on the top/bottom of the object.
            if self.speedy > 0:
                self.rect.bottom = block.rect.top
            else:
                self.rect.top = block.rect.bottom
class Mob(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image=pygame.transform.scale(photo1,(200,236))
self.rect=self.image.get_rect()
self.rect.x=659
self.rect.y=254
#self.speedx=random.randrange(1,8)
#self.speedy=random.randrange(1,3)
mobs=pygame.sprite.Group()
player=Player()
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
m=Mob()
all_sprites.add(m)
mobs.add(m)
wall_list=pygame.sprite.Group()
wall = Wall(20, 40, 10, 600)
wall1 = Wall(745,7, 203, 333)
wall2 = Wall(20, 40, 10, 600)
wall3 = Wall(20, 40, 10, 600)
wall4 = Wall(20, 40, 10, 600)
wall5 = Wall(20, 40, 10, 600)
wall6 = Wall(20, 40, 10, 600)
wall7 = Wall(20, 40, 10, 600)
all_sprites.add(wall)
wall_list.add(wall)
all_sprites.add(wall1)
wall_list.add(wall1)
all_sprites.add(wall2)
wall_list.add(wall2)
all_sprites.add(wall3)
wall_list.add(wall3)
all_sprites.add(wall4)
wall_list.add(wall4)
player.walls=wall_list
health=142
# Game loop
running = True
while running:
# keep loop running at the right speed
clock.tick(FPS)
# Process input (events)
for event in pygame.event.get():
# check for closing window
if event.type == pygame.QUIT:
running = False
# Update
all_sprites.update()
#collision
collide= pygame.sprite.spritecollide(player,mobs,False)
block_hit_list = pygame.sprite.spritecollide(player,wall_list, False)
'''for block in block_hit_list:
# Reset our position based on the top/bottom of the object.
if player.speedy > 0:
player.rect.bottom = player.rect.bottom
else:
player.rect.top = block.rect.bottom'''
if collide:
health-=10
if health<0:
collide= pygame.sprite.spritecollide(player,mobs,True)
# Draw / render
screen.blit(background_img,[0,0])
mouse=pygame.mouse.get_pos()
click=pygame.mouse.get_pressed()
pygame.draw.rect(screen,RED,(0,565,180,105))
pygame.draw.rect(screen,RED,(190,565,180,105))
pygame.draw.rect(screen,RED,(380,565,180,105))
pygame.draw.rect(screen,RED,(570,565,180,105))
textSurf1, textRect1=text_objects('Fire Spin',smallText)
textSurf2, textRect2=text_objects('Flame Charge',smallText)
textSurf3, textRect3=text_objects('Fire Pledge',smallText)
textSurf4, textRect4=text_objects('Fire Spin',smallText)
textSurf5, textRect5=text_objects("Player:"+str(phealth)+"/136",smallText1)
textSurf6, textRect6=text_objects("Enemy:"+str(ehealth)+"/252",smallText1)
textRect1.center=((0+(180/2)),(565+(105/2)))
textRect2.center=((190+(180/2)),(565+(105/2)))
textRect3.center=((380+(180/2)),(565+(105/2)))
textRect4.center=((570+(180/2)),(565+(105/2)))
textRect5.center=((760+(180/2)),(565+(105/2)))
textRect6.center=((760+(180/2)),(600+(105/2)))
if 180>mouse[0]>0 and 670>mouse[1]>565:
if click[0]==1:
Attack1()
if 370>mouse[0]>190 and 670>mouse[1]>565:
if click[0]==1:
Attack2()
if 560>mouse[0]>380 and 670>mouse[1]>565:
if click[0]==1:
Attack3()
if 750>mouse[0]>570 and 670>mouse[1]>565:
if click[0]==1:
Attack4()
screen.blit(textSurf1, textRect1)
screen.blit(textSurf2, textRect2)
screen.blit(textSurf3, textRect3)
screen.blit(textSurf4, textRect4)
screen.blit(textSurf5, textRect5)
screen.blit(textSurf6, textRect6)
if phealth<0:
import tt1
pygame.quit()
if ehealth<0:
import Win
pygame.display.update()
all_sprites.draw(screen)
# *after* drawing everything, flip the display
pygame.display.flip()
pygame.quit()
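# A hedged note: the attack buttons above poll pygame.mouse.get_pressed() every
# frame, so holding the button re-triggers an attack roughly 15 times a second.
# A common alternative (sketch only, not wired into the loop above) reacts to
# the MOUSEBUTTONDOWN event once per click inside the event loop:
#     for event in pygame.event.get():
#         if event.type == pygame.QUIT:
#             running = False
#         elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
#             mx, my = event.pos
#             if 0 < mx < 180 and 565 < my < 670:
#                 Attack1()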
|
[
"aditeya.baral@gmail.com"
] |
aditeya.baral@gmail.com
|
388935c6fb29e2aa10a63eb2d25d24fa7bcf8547
|
e653913c3e1a294d3294790cbf96da4d0d45b9de
|
/lib/fast_rcnn/softmax_loss.py
|
a2fcd289773978cef3c292732404cdd87dd27363
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
dereyly/faster-rcnn-my
|
895919325a0e24bf822e4a914fe91691647fbf44
|
8f30c996102ab59ece8bf683cdbe4bbab4e8a29b
|
refs/heads/master
| 2021-04-15T13:33:07.377526
| 2018-04-27T18:05:47
| 2018-04-27T18:05:47
| 126,897,428
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
__author__ = 'dereyly'
import sys
#sys.path.append('/home/dereyly/progs/caffe_cudnn33/python_33')
#sys.path.append('/home/dereyly/progs/caffe-master-triplet/python')
import caffe
import numpy as np
'''
layer {
name: 'rcls_lost_my'
type: 'Python'
bottom: 'feats'
bottom: 'labels'
top: 'cls_lost_my'
python_param {
module: 'fast_rcnn.softmax_loss'
layer: 'SoftmaxLossLayer'
#param_str: "{'ratios': [0.5, 1, 2], 'scales': [2, 4, 8, 16, 32]}"
}
}
'''
def softmax(x):
    """Compute row-wise softmax values for x, numerically stabilized."""
    shifted = x - np.max(x, axis=1, keepdims=True)  # guard exp against overflow
    sf = np.exp(shifted)
    return sf / np.sum(sf, axis=1, keepdims=True)
class SoftmaxLossLayer(caffe.Layer):
def setup(self, bottom, top):
# check input pair
if len(bottom) != 2:
raise Exception("Need two inputs to compute distance.")
def reshape(self, bottom, top):
# check input dimensions match
# difference is shape of inputs
sz=bottom[0].data.shape
self.batch_sz=sz[0]
self.diff = np.zeros((sz[0],sz[1]),dtype=np.float32)
self.lbl_gt=np.zeros((sz[0],sz[1]),dtype=np.float32)
# loss output is scalar
top[0].reshape(1)
#top[1].reshape(self.batch_sz)
def forward(self, bottom, top):
sz=bottom[0].data.shape
self.lbl_gt=np.zeros((sz[0],sz[1]),dtype=np.float32)
lbl_idx=bottom[1].data
lbl_idx=lbl_idx.astype(dtype= int)
for i in range(self.batch_sz):
self.lbl_gt[i,lbl_idx[i]]=1
soft_max=softmax(bottom[0].data)
#loss = -self.lbl_gt*np.log(np.maximum(soft_max,np.finfo(np.float32).eps))
loss=0
for i in range(self.batch_sz):
loss -= np.log(np.maximum(soft_max[i][lbl_idx[i]],np.finfo(np.float32).eps))
#loss2=-np.log(soft_max)
#for i in range(self.batch_sz):
# loss[i,lbl_idx[i]]=0
#print bottom[1].data.shape
self.diff[...] = soft_max-self.lbl_gt
top[0].data[...] = np.sum(loss) / bottom[0].num
#top[1].data[...] = loss
def backward(self, top, propagate_down, bottom):
#pass
bottom[0].diff[...] = self.diff / bottom[0].num
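# A numpy-only sketch: finite-difference check of the softmax/cross-entropy
# gradient used in backward() above (diff = softmax(x) - y). Caffe is not
# required; names are illustrative.
def _gradient_check(eps=1e-5):
    rng = np.random.RandomState(0)
    x = rng.randn(3, 4)
    y = np.eye(4)[[0, 2, 1]]                     # one-hot labels
    loss = lambda z: -np.sum(y * np.log(softmax(z)))
    analytic = softmax(x) - y
    numeric = np.zeros_like(x)
    for i in range(x.size):
        d = np.zeros_like(x)
        d.flat[i] = eps
        numeric.flat[i] = (loss(x + d) - loss(x - d)) / (2 * eps)
    print np.max(np.abs(analytic - numeric))     # should be ~1e-9 or smaller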
|
[
"dereyly@gmail.com"
] |
dereyly@gmail.com
|
ee4f68abfd11e4e51c357513e5d469d5820fcd77
|
4b6d8d8f7f9fb2a73c634b1915741a2fabd25f26
|
/main.py
|
a509b0f8d19546d423e5acff5f1bf577f1c748c6
|
[] |
no_license
|
gustavusd/TicTacToe
|
249366d5f5fc8f1ca36702b09789bf7c8e596873
|
d457ab1fc36eb60c9b2a19fc9e45f7d2bde67a93
|
refs/heads/master
| 2022-11-19T20:12:10.745007
| 2020-07-20T18:39:02
| 2020-07-20T18:39:02
| 281,197,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
from Board import Board
from Player import Player
from AI import AI
# As a precondition you must enter valid moves
def main():
board = Board([[]])
board.new_game()
player = Player(board.game_state)
ai = AI(board.game_state)
    board.who_start = input("Enter 'x' to move first (anything else lets the AI move first): ")
board.display_board()
    while board.check_winner() == '-' and board.movesLeft():  # play only while there is no winner and moves remain
if board.who_start == 'x':
player.move(int(input("Enter the desired row location: ")), int(input("Enter the desired column location:"
" ")))
ai.move()
board.display_board()
else:
ai.move()
player.move(int(input("Enter the desired row location: ")), int(input("Enter the desired column location:"
" ")))
board.display_board()
board.display_board()
main()
|
[
"gustavus7@gmail.com"
] |
gustavus7@gmail.com
|
ae4f8568a71b51624bc620ef95973987e0088220
|
6df36df3f0280f49b97b64db51089e63c3333eba
|
/Tor/registration_client.py
|
4e2199733d392206db688dbe425e2f3dbfb6dbe3
|
[] |
no_license
|
ErichL/Projects
|
efe38cdde6207c222e1705463ed34bee68a8e0b4
|
055c053603f3740ccd09888140d75925628e8de5
|
refs/heads/master
| 2021-01-18T18:16:27.542330
| 2017-09-30T06:52:08
| 2017-09-30T06:52:08
| 33,706,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,814
|
py
|
import socket
import struct
import select
import signal
import pyuv
import subprocess
import sys
import os
#-----------------------------------------------
# Usage: python registration_client.py servicePort serviceName serviceData
# Registers a service whose IP is that the host it's running
# on and whose port, name, and data are given on the command line.
# Renews the registration as necessary. Tries to unregister before
# terminating.
# It's intended that this app be launched by
# the process that wants to be registered.
#-----------------------------------------------
#print "parent pid = ", os.getppid()
#print "my pid = ", os.getpid()
REGISTRATION_SERVER_HOST = "war.cs.washington.edu"
REGISTRATION_SERVER_IP = socket.gethostbyname(REGISTRATION_SERVER_HOST);
REGISTRATION_SERVER_PORT = 46101
if ( len(sys.argv) != 4 ):
print "Usage: python ", sys.argv[0], " port name data"
sys.exit(1)
SERVICE_PORT = int(sys.argv[1])
SERVICE_NAME = sys.argv[2]
SERVICE_DATA = int(sys.argv[3])
SERVER_RESPONSE_TIMEOUT = 4
# DO NOT CHANGE THIS. The registration service
# will send us packets on SENDING_PORT + 1
LOCAL_IP = struct.unpack('!I', socket.inet_aton(socket.gethostbyname(socket.getfqdn())))[0]
#print "[registration client] Local IP:", socket.gethostbyname(socket.getfqdn())
# Do not change anything else below here unless
# you know what you're doing
ACTION_REGISTER = int(0x01)
ACTION_REGISTERED = int(0x02)
ACTION_UNREGISTER = int(0x05)
ACTION_PROBE = int(0x06)
ACTION_ACK = int(0x07)
seq_num = 0
outstanding_packets = {}
reregistration_timer = None
# An abstraction around turning packets into bytes and back
class Packet461:
PACKET_HEADER_FORMAT = '!HBB' # magic word, sequence number, action
HEADER_LENGTH = 4
MAGIC_WORD = int(0xC461)
def __init__(self, seq_num, action, payload):
self.magic = Packet461.MAGIC_WORD
self.seq_num = seq_num
self.action = action
self.payload = payload
@classmethod
def fromdata(cls, data):
(magic_word, seq_num, action) = struct.unpack(Packet461.PACKET_HEADER_FORMAT, data[:Packet461.HEADER_LENGTH])
if magic_word != Packet461.MAGIC_WORD:
# silently drop packet
return None
return cls(seq_num, action, data[Packet461.HEADER_LENGTH:])
def __str__(self):
return "Magic: " + str(self.magic) + "\nSeqno: " + str(self.seq_num) + "\nType: " + str(self.action)
def pack(self):
return struct.pack( Packet461.PACKET_HEADER_FORMAT + str(len(self.payload)) + 's',
self.magic,
self.seq_num,
self.action,
self.payload)
def next_seq_num():
global seq_num
result = seq_num
seq_num += 1
return result
def bytes_to_string(string):
return str(':'.join('{:02x}'.format(ord(c)) for c in string))
def shutdown(handle, signum):
#print "[registration cient] Shutting down"
signal_h.close()
payload = struct.pack('!IH', LOCAL_IP, SERVICE_PORT)
send(Packet461(next_seq_num(), ACTION_UNREGISTER, payload))
global sending_socket
sending_socket.close()
global loop
loop.stop()
def remove_packet(seq_num):
    global outstanding_packets
    if seq_num not in outstanding_packets:
        sys.stderr.write( "[registration client] Unexpected packet seq_num: " + str(seq_num) + "\n")
    else:
        del outstanding_packets[seq_num]
# retransmits everything for which we haven't received a response
def rexmit(timer):
for k,v in outstanding_packets.iteritems():
send(v);
# convenient way of updating outstanding_packets, seq_num, and actually sending
def send(packet):
global outstanding_packets
outstanding_packets[packet.seq_num] = packet
#print "[registration client] sending", bytes_to_string(packet.pack()), (REGISTRATION_SERVER_HOST, REGISTRATION_SERVER_PORT)
global sending_socket
return sending_socket.send((REGISTRATION_SERVER_IP, REGISTRATION_SERVER_PORT), packet.pack())
def on_read(handle, ip_port, flags, data, error):
if data is None:
return
# Check that this is from the server
if ip_port != (REGISTRATION_SERVER_IP, REGISTRATION_SERVER_PORT):
return
    packet = Packet461.fromdata(data)
    if packet is None:
        return  # bad magic word; drop silently
    remove_packet(packet.seq_num)
    if packet.action == ACTION_REGISTERED:
        reregistration_timer.stop()
        # isolate the lease time and re-register at 80% of it
        (interval,) = struct.unpack("!H", packet.payload)
        reregistration_timer.start(lambda x: send_registration(), 0.8*interval, 0.8*interval)
def send_registration():
register_payload = struct.pack('!IHIB' + str(len(SERVICE_NAME)) + 's',
LOCAL_IP,
SERVICE_PORT,
SERVICE_DATA,
len(SERVICE_NAME),
SERVICE_NAME)
send(Packet461(next_seq_num(),ACTION_REGISTER, register_payload))
#---------------------------------------------------------------------
# mainline
#---------------------------------------------------------------------
loop = pyuv.Loop.default_loop()
reregistration_timer = pyuv.Timer(loop)
rexmit_timer = pyuv.Timer(loop)
rexmit_timer.start(rexmit, SERVER_RESPONSE_TIMEOUT, SERVER_RESPONSE_TIMEOUT)
# Make sure we clean up and exit on Ctrl-C
signal_h = pyuv.Signal(loop)
signal_h.start(shutdown, signal.SIGINT)
#sending_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sending_socket = pyuv.UDP(loop)
sending_socket.start_recv(on_read)
# Kick things off by registering with the server
send_registration()
#print "[registration client] Sent registration!"
loop.run();
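# A hedged sketch of the 4-byte wire header Packet461 packs above (magic word
# 0xC461, then sequence number and action as single bytes, then the payload):
#     >>> hdr = struct.pack('!HBB', 0xC461, 7, ACTION_REGISTER)
#     >>> struct.unpack('!HBB', hdr)
#     (50273, 7, 1)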
|
[
"erichlee96@gmail.com"
] |
erichlee96@gmail.com
|
3f88725fdeac52cf836f310d51cfec6c8abd73d7
|
a7e5d600a938d2746ab40f90c42b539f93d076cb
|
/population_script.py
|
9d39fc8232fd3bfa97811cfe03f37855ab59a251
|
[] |
no_license
|
TheWerewolves/IT_Team_Project
|
2fee7dd8f9b9774eb11327b81de3d4055a184a2b
|
215fa77174980a54de2b9c5353f873721c257950
|
refs/heads/master
| 2022-12-09T21:49:21.930089
| 2020-03-31T20:20:34
| 2020-03-31T20:20:34
| 246,424,879
| 0
| 0
| null | 2022-12-08T03:56:12
| 2020-03-10T22:51:52
|
Python
|
UTF-8
|
Python
| false
| false
| 14,557
|
py
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'IT_Team_Project.settings')
import django
django.setup()
from django.contrib.auth.models import User
from gamers_havn.models import Account, Game, Article
def populate():
accounts = [
{
'user': {
'username': 'yuuto', 'password': 'pw000000',
'email': 'yuuto@mymail.com',
},
'age': 15
},
{
'user': {
'username': 'Benny', 'password': 'pw000000',
'email': 'benny@mymail.com',
},
'age': 16
},
{
'user': {
'username': 'baobao', 'password': 'pw000000',
'email': 'baobao@mymail.com',
},
'age': 10
},
{
'user': {
'username': 'BBLover', 'password': 'pw000000',
'email': 'blackopiumlover@mymail.com',
},
'age': 1
},
]
games = [
{'title': 'League of Legends',
'url': 'https://euw.leagueoflegends.com/en-gb/'},
{'title': 'Overwatch',
'url': 'https://playoverwatch.com/en-us/'},
{'title': "Playerunknown's Battleground",
'url': 'https://www.pubg.com/'},
{'title': 'Dark Souls',
'url': 'https://store.steampowered.com/app/570940/DARK_SOULS_REMASTERED/'},
{'title': 'Minecraft',
'url': 'https://minecraft.net/'},
{'title': 'Teamfight Tactic',
'url': 'https://teamfighttactics.leagueoflegends.com/en-gb/'},
{'title': 'Call of Duty: Modern Warfare',
'url': 'https://www.callofduty.com/modernwarfare'},
{'title': 'Hearthstone',
'url': 'https://playhearthstone.com/en-gb/'},
{'title': 'World of Warcraft',
'url': 'https://worldofwarcraft.com/'},
{'title': 'Grand Theft Auto V',
'url': 'https://www.rockstargames.com/V/'},
{'title': 'Battlefield 4',
'url': 'https://www.ea.com/games/battlefield/battlefield-4'},
]
articles = [
{'title': "League Of Legends One For All Mode Returns",
'content': '''
##After two years in limbo, One For All returns to League of Legends for a limited time.
One For All is making a surprise comeback to Riot's League of Legends in patch 10.6. Previously, One For All was an arcade game mode wherein both opposing teams only have one champion each, with players needing to work together to take on the other team's single champion.
As you can imagine, having to work together in League of Legends is an ambitious notion, much less putting five players in control of the same champion at once and expecting a competent level of cooperation. Nevertheless, Riot is set on bringing the arcade mode back after a two-year absence, although only for a limited time.
The last time we saw One For All was April Fools Day in 2018. Since then, League of Legends has added eight new champions and been through a number of patches and balance changes. How this will affect One For All is yet to be seen, but some interesting strategies are sure to emerge from the game mode.
Original One For All strategies aren't completely unusable, but older strategies are sure to resurface with the reemergence of the game mode. Ultimately it may even come down to old vs new in the first week of release.
Riot has promised to keep a close eye on the game mode, checking that no one champion becomes too over or underpowered in the mode. The whole point of One For All is cooperation, not just picking a strong champion every round. Balancing will be similar to the already existing changes on ARAM and URF.
One For All is currently available to play on the Public Beta Environment (PBE) and will go live alongside Patch 10.6 on March 18. League of Legends' Arcade games generally stick around for at least two patches, so check it out before the mode disappears again.
*Notice: This article is originally From https://www.gamespot.com/articles/league-of-legends-one-for-all-mode-returns/1100-6474417// The poster in our website do not own the articles*
''',
'author': 'yuuto',
'game': 'League of Legends'},
{'title': "DARK SOULS 3 GUIDE",
'content': '''
Dark Souls 3 is here, and From Software's notoriously difficult action role-playing game series is bigger than it's ever been.
We've scoured every inch of the Kingdom of Lothric to help you uncover its mysteries and overcome odds stacked to beat you down. We've organized our guide into a few different sections, based on what you might need at any given moment.
If you're just starting out, be sure to read through our Beginner's Guide, which will teach new and experienced players what you should know in your first hours. Lothric is brutal, and this will help you overcome seemingly insurmountable odds. And we'll teach you how to build the best character we've ever built in a Souls game.
Our full walkthrough of the game begins with the enemies, items and secrets in the Cemetery of Ash and Firelink Shrine. Then it guides you through all of the areas you'll visit in Dark Souls 3, from the required to the optional and secret -- and there's more than a few of the latter. Stuck on a boss? Just visit the section it appears in, and you'll find strategies and videos.
From its earliest days, the Dark Souls series asked players to prepare to die. We've done that plenty. We suspect you will, too. But with Polygon's Dark Souls 3 guide by your side, you can prepare to die a lot less than you otherwise would have.


Dark Souls 3's maps are a confusing, intriguing mass of overlapping spaghetti. But with this guide, you'll be able to find your place -- as well as every enemy and item -- on every map. You'll also find links to every part of Polygon's full walkthrough, so you can find your way to where you need to be.


What class should you choose? What starting item? Our guide will make your first dozen hours in Lothric much easier. Think of this as a way to learn the language of Dark Souls 3 without cheating -- or a way to even the odds in a game where you're always outnumbered, always outgunned.

You'll see most of Dark Souls 3 as you progress through its bonfires, but there are a few hidden, optional areas reserved for the most daring players. Most have challenging foes. Some end with optional boss fights. All of them balance out the difficulty with great rewards.

*Notice: This article is originally From https://www.polygon.com/2016/4/12/11412098/dark-souls-3-guide The poster in our website do not own the articles*
''',
'author': 'yuuto',
'game': 'Dark Souls'},
{'title': "Overwatch Adds A New Doomfist Legendary Skin For A Limited Time",
'content': '''
## Overwatch has a new Doomfist skin on PC, PS4, Xbox One, and Switch in honor of Overwatch League Season 2 MVP Jay "Sinatraa" Won
The Overwatch League has revealed a new legendary skin for Blizzard's hero shooter. The skin is for Doomfist and is called Thunder, and it's in celebration of one of its most important players. You can see it in the tweet embedded below.

Thunder will only be available for purchase in-game for a limited-time, becoming available from March 26 through April 9. The skin is in celebration of the professional esports player Jay "Sinatraa" Won, a member of esports team Shock, who won the Overwatch League Season 2 MVP award. Sinatraa is Shock's resident DPS player and has regularly used Doomfist during the Overwatch League.
Overwatch is set to get one more new hero before the release of Overwatch 2. First teased in animated shorts, Echo will be joining the hero shooter as another DPS fighter (Blizzard has followed up to tell fans to not worry, more support and tank heroes are in development).
Originally conceived for Blizzard's cancelled Titan project, Echo is a versatile hero--able to mimic the forms and abilities of any other character. In describing Echo, GameSpot editor Phil Hornshaw writes, "Key to Echo is her adaptability, and from what Goodman and Fonville described, players who try out the new hero should have a lot of chances to find creative ways to use her abilities. The character's versatility means she can be used to control territory with sticky bombs, as a straight damage-dealer using sticky bombs and Focusing Beam, or to shore up a team in an emergency by duplicating other characters. We'll have to wait to see what other creative ways players find to use Echo--and to counter her."
If you're looking to have a go at Echo early, the hero is live on Overwatch's Public Test Realm right now.
*Notice: This article is originally From https://www.gamespot.com/articles/overwatch-adds-a-new-doomfist-legendary-skin-for-a/1100-6475184/ The poster in our website do not own the articles*
''',
'author': 'Benny',
'game': 'Overwatch'},
{'title': "How to Play Nasus in League of Legends",
'content': '''
Nasus is a melee Fighter, Tank and Mage who excels in Solo Top and in the Jungle. His skins are Galactic Nasus, Pharaoh Nasus, Dreadknight Nasus, Riot K9 Nasus and Infernal Nasus.
##1 Leveling and Abilities

###1 Learn the abilities.
Passive (Soul Eater) Nasus has life steal for his attacks
Q (Siphoning Strike) Nasus' axe is empowered and his next basic attack will deal bonus damage. If Siphoning Strike kills a unit, it will gain stacks of Siphoning Strike which will permanently deal more damage on usage of Siphoning Strike.
W (Wither) Nasus slows an enemy champion for 5 seconds. The slow percentage increases over time.
E (Spirit Fire) Nasus puts a spirit flame in a circular area. This deals magic damage and reduces foes' armor every second. This has a high mana cost so make sure you don't use it too often.
R (Fury of the Sands) Nasus transforms into a big version and increases damage, life steal and attack range. This also gives a massive health boost. Enemies near Nasus are damaged by a percentage of their maximum health. Siphoning Strike deals extra damage in this duration too. Nasus turns back into his normal form after a few seconds.

###2
Level up as follows:
Take Siphoning Strike at Level one and max it immediately.
Take Wither at Level two and max it second.
Take Spirit Fire at Level four and max it last.
Take Fury of the Sands at level six, eleven and sixteen.
##2 Build Guide

###1 Use the following recommendations for starts: Doran's Shield, Amplifying Tone, Cloth Armor, Ancient Coin or Boots of Speed.

###2 Use the following recommendations for mid game: Iceborn Gauntlet, Frozen Heart, Rylai's Crystal Scepter, Seeker's Armguard, and Ninja Tabi.
As long as you focus ability power, mana and armor, you are fine.

###3 Get masteries. Take all armor and magic resist masteries as well as ability power masteries.

###4 Get runes. Focus on ability power and mana.

###5 Get flash. It is a must take for summoner spells. Teleport and Ignite are also good choices. Take smite for jungling.
*Notice: This article is originally From https://https://www.wikihow.com/Play-Nasus-in-League-of-Legends/ The poster in our website do not own the articles*
''',
'author': 'baobao',
'game': 'League of Legends'},
]
for account in accounts:
add_account(account['user']['username'], account['user']['email'],
account['user']['password'], account['age'])
for game in games:
add_game(game['title'], game['url'])
for article in articles:
account = Account.objects.get(user__username=article['author'])
game = Game.objects.get(title=article['game'])
content = article['content']
add_article(article['title'], content, account, game)
def add_account(name, email, password, age, portrait=None):
    user, created = User.objects.get_or_create(username=name, defaults={'email': email})
    if created:
        user.set_password(password)  # hash the password so authentication works
        user.save()
    account = Account.objects.get_or_create(user=user)[0]
    account.age = age
    account.portrait = portrait
    account.save()
    print(f"Added user {user}")
def add_game(title, url):
g = Game.objects.get_or_create(title=title)[0]
g.url = url
g.save()
print(f"Added game '{g}'")
def add_article(title, content, author, game):
try:
a = Article.objects.get(title=title)
a.content = content
a.author = author
a.game = game
a.save()
    except Article.DoesNotExist:  # narrow the bare except so real errors surface
a = Article.objects.create(title=title, content=content, author=author, game=game)
print(f"Added article '{a}'")
if __name__ == '__main__':
print("Starting Gamer's Havn population script...")
populate()
|
[
"yuutojurgen@gmail.com"
] |
yuutojurgen@gmail.com
|
9d31cb6fbc29af9594d709b85b0aae24e8c96cd9
|
373035950bdc8956cc0b74675aea2d1857263129
|
/spar_python/report_generation/ta2/ta2_report_generator.py
|
60f4d6ca6486e898b11b055d04aadb3851bd8bee
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
limkokholefork/SPARTA
|
5d122cd2e920775d61a5404688aabbafa164f22e
|
6eeb28b2dd147088b6e851876b36eeba3e700f16
|
refs/heads/master
| 2021-11-11T21:09:38.366985
| 2017-06-02T16:21:48
| 2017-06-02T16:21:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,345
|
py
|
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: TA2 report generator
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 21 Oct 2013 SY Original version
# *****************************************************************
# general imports:
import logging
import os
import functools
# SPAR imports:
import spar_python.report_generation.common.report_generator as report_generator
import spar_python.report_generation.ta2.ta2_schema as t2s
import spar_python.report_generation.common.section as section
import spar_python.report_generation.ta2.ta2_section_correctness as t2sc
import spar_python.report_generation.ta2.ta2_section_performance_totalelapsedtime as t2sptet
import spar_python.report_generation.ta2.ta2_section_performance_keygen as t2spkg
import spar_python.report_generation.ta2.ta2_section_performance_ingestion as t2spi
import spar_python.report_generation.ta2.ta2_section_performance_encryption as t2spenc
import spar_python.report_generation.ta2.ta2_section_performance_evaluation as t2speval
import spar_python.report_generation.ta2.ta2_section_performance_decryption as t2spd
import spar_python.report_generation.ta2.ta2_section_performance_singlegatetype as t2spsgt
import spar_python.report_generation.ta2.ta2_section_system_utilization as t2su
# LOGGER:
LOGGER = logging.getLogger(__name__)
class Ta2ReportGenerator(report_generator.ReportGenerator):
"""A TA2 report generator.
This class creates all the necessary section objects, and combines their
outputs to create the full report.
Attributes:
config: a configuration object
jinja_env: a jinja environment
"""
def __init__(self, config, jinja_env):
"""Initializes the report generator with a configuration object and
a jinja environment."""
super(Ta2ReportGenerator, self).__init__(config, jinja_env)
# the following dictionary maps each section name to the name of the
# corresponding template and the class which is responsible for
# populating it:
self._section_name_to_template_name_and_class = {
"ta2_other_sections": ("ta2_other_sections.txt", section.Section),
"ta2_correctness": ("ta2_correctness.txt",
t2sc.Ta2CorrectnessSection),
"ta2_performance": ("ta2_performance.txt", section.Section),
"ta2_performance_totalelapsedtime": (
"ta2_performance_totalelapsedtime.txt",
t2sptet.Ta2TotalElapsedTimeSection),
"ta2_performance_keygen": ("ta2_performance_keygen.txt",
t2spkg.Ta2KeygenSection),
"ta2_performance_ingestion": ("ta2_performance_ingestion.txt",
t2spi.Ta2IngestionSection),
"ta2_performance_encryption": ("ta2_performance_encryption.txt",
t2spenc.Ta2EncryptionSection),
"ta2_performance_evaluation": ("ta2_performance_evaluation.txt",
t2speval.Ta2EvaluationSection),
"ta2_performance_decryption": ("ta2_performance_decryption.txt",
t2spd.Ta2DecryptionSection),
"ta2_performance_singlegatetype": (
"ta2_performance_singlegatetype.txt",
t2spsgt.Ta2SingleGateTypeSection),
"ta2_system_utilization": ("ta2_system_utilization.txt", t2su.Ta2SystemUtilizationSection)}
# the following is the name of the report template:
self._report_template_name = "report.txt"
# the following is to be populated in create_sections:
self._sections = []
# perform all of the pre-processing:
self._populate_ground_truth()
def _populate_ground_truth(self):
"""Populates the ground truth with the baseline outputs."""
fields = [(t2s.INPUT_TABLENAME, t2s.INPUT_IID),
(t2s.INPUT_TABLENAME, t2s.INPUT_CORRECTOUTPUT),
(t2s.PEREVALUATION_TABLENAME, t2s.PEREVALUATION_OUTPUT),
(t2s.PEREVALUATION_TABLENAME, t2s.PEREVALUATION_CORRECTNESS),
(t2s.PEREVALUATION_TABLENAME, "ROWID")]
baseline_constraint_list = self.config.get_constraint_list(
fields=fields, require_correct=False, usebaseline=True)
baseline_values = self.config.results_db.get_values(
fields=fields,
constraint_list=baseline_constraint_list)
for (inputid, correctoutput, baselineoutput, stored_correctness,
evaluationid) in zip(*baseline_values):
# all baseline evaluations are assumed to be correct:
if correctoutput != baselineoutput:
self.config.results_db.update(
t2s.INPUT_TABLENAME, t2s.INPUT_CORRECTOUTPUT, baselineoutput,
constraint_list=[(t2s.INPUT_TABLENAME, t2s.INPUT_IID,
inputid)])
if not stored_correctness:
self.config.results_db.update(
t2s.PEREVALUATION_TABLENAME, t2s.PEREVALUATION_CORRECTNESS,
True, constraint_list=[
(t2s.PEREVALUATION_TABLENAME, "ROWID", evaluationid)])
performer_constraint_list = self.config.get_constraint_list(
fields=fields, require_correct=False, usebaseline=False)
performer_values = self.config.results_db.get_values(
fields=fields, constraint_list=performer_constraint_list)
for (inputid, correctoutput, output, stored_correctness,
evaluationid) in zip(*performer_values):
correctness = correctoutput == output
if stored_correctness != correctness:
self.config.results_db.update(
t2s.PEREVALUATION_TABLENAME, t2s.PEREVALUATION_CORRECTNESS,
correctness,
constraint_list=[(t2s.PEREVALUATION_TABLENAME,
"ROWID", evaluationid)])
|
[
"mitchelljd@ll.mit.edu"
] |
mitchelljd@ll.mit.edu
|
72620f4f255b14b91a7e4a7879e1322dd751f2e5
|
5e4d1d78fa3a7bf292e8f61b6f71cfac7b9afae5
|
/Approx.py
|
e0baca63c22ff280d1dc4467705a0795a9d6d7b1
|
[
"MIT"
] |
permissive
|
MUYANGGUO/MVC-6140-Project
|
205bca63982c5b01018c8da236d081168fcf9505
|
64ddac497802fd27b8895e264db44c552968d642
|
refs/heads/main
| 2023-01-29T10:54:03.528061
| 2020-12-05T04:02:00
| 2020-12-05T04:02:00
| 310,880,048
| 1
| 1
|
MIT
| 2020-11-22T22:42:51
| 2020-11-07T15:58:04
|
Python
|
UTF-8
|
Python
| false
| false
| 893
|
py
|
import time
import sys
sys.setrecursionlimit(100000)
def Approx(graph, cutoff_time, seed):
    # `seed` is accepted for interface compatibility but unused: the DFS here is deterministic
    start = time.time()
    print('Approximation method\n')
visited = set()
num_vertex = len(graph.nodes)
non_leaf = set()
for i in range(1, num_vertex + 1):
if i not in visited:
dfs(i, visited, num_vertex, graph, non_leaf)
runtime = time.time() - start
trace = [str(runtime) + ',' + str(len(non_leaf))]
    # the cutoff is only checked after the traversal has finished
    if cutoff_time < runtime:
        return set(), []
    else:
        return non_leaf, trace
def dfs(i, visited: set, number_vertex: int, graph, non_leaf):
visited.add(i)
if not graph.has_node(i):
non_leaf.add(i)
return
for neighbor in list(graph[i]):
if neighbor not in visited:
visited.add(neighbor)
non_leaf.add(i)
dfs(neighbor, visited, number_vertex, graph, non_leaf)
|
[
"yjiao47@gatech.edu"
] |
yjiao47@gatech.edu
|
164c0db846e167a5197716060bd51aa999e4c08c
|
5f18c3201b7f411ea300ee96e302b8cbda2c44a6
|
/braille/BrailleFile.py
|
c1336e19d6a4d1c614f4425e0f6db1ab008b3cbd
|
[
"MIT"
] |
permissive
|
sulealothman/arbraille
|
2204fac70e30bdc58f721e84b0748519754ad55b
|
80cf33bfc0368814778c76e3fd82da27042235f1
|
refs/heads/master
| 2023-01-19T18:07:15.859978
| 2020-11-29T16:36:08
| 2020-11-29T16:36:08
| 316,991,437
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
import os

class BrailleFile:
    def read(self, filePath=""):
        path = filePath if filePath.strip() != "" else input("Enter file path for translate : ")
        path = os.path.normpath(path)  # normalize separators portably
        with open(path, "r", encoding='utf-8') as f:
            content = f.read()
        return content
    def write(self, content, filePath=""):
        path = filePath if filePath.strip() != "" else input("Enter output file path for translate : ")
        path = os.path.normpath(path)
        with open(path, 'w', encoding='utf-8') as w:
            w.write(content)
        return content
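# A usage sketch; the file names below are hypothetical placeholders.
if __name__ == '__main__':
    bf = BrailleFile()
    text = bf.read('input.txt')
    bf.write(text, 'output.txt')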
|
[
"sothman@mte.sa"
] |
sothman@mte.sa
|
830e866737f1edd33ffd9bb50f6062cbdd7a64d7
|
d626c65a4d4990c733b72f9d93a8365c789cd000
|
/RFID.py
|
6ee8bc3e1abbc092168240877398383f814c1d2a
|
[] |
no_license
|
Alex5200/Raspi-rfid-temp
|
486b6c77ed285a2d990d6b47710400dba27f9440
|
d4a072b9395d7ae9599855c06ee1b23b7ab5ec4e
|
refs/heads/main
| 2023-02-28T21:56:31.546140
| 2021-02-13T17:13:10
| 2021-02-13T17:13:10
| 333,729,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
#!/usr/bin/env python3.5
#-- coding: utf-8 --
import RPi.GPIO as GPIO  # import the library for controlling the GPIOs
from pirc522 import RFID
import time
GPIO.setmode(GPIO.BOARD)  # use Board pin numbering
GPIO.setwarnings(False)  # disable warning messages
rc522 = RFID()  # instantiate the library
print('Waiting for a badge (Ctrl + c to quit): ')  # ask the user to scan a badge
# Loop forever so we keep reading
while True:
    rc522.wait_for_tag()  # wait until an RFID chip comes into range
    (error, tag_type) = rc522.request()  # once a chip has been read, fetch its info
    if not error:  # no error so far
        (error, uid) = rc522.anticoll()  # clear possible collisions (several cards at once)
        if not error:  # collision handling succeeded
            print('Badge scanned, id: {}'.format(uid))  # show the badge's unique id
            time.sleep(1)  # wait 1 second so we don't read the same tag hundreds of times
|
[
"noreply@github.com"
] |
Alex5200.noreply@github.com
|
c6e04bd7d9ecc645f515936cf297d14781787c56
|
d748e95c78bd3d259fd977a6496591abb70c9bf8
|
/main.py
|
053a9502d6c2c60c97b5766c339164d831d95f0f
|
[] |
no_license
|
Martikos/spiderman
|
ff36ea0c25490c2f620dc72a3588c8244d592317
|
6be33cb0762c2891fd7e90e201c3720b5fdfffa2
|
refs/heads/master
| 2020-04-22T00:14:07.050561
| 2013-12-02T15:38:08
| 2013-12-02T15:38:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,686
|
py
|
from datetime import datetime
import json
import operator
import requests
import sys
from collections import deque
from tornado.httpclient import AsyncHTTPClient
from tornado.web import asynchronous
import tornado.ioloop
import tornado.web
# globals
network = {}
n_videos = 100
video_requests = []
video_queue = deque(video_requests)
# parameters
max_results = 25
pages = 10
start_date = 0
end_date = 0
completed = False
new_request = False
n_requests = 0
new_search_key = str(datetime.now())
""" Available functions:
all posted objects should have the properties:
- 'function': specifies the function to be performed.
- 'input': input object
- 'videos': number of videos to return
expand: given a seed set of videos, expand to related videos
'search' object example:
{
'function': 'search',
'input': {
'seed': ['adidas', 'basketball', 'shoes']
}
'count': 1000
}
expand object example:
{
'function': 'expand',
'input': {
'seed': ['1928398123', '1289371923', '8912739172']
}
'count': 1000
}
"""
def videos_to_string(net):
videos = sorted(net.iteritems(), key=operator.itemgetter(1))
import json
ll = []
for video, count in videos:
ll.append({
'id': video,
'relevancy': count
})
return str(json.dumps(ll))
def related_search(response, client, search_key):
global n_requests
n_requests -= 1
global new_search_key
if len(network) < n_videos:
related_result = json.loads(response.body)
video_ids = [r['id'] for r in related_result['data']['items']]
for index, r in enumerate(related_result['data']['items']):
sys.stdout.write('\b')
sys.stdout.flush()
sys.stdout.write(' %d videos found\r' % len(network))
if r['id'] in network:
network[r['id']] += 1
elif len(network) < n_videos:
network[r['id']] = 1
http_client = AsyncHTTPClient()
cb = lambda x: related_search(x, client, search_key)
http_client.fetch("http://gdata.youtube.com/feeds/api/videos/{}/related?alt=jsonc&v=2".format(r['id']),
callback=cb)
n_requests += 1
elif search_key == new_search_key:
global completed
if completed == False:
completed = True
try:
client.write(videos_to_string(network))
client.finish()
except:
client.finish()
pass
sys.stdout.write('\b')
sys.stdout.flush()
sys.stdout.write(' %d videos found\r' % len(network))
return
else:
pass
def search_related(video_id, client):
    global network
    network = {}
    global n_requests
    global new_search_key
    new_search_key = str(datetime.now())
    http_client = AsyncHTTPClient()
    cb = lambda x: related_search(x, client, new_search_key)
    http_client.fetch("http://gdata.youtube.com/feeds/api/videos/{}/related?alt=jsonc&v=2".format(video_id),
        callback=cb)
    n_requests += 1
def search_related(video_ids, client):
    # NOTE: this definition shadows the single-id version above; it expands a
    # whole list of seed video ids instead of one.
    global network
    network = {}
    global n_requests
    global new_search_key
    new_search_key = str(datetime.now())
    print "Launching The Spiderman on {0} seed videos.".format(len(video_ids))
    http_client = AsyncHTTPClient()
    cb = lambda x: related_search(x, client, new_search_key)
    for video_id in video_ids:
        sys.stdout.write('\b')
        sys.stdout.flush()
        sys.stdout.write(' %d\r' % len(network))
        http_client.fetch("http://gdata.youtube.com/feeds/api/videos/{}/related?alt=jsonc&v=2".format(video_id),
            callback=cb)
        n_requests += 1
def search(keywords, client):
    global network
    network = {}
    global n_requests
    global new_search_key
    new_search_key = str(datetime.now())
search_query = "+".join(keywords)
search_query = search_query.replace(' ', '+')
print "Launching The Spiderman on {0}.".format(search_query)
done = False
for start_index in range(1, pages):
sys.stdout.write('\b')
sys.stdout.flush()
sys.stdout.write(' %d\r' % len(network))
request_url = "http://gdata.youtube.com/feeds/api/videos?q={0}&orderby=relevance&alt=jsonc&v=2&max-results={1}&start-index={2}".format(
search_query,
max_results,
start_index*25)
http_client = AsyncHTTPClient()
cb = lambda x: related_search(x, client, new_search_key)
http_client.fetch(request_url, callback=cb)
n_requests += 1
def on_fetch(response):
if len(network) < n_videos:
related_result = json.loads(response.body)
video_ids = [r['id'] for r in related_result['data']['items']]
spider(video_ids)
else:
return
def spider(feed):
if len(network) < n_videos:
videos = feed
related = [0] * 25 * (len(videos)-1)
for (i, video_id) in enumerate(videos):
try:
network[video_id] += 1
except:
network[video_id] = 0
http_client = AsyncHTTPClient()
try:
http_client.fetch("http://gdata.youtube.com/feeds/api/videos/{}/related?alt=jsonc&v=2".format(video_id),
callback=on_fetch)
except:
pass
else:
return
class ResultsHandler(tornado.web.RequestHandler):
def get(self):
done = False
import operator
videos = sorted(network.iteritems(), key=operator.itemgetter(1))
print "first 50 elements:"
print videos[:50]
print "------------------"
print "last 50 elements:"
print videos[len(videos)-50:]
new_file = open('videos.dict', 'w+')
for entry in videos:
new_file.write(entry[0] + ':' + str(entry[1]) + '\n')
new_file.close( )
import json
# return json.dumps(videos)
return_str = ''
for v, count in videos:
new_str = "http://www.youtube.com/video/{}".format(v)
return_str += new_str + '\n'
self.write(return_str)
class MainHandler(tornado.web.RequestHandler):
@asynchronous
def post(self):
from spiderman import *
import json
post_request = json.loads(self.request.body)
print "POST request"
# parse POST request
new_post_request = {
'function': 'search',
'input': {
'seed': ['beats', 'dr dre', 'headphones', 'noise cancelling']
},
'count': 100
}
function_name = post_request['function']
print function_name
handler = Spiderman.get_handler(function_name)
global tornado_loop
spiderman = handler(self, post_request, tornado_loop).search()
return
def get(self):
self.render('index.html', some_var="Hey Marc!")
application = tornado.web.Application([
(r"/", MainHandler),
(r"/results", ResultsHandler)
])
def handle_request(response):
if response.error:
print "Error:", response.error
else:
print response.body
tornado_loop = []
if __name__ == "__main__":
import os
application.listen(os.environ.get("PORT", 5000))
tornado_loop = tornado.ioloop.IOLoop.instance().start()
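# Illustrative client call (assumes the server above is running on port 5000;
# the payload shape follows the docstring near the top of this file):
#
#   import json, requests
#   payload = {"function": "search",
#              "input": {"seed": ["adidas", "basketball", "shoes"]},
#              "count": 100}
#   requests.post("http://localhost:5000/", data=json.dumps(payload))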
|
[
"marc.m.adam@gmail.com"
] |
marc.m.adam@gmail.com
|
610eee5ef0c52adcdcf9e1dd3953f8605cce1f25
|
f043fee89c0e2030386adcebb74d08164b7b974f
|
/reagent/ope/estimators/estimator.py
|
f53db8aecd9b5a794e2b422655c7546d20379046
|
[
"BSD-3-Clause"
] |
permissive
|
IronOnet/ReAgent
|
c2d22e7dc63eaf61e0a50e9343110c6df79a9b40
|
67434f458cde1f2c946237e866a73392279a7ede
|
refs/heads/master
| 2023-04-06T17:31:59.751700
| 2021-04-12T21:56:19
| 2021-04-12T21:57:05
| 357,700,053
| 2
| 0
|
BSD-3-Clause
| 2021-04-13T22:04:09
| 2021-04-13T22:04:09
| null |
UTF-8
|
Python
| false
| false
| 9,051
|
py
|
#!/usr/bin/env python3
import logging
import math
import pickle
import tempfile
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from multiprocessing import Pool
from typing import Iterable, List, Mapping, Optional, Tuple, Union
import torch
from reagent.evaluation.cpe import bootstrapped_std_error_of_mean
from torch import Tensor
logger = logging.getLogger(__name__)
SCORE_THRESHOLD = 1e-6
class ResultDiffs:
"""
Statistics for differences, e.g., estimates vs ground truth
"""
def __init__(self, diffs: Tensor):
self._diffs = diffs
self._rmse = None
self._bias = None
self._variance = None
@property
def rmse(self) -> Tensor:
if self._rmse is None:
self._rmse = (self._diffs ** 2.0).mean().sqrt()
return self._rmse
@property
def bias(self) -> Tensor:
if self._bias is None:
self._bias = self._diffs.mean()
return self._bias
@property
def variance(self) -> Tensor:
if self._variance is None:
# pyre-fixme[16]: `Tensor` has no attribute `var`.
self._variance = self._diffs.var()
return self._variance
def __repr__(self):
return (
f"samples={self._diffs.shape[0]}, rmse={self.rmse.item()}"
f", bias={self.bias}, variance={self.variance}"
)
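# Illustrative: the statistics are computed lazily on first access, e.g.
#   ResultDiffs(torch.tensor([0.1, -0.2, 0.05])).rmse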
@dataclass(frozen=True)
class EstimatorResult:
log_reward: float
estimated_reward: float
ground_truth_reward: Optional[float] = 0.0
estimated_weight: float = 1.0
estimated_reward_normalized: Optional[float] = None
estimated_reward_std_error: Optional[float] = None
estimated_reward_normalized_std_error: Optional[float] = None
@dataclass
class EstimatorResults:
"""
Estimator results
"""
results: List[EstimatorResult] = field(default_factory=list)
device = None
def append(self, result: EstimatorResult):
"""Append a data point
Args:
result: result from an experimental run
"""
er = result.estimated_reward
if math.isnan(er) or math.isinf(er):
logging.warning(f" Invalid estimate: {er}")
return
lr = result.log_reward
gr = (
result.ground_truth_reward
if result.ground_truth_reward is not None
else 0.0
)
logging.info(
f" Append estimate [{len(self.results) + 1}]: "
f"log={lr}, estimated={er}, ground_truth={gr}"
)
self.results.append(
EstimatorResult(
log_reward=result.log_reward,
estimated_reward=result.estimated_reward,
ground_truth_reward=gr,
estimated_weight=result.estimated_weight,
)
)
def report(self):
ert = torch.tensor(
[res.estimated_reward for res in self.results],
dtype=torch.double,
device=self.device,
)
lrt = torch.tensor(
[res.log_reward for res in self.results],
dtype=torch.double,
device=self.device,
)
grt = torch.tensor(
[
res.ground_truth_reward if res.ground_truth_reward is not None else 0.0
for res in self.results
],
dtype=torch.double,
device=self.device,
)
self._estimated_log_diff = ResultDiffs(ert - lrt)
self._estimated_ground_truth_diff = ResultDiffs(ert - grt)
return (
lrt.mean().item(),
ert.mean().item(),
grt.mean().item(),
ResultDiffs(ert - grt),
ResultDiffs(ert - lrt),
torch.tensor([float(res.estimated_weight) for res in self.results])
.mean()
.item(),
)
@dataclass(frozen=True)
class EstimatorSampleResult:
log_reward: float
target_reward: float
ground_truth_reward: float
weight: float
def __repr__(self):
return (
f"EstimatorSampleResult(log={self.log_reward}"
f",tgt={self.target_reward},gt={self.ground_truth_reward}"
f",wgt={self.weight}"
)
class Estimator(ABC):
"""
Estimator interface
"""
def __init__(self, device=None):
self._device = device
def _compute_metric_data(
self, tgt_rewards: Tensor, logged_score: float
) -> Tuple[float, float, float]:
"""
Given a sequence of scores, normalizes the target score by the average logged score
and computes the standard error of the target score. Normalizing by the logged score
can provide a better metric to compare models against.
"""
if len(tgt_rewards.shape) > 1:
assert tgt_rewards.shape[1] == 1
tgt_rewards = tgt_rewards.reshape((tgt_rewards.shape[0],))
if logged_score < SCORE_THRESHOLD:
normalizer = 0.0
else:
normalizer = 1.0 / logged_score
std_err = bootstrapped_std_error_of_mean(tgt_rewards)
return (
torch.mean(tgt_rewards).item() * normalizer,
std_err,
std_err * normalizer,
)
@abstractmethod
def evaluate(
self, input, **kwargs
) -> Optional[Union[EstimatorResult, EstimatorResults]]:
pass
def __repr__(self):
return f"{self.__class__.__name__}(device({self._device}))"
def run_evaluation(
file_name: str,
) -> Optional[Mapping[str, Iterable[EstimatorResults]]]:
logger.info(f"received filename {file_name}")
try:
with open(file_name, "rb") as fp:
estimators, inputs = pickle.load(fp)
    except Exception as err:
        logger.error(f"failed loading {file_name}: {err}")
        return None
results = {}
for estimator in estimators:
estimator_name = repr(estimator)
estimator_results = []
for input in inputs:
try:
estimator_results.append(estimator.evaluate(input))
except Exception as err:
logger.error(f"{estimator_name} error {err}")
results[repr(estimator)] = estimator_results
return results
class Evaluator:
"""
Multiprocessing evaluator
"""
def __init__(
self,
experiments: Iterable[Tuple[Iterable[Estimator], object]],
max_num_workers: int,
):
"""
Args:
estimators: estimators to be evaluated
experiments:
max_num_workers: <= 0 no multiprocessing
otherwise create max_num_workers processes
"""
self._experiments = experiments
self._tasks = None
if max_num_workers > 0:
self._tasks = [[] for _ in range(max_num_workers)]
for i, experiment in enumerate(experiments):
self._tasks[i % max_num_workers].append(experiment)
def evaluate(self) -> Mapping[str, EstimatorResults]:
results = {}
if self._tasks is None:
for estimators, input in self._experiments:
for estimator in estimators:
estimator_name = repr(estimator)
if estimator_name in results:
result = results[estimator_name]
else:
result = EstimatorResults()
results[estimator_name] = result
result.append(estimator.evaluate(input))
else:
tmp_files = []
tmp_file_names = []
for task in self._tasks:
fp = tempfile.NamedTemporaryFile()
pickle.dump(task, fp, protocol=pickle.HIGHEST_PROTOCOL)
fp.flush()
tmp_files.append(fp)
tmp_file_names.append(fp.name)
with Pool(len(tmp_file_names)) as pool:
evaluation_results = pool.map(run_evaluation, tmp_file_names)
for tmp_file in tmp_files:
tmp_file.close()
for evaluation_result in evaluation_results:
if evaluation_result is None:
continue
for estimator_name, estimator_results in evaluation_result.items():
if estimator_name in results:
result = results[estimator_name]
else:
result = EstimatorResults()
results[estimator_name] = result
for estimator_result in estimator_results:
result.append(estimator_result)
return results
@staticmethod
def report_results(results: Mapping[str, EstimatorResults]):
for name, result in results.items():
log_r, tgt_r, gt_r, tgt_gt, tgt_log, weight = result.report()
print(
f"{name} rewards: log_reward{log_r} tgt_reward[{tgt_r}] gt_reward[{gt_r}]"
f", diffs: tgt-gt[{tgt_gt}] tgt-log[{tgt_log}]",
flush=True,
)
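# Minimal usage sketch (hedged: my_estimator and my_input are placeholders,
# not part of this module); max_num_workers=0 runs everything in-process:
#
#   evaluator = Evaluator([([my_estimator], my_input)], max_num_workers=0)
#   Evaluator.report_results(evaluator.evaluate())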
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
5ef3284f1486ca9ee29e8dec423629dcea7fd80f
|
8b40cbc0b0f61d79f02e91ae77e495ba3b261f41
|
/docs/src/main/sphinx/conf.py
|
25dec8253fa455ff834174bf74d22741ab87e212
|
[
"Apache-2.0"
] |
permissive
|
wutao0914/Addax
|
bbf94bf41747dd6c2a6e23cb00bd073d2e4a0873
|
2a07616997a8196450ccb06aec8470bfc55aef0b
|
refs/heads/master
| 2023-06-29T17:45:38.094235
| 2021-08-03T07:43:56
| 2021-08-03T07:43:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,549
|
py
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Addax documentation build configuration file (originally derived from Presto's)
#
# This file is execfile()d with the current directory set to its containing dir.
#
import os
import sys
import xml.dom.minidom
try:
sys.dont_write_bytecode = True
except Exception:
pass
sys.path.insert(0, os.path.abspath('ext'))
def child_node(node, name):
for i in node.childNodes:
if (i.nodeType == i.ELEMENT_NODE) and (i.tagName == name):
return i
return None
def node_text(node):
return node.childNodes[0].data
def maven_version(pom):
dom = xml.dom.minidom.parse(pom)
project = dom.childNodes[0]
version = child_node(project, 'version')
if version:
return node_text(version)
parent = child_node(project, 'parent')
version = child_node(parent, 'version')
return node_text(version)
def get_version():
version = os.environ.get('ADDAX_VERSION', '').strip()
return version or maven_version('../../../pom.xml')
# -- General configuration -----------------------------------------------------
needs_sphinx = '2.0'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'recommonmark',
'sphinx_markdown_tables',
]
# templates_path = ['templates']
source_suffix = ['.rst','.md']
# source_parsers = {
# '.md': CommonMarkParser,
# }
master_doc = 'index'
project = u'Addax'
version = get_version()
release = version
exclude_patterns = ['_build']
highlight_language = 'sql'
# default_role = 'backquote'
# -- Options for HTML output ---------------------------------------------------
# html_theme_path = ['themes']
html_theme = 'sphinx_rtd_theme'
html_title = '%s %s Documentation' % (project, release)
html_logo = 'images/addax-logo.png'
html_add_permalinks = '#'
html_show_copyright = False
html_show_sphinx = False
# html_sidebars = {
# "**": ['logo-text.html', 'globaltoc.html', 'localtoc.html', 'searchbox.html']
# }
|
[
"wgzhao@gmail.com"
] |
wgzhao@gmail.com
|
eea391d7180a0c41cb83d97792b66fc90728bfd0
|
6cceeeea6fb4f103ef01b31881aab56982290af1
|
/supervised_learning/0x03-optimization/11-learning_rate_decay.py
|
f5a6728c6040a52b8f936b824da447b117ba7c24
|
[] |
no_license
|
KatyaKalache/holbertonschool-machine_learning
|
7be17a2abf5873f2eb0e82804074ef388eb8cda6
|
7a23ec62d7deee6260067c8125f0e32ac9ef9f0e
|
refs/heads/master
| 2020-04-11T10:46:14.714349
| 2020-04-10T08:35:21
| 2020-04-10T08:35:21
| 161,725,673
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
#!/usr/bin/env python3
"""
Updates the learning rate using inverse time decay in numpy
"""


def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
    """
    Returns: the updated value for alpha, decayed in a stepwise
    fashion every decay_step steps
    """
    return alpha / (1 + decay_rate * (global_step // decay_step))
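# Quick check of the schedule: with alpha=0.1, decay_rate=1, decay_step=10,
# steps 0-9 give 0.1, steps 10-19 give 0.05, steps 20-29 give ~0.0333:
#
#   for step in (0, 10, 20):
#       print(learning_rate_decay(0.1, 1, step, 10))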
|
[
"katya@kalache.fr"
] |
katya@kalache.fr
|
e14fa0f754db6caa0b959315e215e069d526c9a5
|
1470e5ae228a9e6758dfa2f9b658e09004f70b26
|
/order/models.py
|
88c68b18f760d1815ff8b2f40345b714a1f0884c
|
[] |
no_license
|
stephenmom/supermarket
|
927c0d14c4a4715f6711d81405c47b5dbe1a3417
|
b35e999872d4cadbc9a013ede5aaadb47513e755
|
refs/heads/master
| 2020-04-07T04:35:22.338232
| 2018-12-01T08:24:26
| 2018-12-01T08:24:26
| 158,059,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,422
|
py
|
from django.db import models
# Create your models here.
class Payment(models.Model):
    """
    Payment method
    """
    pay_name = models.CharField(verbose_name='payment method',
                                max_length=20
                                )
    payment_add_time = models.DateTimeField(auto_now_add=True, verbose_name="payment method added at")
    payment_edit_time = models.DateTimeField(auto_now=True, verbose_name="payment method edited at")
    payment_status = models.BooleanField(default=False, verbose_name="payment method deleted flag")
    class Meta:
        verbose_name = "payment method"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.pay_name
class Transport(models.Model):
    """
    Delivery method:
    name
    price
    """
    name = models.CharField(verbose_name='delivery method',
                            max_length=20
                            )
    money = models.DecimalField(verbose_name='amount',
                                max_digits=9,
                                decimal_places=2,
                                default=0
                                )
    transport_add_time = models.DateTimeField(auto_now_add=True, verbose_name="delivery method added at")
    transport_edit_time = models.DateTimeField(auto_now=True, verbose_name="delivery method edited at")
    transport_status = models.BooleanField(default=False, verbose_name="delivery method deleted flag")
    class Meta:
        verbose_name = "delivery method"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class OrderInfo(models.Model):
    """
    Order info table:
    id
    order number
    order total
    user id
    recipient name
    recipient phone
    order address
    order status
    shipping method
    payment method
    amount actually paid
    added time
    edited time
    deleted flag
    """
    ORDER_STATUS = (
        (0, "awaiting payment"),
        (1, "awaiting shipment"),
        (2, "shipped"),
        (3, "completed"),
        (4, "reviewed"),
        (5, "refund requested"),
        (6, "refunded"),
        (7, "order cancelled")
    )
    order_number = models.CharField(max_length=64, verbose_name="order number", unique=True)
    order_money = models.DecimalField(max_digits=9, decimal_places=2, verbose_name="order total", default=0)
    user_id = models.ForeignKey(to="user_info.Users", verbose_name="user id")
    order_user_name = models.CharField(max_length=32, verbose_name="recipient")
    order_user_phone = models.CharField(max_length=16, verbose_name="recipient phone")
    order_address = models.CharField(max_length=50, verbose_name="order address")
    order_status_now = models.IntegerField(choices=ORDER_STATUS, verbose_name='order status', default=0)
    order_transport = models.CharField(max_length=50, verbose_name="shipping method", null=True, blank=True)
    order_transport_price = models.DecimalField(max_digits=9, decimal_places=2, verbose_name="shipping price")
    order_payment = models.ForeignKey(to="Payment", verbose_name="payment method", null=True, blank=True)
    really_pay_money = models.DecimalField(max_digits=9, decimal_places=2, verbose_name="amount actually paid", default=0)
    ORDER_add_time = models.DateTimeField(auto_now_add=True, verbose_name="order info added at")
    ORDER_edit_time = models.DateTimeField(auto_now=True, verbose_name="order info edited at")
    ORDER_status = models.BooleanField(default=False, verbose_name="order info deleted flag")
    class Meta:
        verbose_name = "order info management"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.order_number
class OrderGoods(models.Model):
    """
    Order goods table:
    order id
    product SKU id
    quantity
    price
    """
    order = models.ForeignKey(to="OrderInfo", verbose_name="order id")
    goods_sku = models.ForeignKey(to="product.ProSKU", verbose_name="SKU id of the ordered product")
    count = models.IntegerField(verbose_name="quantity of the ordered product")
    price = models.DecimalField(verbose_name="price of the ordered product", max_digits=9, decimal_places=2)
    def __str__(self):
        return self.order.order_number
    class Meta:
        db_table = "order_goods"
        verbose_name = "order goods management"
        verbose_name_plural = verbose_name
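# Illustrative shell usage (hedged: the field values and the related user
# object are made up for the example):
#
#   pay = Payment.objects.create(pay_name='Alipay')
#   order = OrderInfo.objects.create(order_number='20181201-0001',
#                                    user_id=some_user,
#                                    order_user_name='Zhang San',
#                                    order_user_phone='13800000000',
#                                    order_address='...',
#                                    order_transport_price=0,
#                                    order_payment=pay)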
|
[
"378399539@qq.com"
] |
378399539@qq.com
|
6067bbd37e8a224f7631ed115119aaf1aadcdd13
|
b578620339b4ec1512aa7b5600b2743979eb6277
|
/order/admin.py
|
f624eef469c78013ec2bdee4d187a9fb8a9fbb02
|
[] |
no_license
|
yurabysaha/de_market
|
4c8e62e075d8105b74192b413a38fad05a3d91df
|
7f62245d9016f793ba22444273ea30304d279f88
|
refs/heads/master
| 2022-12-14T14:48:24.784171
| 2019-07-05T11:40:29
| 2019-07-05T11:40:29
| 131,822,227
| 0
| 1
| null | 2022-12-08T02:04:12
| 2018-05-02T08:36:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from order.models import Order
class ItemInline(admin.TabularInline):
model = Order.items.through
extra = 0
model._meta.verbose_name_plural = _("Order Items")
class OrderAdmin(admin.ModelAdmin):
"""Add items to Order in Admin"""
inlines = [ItemInline]
list_filter = ('status',)
search_fields = ('pk',)
list_display = ('__str__', 'total', 'status', 'payment_method')
exclude = ('items', 'user_session')
admin.site.register(Order, OrderAdmin)
|
[
"yurabysaha@gmail.com"
] |
yurabysaha@gmail.com
|
1a769f9ffd96e7be46b22935ea0efd109b45e026
|
508100dd49629021d4dcc7c761ee64fac5e0210b
|
/watson/framework/debug/panels/__init__.py
|
485157ccb8a8baa9d6aa19a22f085fda0392b9d2
|
[
"BSD-2-Clause"
] |
permissive
|
watsonpy/watson-framework
|
e3f4c92ea9c35e789365a8a007bb883ad5dee928
|
ffe157cb3fe24366ee55869d4260cce778012b4a
|
refs/heads/master
| 2021-05-24T02:48:19.753723
| 2019-10-03T04:28:39
| 2019-10-03T04:28:39
| 16,093,806
| 69
| 8
|
BSD-3-Clause
| 2021-03-29T20:36:27
| 2014-01-21T05:39:46
|
Python
|
UTF-8
|
Python
| false
| false
| 411
|
py
|
# -*- coding: utf-8 -*-
from watson.framework.debug.panels.application import Panel as Application # noqa
from watson.framework.debug.panels.framework import Panel as Framework # noqa
from watson.framework.debug.panels.profile import Panel as Profile # noqa
from watson.framework.debug.panels.request import Panel as Request # noqa
from watson.framework.debug.panels.logging import Panel as Logging # noqa
|
[
"simon.coulton@gmail.com"
] |
simon.coulton@gmail.com
|
05df224ac370a076e23826a50a19a9f620729ab2
|
cfb6883e85b799756440411c13c6c8044490dc52
|
/kaggle/safe_driver/safe_driver_lgb_FM.py
|
fa661e57f050fd41aedd9a1602a14cc32c0ae8b8
|
[] |
no_license
|
WonderAndMaps/CodeStorage
|
28d1b4ff7cbcd814402dcc60b6982d59a17c8d88
|
9a21cbe586c2c91dc6ccde788bf32e78f22e86a0
|
refs/heads/master
| 2020-03-22T10:29:51.864519
| 2019-04-22T19:31:12
| 2019-04-22T19:31:12
| 139,907,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,226
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 18:03:00 2017
@author: dell
"""
import lightgbm as lgb
from lightgbm.sklearn import LGBMClassifier
import os
import pandas as pd
from sklearn import metrics
import datetime
os.chdir('C:\\Users\\dell\\Desktop\\Safe driver')
train = pd.read_csv("train.csv")
cat_cols = [col for col in train.columns if '_cat' in col]
#one-hot
train = pd.get_dummies(train, columns=cat_cols)
predictors = [x for x in train.columns if x not in ['target', 'id']]
def lgbfit(alg, dtrain, predictors, useTrainCV=True, cv_folds=5, early_stopping_rounds=50, dtest=None):
starttime = datetime.datetime.now()
if useTrainCV:
lgb_param = alg.get_params()
ltrain = lgb.Dataset(dtrain[predictors].values, label=dtrain['target'].values)
# ltest = lgb.Dataset(dtest[predictors].values)
cvresult = lgb.cv(lgb_param, ltrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
early_stopping_rounds=early_stopping_rounds, verbose_eval=False, metrics='auc')
alg.set_params(n_estimators=len(cvresult['auc-mean']))
print("cv score:", cvresult['auc-mean'][-1])
#fit
alg.fit(dtrain[predictors], dtrain['target'], eval_metric='auc')
#prediction on train set
dtrain_predictions = alg.predict(dtrain[predictors])
dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
endtime = datetime.datetime.now()
#output
print("accuracy: ", metrics.accuracy_score(dtrain['target'].values, dtrain_predictions))
print("AUC score:", metrics.roc_auc_score(dtrain['target'], dtrain_predprob))
print("time spent: ", (endtime - starttime).seconds, "s")
lgb1 = LGBMClassifier(
boosting_type = 'gbdt',
learning_rate = 0.1,
n_estimators = 1000,
max_depth = 5,
min_child_weight = 1,
subsample = 0.8,
colsample_bytree = 0.8,
objective = 'binary',
n_jobs = 4,
random_state = 66)
lgbfit(lgb1, train, predictors, useTrainCV=True)
#predicted leaves as new features
predLeaf = pd.DataFrame(lgb1.apply(train[predictors]))
predLeaf = pd.get_dummies(predLeaf, columns=predLeaf.columns)
predLeaf.shape
#FM
|
[
"yfu@bazean.com"
] |
yfu@bazean.com
|
fc7948279f7238a48bcf8b27b3e480e488b08328
|
c4dfd3aacb7449b574bc8a337d4b449b84a10250
|
/4_kafka_connect_rest_proxy/exercise4.3.py
|
1a4bcd0ff4de0d38bc10ce176dd955239cebb6a0
|
[
"MIT"
] |
permissive
|
seoruosa/streaming-data-nanodegree
|
1a4c7549857e9dfcac09cf10e381c6e7ff5ed7fd
|
14961bc42c626e74ac23cc94f69e25eab39a2da1
|
refs/heads/master
| 2023-05-28T11:16:47.174927
| 2020-03-26T04:19:21
| 2020-03-26T04:19:21
| 231,152,564
| 0
| 0
|
MIT
| 2023-08-14T22:07:44
| 2019-12-31T22:27:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,662
|
py
|
# Please complete the TODO items in this code
import json
import requests
KAFKA_CONNECT_URL = "http://localhost:8083/connectors"
CONNECTOR_NAME = "jdbc-connector"
def configure_connector():
"""Calls Kafka Connect to create the Connector"""
print("creating or updating kafka connect connector...")
rest_method = requests.post
resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}")
if resp.status_code == 200:
return
#
# TODO: Complete the Kafka Connect Config below for a JDBC source connector.
# You should whitelist the `clicks` table, use incrementing mode and the
# incrementing column name should be id.
#
# See: https://docs.confluent.io/current/connect/references/restapi.html
# See: https://docs.confluent.io/current/connect/kafka-connect-jdbc/source-connector/source_config_options.html
#
resp = rest_method(
KAFKA_CONNECT_URL,
headers={"Content-Type": "application/json"},
data=json.dumps(
{
"name": "clicks-jdbc", # TODO
"config": {
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector", # TODO
"topic.prefix": "exercise3-", # TODO
"mode": "incrementing", # TODO
"incrementing.column.name": "id", # TODO
"table.whitelist": "clicks", # TODO
"tasks.max": 1,
"connection.url": "jdbc:postgresql://localhost:5432/classroom",
"connection.user": "root",
"key.converter": "org.apache.kafka.connect.json.JsonConverter",
"key.converter.schemas.enable": "false",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schemas.enable": "false",
},
}
),
)
# psql classroom
# kafka-topics --list --zookeeper localhost:2181
# kafka-console-consumer --topic exercise3-clicks --bootstrap-server localhost:9092 --from-beginning
# tail -f -n 10 /var/log/journal/confluent-kafka-connect.service.log
# https://www.postgresqltutorial.com/postgresql-administration/psql-commands/
# Ensure a healthy response was given
    try:
        resp.raise_for_status()
    except requests.exceptions.HTTPError:
        print(f"failed creating connector: {json.dumps(resp.json(), indent=2)}")
        exit(1)
print("connector created successfully.")
print("Use kafka-console-consumer and kafka-topics to see data!")
if __name__ == "__main__":
configure_connector()
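# Illustrative follow-up check against the Connect REST API (uses the
# connector name posted above):
#
#   status = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}/status")
#   print(status.json())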
|
[
"thiago.giachetto@gmail.com"
] |
thiago.giachetto@gmail.com
|
e340f100361951a498bb07f1744c08dc926eb9c5
|
9929ba720faf432a5bf3f5cc51dc9f429c24cb84
|
/QUANTTOOLS/Market/MarketTools/TradingTools/trading.py
|
6a435ccfa9a49189aa85ab900c650755f61fb1e8
|
[] |
no_license
|
chaopaoo12/QuantTools
|
45fb344fc085bd7a40d94f646d0982d6b93db1a8
|
2bb1c5ad6aab3d454cfe32b6e6c86107992bed0c
|
refs/heads/master
| 2023-08-18T04:03:11.944128
| 2023-08-13T10:58:49
| 2023-08-13T10:58:49
| 174,860,433
| 9
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,224
|
py
|
from QUANTAXIS.QAUtil import QA_util_log_info, QA_util_get_pre_trade_date
from QUANTTOOLS.Message.message_func.wechat import send_actionnotice
from QUANTTOOLS.Trader.account_manage.base_func.Client import get_Client,check_Client
from QUANTTOOLS.Trader.account_manage.TradAction.BUY import BUY
from QUANTTOOLS.Trader.account_manage.TradAction.SELL import SELL
from QUANTTOOLS.Trader.account_manage.TradAction.HOLD import HOLD
from QUANTTOOLS.Market.MarketTools.TradingTools.BuildTradingFrame import build
from QUANTTOOLS.QAStockETL.QAFetch.QATdx import QA_fetch_get_stock_realtm_bid
from QUANTTOOLS.QAStockETL.QAFetch.QAQuery import QA_fetch_stock_name
from QUANTTOOLS.Model.FactorTools.QuantMk import get_quant_data_hour, get_index_quant_hour
from QUANTTOOLS.Trader.account_manage.base_func.Client import get_UseCapital, get_StockPos, get_hold
import time
import datetime
def open_control(trading_date):
time_contrl_bf("09:30:00")
QA_util_log_info('##JOB Now Start Trading ==== {}'.format(str(trading_date)), ui_log = None)
def close_control(strategy_id, trading_date):
time_contrl_af("15:00:00")
QA_util_log_info('##JOB Trading Finished ==================== {}'.format(trading_date), ui_log=None)
send_actionnotice(strategy_id,'Trading Report:{}'.format(trading_date),'Trading Finished',direction = 'Trading',offset='Finished',volume=None)
def time_contrl_bf(tm_mark):
tm = int(datetime.datetime.now().strftime("%H%M%S"))
while tm <= int(time.strftime("%H%M%S",time.strptime(tm_mark, "%H:%M:%S"))):
time.sleep(15)
tm = int(datetime.datetime.now().strftime("%H%M%S"))
return(tm_mark)
def time_contrl_af(tm_mark):
tm = int(datetime.datetime.now().strftime("%H%M%S"))
while tm >= int(time.strftime("%H%M%S",time.strptime(tm_mark, "%H:%M:%S"))):
time.sleep(15)
tm = int(datetime.datetime.now().strftime("%H%M%S"))
return(tm_mark)
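# Behavior note: time_contrl_bf spins until the clock has passed tm_mark,
# while time_contrl_af spins for as long as the clock reads at or after
# tm_mark; both poll every 15 seconds. E.g. time_contrl_bf("09:30:00")
# blocks until the 09:30 open.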
def trade_roboot(target_tar, account, trading_date, percent, strategy_id, type='end', exceptions = None, test = False):
QA_util_log_info('##JOB Now Get Account info ==== {}'.format(str(trading_date)), ui_log = None)
client = get_Client()
QA_util_log_info('##JOB Now Cancel Orders ===== {}'.format(str(trading_date)), ui_log = None)
client.cancel_all(account)
QA_util_log_info(target_tar, ui_log = None)
sub_accounts, frozen, positions, frozen_positions = check_Client(client, account, strategy_id, trading_date, exceptions=exceptions)
account_info = client.get_account(account)
if target_tar is None:
        QA_util_log_info('Liquidation triggered ==================== {}'.format(trading_date), ui_log=None)
        send_actionnotice(strategy_id,'Liquidation triggered:{}'.format(trading_date),'Liquidation triggered',direction = 'SELL',offset='SELL',volume=None)
        #e = send_trading_message(account, strategy_id, account_info, None, "Liquidation triggered", None, 0, direction = 'SELL', type='MARKET', priceType=4,price=None, client=client)
QA_util_log_info('##JOB Now Build Trading Frame ===== {}'.format(str(trading_date)), ui_log = None)
res = build(target_tar, positions, sub_accounts, percent)
res1 = res
QA_util_log_info(res[['NAME','INDUSTRY','deal','close','目标持股数','股票余额','可用余额','冻结数量']])
    send_actionnotice(strategy_id,'Trading report:{}'.format(trading_date),'Trading started',direction = 'HOLD',offset='HOLD',volume=None)
while res.deal.apply(abs).sum() > 0:
QA_util_log_info('##JOB Now Start Trading ===== {}'.format(str(trading_date)), ui_log = None)
QA_util_log_info('##JOB Now Check Timing ===== {}'.format(str(trading_date)), ui_log = None)
tm = int(datetime.datetime.now().strftime("%H%M%S"))
target_ea = int(time.strftime("%H%M%S", time.strptime("09:25:00", "%H:%M:%S")))
target_af = int(time.strftime("%H%M%S", time.strptime("15:00:00", "%H:%M:%S")))
if tm >= target_af:
            QA_util_log_info('Past trading hours {hour} ==================== {date}'.format(hour = tm, date = trading_date), ui_log=None)
            send_actionnotice(strategy_id,'Trading report:{}'.format(trading_date),'Past trading hours',direction = 'HOLD',offset='HOLD',volume=None)
if test == False:
break
#if tm >= target_af:
# break
QA_util_log_info('##JOB Now Start Selling ===== {}'.format(str(trading_date)), ui_log = None)
if res[res['deal']<0].shape[0] == 0:
            QA_util_log_info('No sell actions ==================== {}'.format(trading_date), ui_log=None)
else:
for code in res[res['deal'] < 0].index:
QA_util_log_info('##JOB Now Prepare Selling {code} Info ==== {date}'.format(code = code, date = str(trading_date)), ui_log = None)
target_pos = float(res.loc[code]['目标持股数'])
target = float(res.loc[code]['股票余额'])
name = res.loc[code]['NAME']
industry = res.loc[code]['INDUSTRY']
deal_pos = abs(float(res.loc[code]['deal']))
close = float(res.loc[code]['close'])
QA_util_log_info('##JOB Now Start Selling {code} ==== {date}'.format(code = code, date = str(trading_date)), ui_log = None)
SELL(client, account, strategy_id, account_info,trading_date, code, name, industry, deal_pos, target_pos, target, close, type = type, test = test)
time.sleep(3)
time.sleep(15)
QA_util_log_info('##JOB Now Start Holding ===== {}'.format(str(trading_date)), ui_log = None)
if res[res['deal'] == 0].shape[0] == 0:
            QA_util_log_info('No continued-hold actions ==================== {}'.format(trading_date), ui_log=None)
else:
for code in res[res['deal'] == 0].index:
QA_util_log_info('##JOB Now Prepare Holding {code} Info ===== {date}'.format(code = code, date = str(trading_date)), ui_log = None)
target_pos = float(res.loc[code]['目标持股数'])
target = float(res.loc[code]['股票余额'])
name = res.loc[code]['NAME']
industry = res.loc[code]['INDUSTRY']
deal_pos = abs(float(res.loc[code]['deal']))
close = float(res.loc[code]['close'])
QA_util_log_info('##JOB Now Start Holding {code} ===== {date}'.format(code = code, date = str(trading_date)), ui_log = None)
HOLD(strategy_id, account_info,trading_date, code, name, industry, target_pos, target)
time.sleep(1)
QA_util_log_info('##JOB Now Start Buying ===== {}'.format(str(trading_date)), ui_log = None)
if res[res['deal'] > 0].shape[0] == 0:
            QA_util_log_info('No buy actions ==================== {}'.format(trading_date), ui_log=None)
else:
for code in res[res['deal'] > 0].index:
QA_util_log_info('##JOB Now Prepare Buying {code} Info ===== {date}'.format(code = code, date = str(trading_date)), ui_log = None)
target_pos = float(res.loc[code]['目标持股数'])
target = float(res.loc[code]['股票余额'])
name = res.loc[code]['NAME']
industry = res.loc[code]['INDUSTRY']
deal_pos = abs(float(res.loc[code]['deal']))
close = float(res.loc[code]['close'])
QA_util_log_info('##JOB Now Start Buying {code} ===== {date}'.format(code = code, date = str(trading_date)), ui_log = None)
BUY(client, account, strategy_id, account_info,trading_date, code, name, industry, deal_pos, target_pos, target, close, type, test = test)
time.sleep(3)
time.sleep(30)
if type == 'end':
QA_util_log_info('##JOB Now Cancel Orders ===== {}'.format(str(trading_date)), ui_log = None)
client.cancel_all(account)
QA_util_log_info('##JOB Now Refresh Account Info ==== {}'.format(str(trading_date)), ui_log = None)
sub_accounts, frozen, positions, frozen_positions = check_Client(client, account, strategy_id, trading_date, exceptions=exceptions)
sub_accounts = sub_accounts - frozen
QA_util_log_info('##JOB Now ReBuild Trading Frame ==== {}'.format(str(trading_date)), ui_log = None)
res = build(target_tar, positions, sub_accounts, percent)
elif type == 'morning':
QA_util_log_info('##JOB Now Morning Trading Success ==== {}'.format(str(trading_date)), ui_log = None)
break
else:
QA_util_log_info('##Trading type must in [end, morning] ==== {}'.format(str(trading_date)), ui_log = None)
break
    QA_util_log_info('Trading complete ==================== {}'.format(trading_date), ui_log=None)
    send_actionnotice(strategy_id,'Trading report:{}'.format(trading_date),'Trading complete',direction = 'HOLD',offset='HOLD',volume=None)
return(res1)
def trade_roboot2(target_tar, account, trading_date, percent, strategy_id, type='end', exceptions = None, test = False):
QA_util_log_info('##JOB Now Check Timing ==== {}'.format(str(trading_date)), ui_log = None)
if target_tar is None:
t_list = []
else:
t_list = list(target_tar.index)
tm = datetime.datetime.now().strftime("%H:%M:%S")
morning_begin = "09:30:00"
morning_end = "11:30:00"
afternoon_begin = "13:00:00"
afternoon_end = "15:00:00"
ontm_list = ["10:30:00","11:30:00","14:00:00", "15:00:00"]
marktm_list = ['10:00:00',"10:30:00",'11:00:00',"11:30:00",'13:30:00',"14:00:00",'14:30:00',"15:00:00"]
action_list = ["09:30:00",'10:00:00',"10:30:00",'11:00:00','13:00:00','13:30:00',"14:00:00",'14:30:00']
    ## init: determine the current time window
a = marktm_list + [tm]
a.sort()
if a.index(tm) == 0:
mark_tm = '09:30:00'
elif a.index(tm) == len(a)-1:
mark_tm = '15:00:00'
else:
mark_tm = a[a.index(tm)-1]
if mark_tm == '11:30:00':
action_tm='13:00:00'
else:
action_tm=mark_tm
QA_util_log_info('##JOB Now Init Time Mark mark_tm:{}, action_tm:{}'.format(mark_tm, action_tm), ui_log = None)
source_data = None
tm = int(time.strftime("%H%M%S",time.strptime(tm, "%H:%M:%S")))
QA_util_log_info('##JOB Now Start Trading ==== {}'.format(str(trading_date)), ui_log = None)
while tm <= int(time.strftime("%H%M%S",time.strptime(afternoon_end, "%H:%M:%S"))):
QA_util_log_info('##JOB Now Get Account info ==== {}'.format(str(trading_date)), ui_log = None)
client = get_Client()
try:
client.cancel_all(account)
time.sleep(2)
except:
QA_util_log_info('##JOB Cancel Orders Failed==== {}'.format(str(trading_date)), ui_log = None)
sub_accounts, frozen, positions, frozen_positions = check_Client(client, account, strategy_id, trading_date, exceptions=exceptions)
positions = positions[positions['股票余额'] > 0]
account_info = client.get_account(account)
QA_util_log_info('##JOB Now Build Trading Frame ==== {}'.format(str(trading_date)), ui_log = None)
if mark_tm == "09:30:00":
stm = QA_util_get_pre_trade_date(trading_date) + ' ' + '15:00:00'
else:
stm = trading_date + ' ' + mark_tm
QA_util_log_info('##JOB Now Time {} ==== {}'.format(str(mark_tm),str(stm)), ui_log = None)
        ## analyze the data
while tm < int(time.strftime("%H%M%S",time.strptime(mark_tm, "%H:%M:%S"))):
time.sleep(60)
tm = int(datetime.datetime.now().strftime("%H%M%S"))
if tm >= int(time.strftime("%H%M%S",time.strptime(mark_tm, "%H:%M:%S"))):
index = get_index_quant_hour(QA_util_get_pre_trade_date(trading_date,10),trading_date,code=['000001','399001','399005','399006'],type='real').reset_index()
index = index[index.datetime == stm].set_index('code')
            if index.loc['000001'].SKDJ_K_30M >= 75 and index.loc['000001'].SKDJ_TR_30M > 0 and index.loc['000001'].SKDJ_K_HR > 75:
                QA_util_log_info('##JOB Pause chasing highs ==== {}'.format(str(stm)), ui_log = None)
                # sell and no buy: consolidation at highs
                buy = True
                pass
            elif index.loc['000001'].SKDJ_K_30M < 75 or index.loc['000001'].SKDJ_TR_30M > 0:
                QA_util_log_info('##JOB Chase the rally ==== {}'.format(str(stm)), ui_log = None)
                buy=True
                pass
            elif index.loc['000001'].SKDJ_K_30M >= 75 and index.loc['000001'].SKDJ_TR_30M < 0:
                QA_util_log_info('##JOB Falling from highs ==== {}'.format(str(stm)), ui_log = None)
                # hold: downtrend continuation
                buy = True
                pass
            elif index.loc['000001'].SKDJ_K_30M <= 25 and index.loc['000001'].SKDJ_TR_30M < 0:
                QA_util_log_info('##JOB Entering the low zone ==== {}'.format(str(stm)), ui_log = None)
                buy=True
                pass
            else:
                buy=True
                pass
        if mark_tm in ontm_list or source_data is None:
            # on the hour
QA_util_log_info('##Now Mark Time {},Stm {}, Stock {}'.format(mark_tm,str(stm),len(list(set(positions.code.tolist()+t_list)))))
data = get_quant_data_hour(QA_util_get_pre_trade_date(trading_date,10),trading_date,list(set(positions.code.tolist()+t_list)), type= 'real')
hour_data = data[['SKDJ_K_15M','SKDJ_TR_15M','SKDJ_K_30M','SKDJ_TR_30M','SKDJ_K_HR','SKDJ_TR_HR','SKDJ_CROSS2_30M','SKDJ_CROSS1_30M','CROSS_JC_30M','SKDJ_CROSS2_HR','SKDJ_CROSS1_HR','CROSS_JC_HR','CROSS_SC_HR','MA5_HR','MA5_30M','MA10_HR','MA60_HR','CCI_HR','CCI_CROSS1_HR','CCI_CROSS2_HR']]
source_data = hour_data.reset_index()
source_data = source_data[source_data.datetime == stm].set_index('code')
else:
QA_util_log_info('##Now Mark Time {},Stm {}, Stock {}'.format(mark_tm,str(stm),len(list(set(positions.code.tolist()+t_list)))))
hour_data = get_quant_data_hour(QA_util_get_pre_trade_date(trading_date,10),trading_date,list(set(positions.code.tolist()+t_list)), type= 'real')
source_data = hour_data.reset_index()
source_data = source_data[source_data.datetime == stm].set_index('code')[['SKDJ_K_15M','SKDJ_TR_15M','SKDJ_K_30M','SKDJ_TR_30M','SKDJ_K_HR','SKDJ_TR_HR','SKDJ_CROSS2_30M','SKDJ_CROSS1_30M','CROSS_JC_30M','SKDJ_CROSS2_HR','SKDJ_CROSS1_HR','CROSS_JC_HR','CROSS_SC_HR','MA5_HR','MA5_30M','MA10_HR','MA60_HR','CCI_HR','CCI_CROSS1_HR','CCI_CROSS2_HR']]
            #### job1: hourly report; hourly index tracking
target_list = list(source_data.sort_values('SKDJ_K_HR').index)
#QA_util_log_info('##JOB Now cross1 ==== {}: {}'.format(str(stm), str(source_data[source_data.SKDJ_CROSS1_30M == 1][['SKDJ_K_30M','SKDJ_TR_30M','SKDJ_TR_HR','SKDJ_CROSS2_30M','SKDJ_CROSS1_30M','SKDJ_CROSS1_HR','SKDJ_CROSS2_HR','MA5_30M','SKDJ_K_HR','MA5_HR']])), ui_log = None)
#QA_util_log_info('##JOB Now cross2 ==== {}: {}'.format(str(stm), str(source_data[source_data.SKDJ_CROSS2_30M == 1][['SKDJ_K_30M','SKDJ_TR_30M','SKDJ_TR_HR'','SKDJ_CROSS1_30M','SKDJ_CROSS1_HR','SKDJ_CROSS2_HR','MA5_30M','SKDJ_K_HR','MA5_HR']])), ui_log = None)
while tm <= int(time.strftime("%H%M%S",time.strptime(morning_begin, "%H:%M:%S"))):
QA_util_log_info('##JOB Not Start Time ==== {}'.format(str(trading_date)), ui_log = None)
time.sleep(15)
tm = int(datetime.datetime.now().strftime("%H%M%S"))
while tm >= int(time.strftime("%H%M%S",time.strptime(morning_end, "%H:%M:%S"))) and tm <= int(time.strftime("%H%M%S",time.strptime(afternoon_begin, "%H:%M:%S"))):
QA_util_log_info('##JOB Not Trading Time ==== {}'.format(str(trading_date)), ui_log = None)
time.sleep(60)
tm = int(datetime.datetime.now().strftime("%H%M%S"))
QA_util_log_info(tm)
##action
while tm <= int(time.strftime("%H%M%S",time.strptime(action_tm, "%H:%M:%S"))) and action_tm is not None:
time.sleep(60)
tm = int(datetime.datetime.now().strftime("%H%M%S"))
if tm > int(time.strftime("%H%M%S",time.strptime(action_tm, "%H:%M:%S"))) and action_tm is not None:
for code in positions[positions['股票余额'] > 0].code.tolist() + target_list:
name = QA_fetch_stock_name(code).values[0]
QA_util_log_info('##JOB Now Code {stm} ==== {code}({name})'.format(stm=str(stm),code=str(code),name=str(name)), ui_log = None)
try:
res2 = source_data.loc[code]
QA_util_log_info(res2)
QA_util_log_info('{code}{name}-{stm}:hourly: {hourly}'.format(code=code,name=name,stm=stm,hourly=res2.SKDJ_TR_HR))
except:
res2 = None
QA_util_log_info('error')
#try:
if res2 is not None and 'DR' not in name:
QA_util_log_info('##JOB not DR Day ==== {}'.format(code), ui_log = None)
if code in positions[positions['可用余额'] > 0].code.tolist():
QA_util_log_info('##JOB Now Selling Check ==== {}'.format(code), ui_log = None)
                    if res2.SKDJ_CROSS1_HR == 1 and res2.SKDJ_TR_30M < 0 and round(res2.MA5_HR,2) < 0:
                        msg = 'SKDJ death cross'
                    elif res2.SKDJ_TR_30M < 0 and round(res2.MA5_HR,2) < 0:
                        msg = 'SKDJ stop loss: 30M broke below MA5'
                        # msg = None
                    elif res2.SKDJ_TR_HR < 0 and res2.SKDJ_TR_30M < 0 and round(res2.MA5_HR,2) < 0:
                        msg = 'SKDJ stop loss: HR broke below MA5'
                    else:
                        msg = None
                    ### sell signal 1
if msg is not None:
QA_util_log_info('##JOB Now Selling ==== {}'.format(code), ui_log = None)
deal_pos = get_StockPos(code, client, account)
target_pos = 0
industry = str(positions.set_index('code').loc[code].INDUSTRY)
try_times = 0
while deal_pos > 0 and try_times <= 5:
client.cancel_all(account)
                            send_actionnotice(strategy_id,'{code}{name}:{stm}{msg}'.format(code=code,name=name,stm=stm, msg=msg),'Sell signal',direction = 'SELL',offset=mark_tm,volume=None)
deal_pos = get_StockPos(code, client, account)
QA_util_log_info('##JOB Now Start Selling {code} ==== {stm}{msg}'.format(code = code, stm = str(stm), msg=msg), ui_log = None)
SELL(client, account, strategy_id, account_info, trading_date, code, name, industry, deal_pos, target_pos, target=None, close=0, type = 'end', test = test)
time.sleep(3)
deal_pos = get_StockPos(code, client, account)
try_times += 1
else:
QA_util_log_info('##JOB Not On Selling ==== {}'.format(code))
if code in [i for i in t_list if i not in positions[positions['股票余额'] > 0].code.tolist()]:
                    QA_util_log_info('##JOB Now Buying Check ==== {}'.format(code), ui_log = None)
                    QA_util_log_info('##JOB Not On Buying ==== {} SKDJ_CROSS2_HR:{} CROSS_JC_HR:{} SKDJ_K_30M:{} SKDJ_TR_30M:{}'.format(code, res2.SKDJ_CROSS2_HR, res2.CROSS_JC_HR, res2.SKDJ_K_30M, res2.SKDJ_TR_30M))
                    if res2.SKDJ_CROSS2_HR == 1 and res2.SKDJ_K_30M < 60 and res2.SKDJ_TR_30M > 0 and round(res2.MA5_30M,2) >= 0:
                        msg = 'SKDJ: 60MIN golden cross, chase the rally; hourly K:{}'.format(res2.SKDJ_K_HR)
                    #elif res2.SKDJ_CROSS2_30M == 1 and res2.SKDJ_K_15M <= 50 and res2.SKDJ_K_15M > 0 and round(res2.MA5_30M,2) >= 0:
                    #    msg = 'SKDJ: 30MIN golden cross, buy the dip; 30M K:{}'.format(res2.SKDJ_K_15M)
                    elif res2.SKDJ_CROSS2_30M == 1 and res2.SKDJ_K_30M <= 40 and res2.SKDJ_TR_HR < 0 and res2.SKDJ_K_HR < 40 and round(res2.MA5_30M,2) >= 0:
                        msg = 'SKDJ: 30MIN golden cross, chase the rally; hourly K:{}'.format(res2.SKDJ_K_HR)
                    #elif res2.CROSS_JC_HR == 1 and res2.SKDJ_K_30M < 70 and res2.SKDJ_TR_30M > 0 and round(res2.MA5_30M,2) >= 0:
                    #    msg = 'MACD golden cross'
                    #elif res2.SKDJ_CROSS2_30M == 1 and res2.SKDJ_K_HR <= 40 and res2.SKDJ_TR_HR < 0 and round(res2.MA5_30M,2) >= 0:
                    #    msg = 'SKDJ: 30MIN golden cross, buy the dip; hourly K:{}'.format(res2.SKDJ_K_HR)
                    #elif res2.SKDJ_CROSS2_30M == 1 and res2.SKDJ_TR_HR == 1:
                    #    msg = 'SKDJ golden cross'
                    else:
                        msg = None
                    if msg is not None:
                        if get_UseCapital(client, account) >= 10000:
                            QA_util_log_info('##JOB Now Buying==== {}'.format(code), ui_log = None)
                            ### buy signal
                            send_actionnotice(strategy_id,'{code}{name}:{stm}{msg}'.format(code=code,name=name,stm=stm, msg=msg),'Buy signal',direction = 'BUY',offset=mark_tm,volume=None)
price = round(QA_fetch_get_stock_realtm_bid(code)+0.01,2)
deal_pos = round(80000 / price,0)
target_pos = deal_pos
industry = str(target_tar.loc[code].INDUSTRY)
try_times = 0
QA_util_log_info('##JOB Now Start Buying {code} ===== {stm}{msg}'.format(code = code, stm = str(stm), msg=msg), ui_log = None)
while get_hold(client, account) <= percent and deal_pos > 0 and buy is True and try_times <= 5:
BUY(client, account, strategy_id, account_info,trading_date, code, name, industry, deal_pos, target_pos, target=None, close=0, type = 'end', test = test)
try_times += 1
time.sleep(3)
hold_pos = get_StockPos(code, client, account)
deal_pos = target_pos - hold_pos
if get_hold(client, account) > percent:
QA_util_log_info('##JOB Now Full {code} {percent}/{hold} ===== {stm}'.format(code = code,percent=percent,hold=get_hold(client, account), stm = str(stm)), ui_log = None)
elif buy is False:
QA_util_log_info('##JOB Now Index Under Control {code} {percent}/{hold} ===== {stm}'.format(code = code,percent=percent,hold=get_hold(client, account), stm = str(stm)), ui_log = None)
elif try_times > 5:
QA_util_log_info('##JOB Now NO More Times {code} {percent}/{hold} ===== {stm}'.format(code = code,percent=percent,hold=get_hold(client, account), stm = str(stm)), ui_log = None)
else:
pass
else:
QA_util_log_info('##JOB Now Not Enough Money==== {}'.format(code), ui_log = None)
else:
QA_util_log_info('##JOB Now Not On Buying==== {}'.format(code), ui_log = None)
###update time
tm = int(datetime.datetime.now().strftime("%H%M%S"))
        QA_util_log_info('##JOB Now Update Time {}'.format(str(tm)), ui_log = None)
        ## market close
if tm >= int(time.strftime("%H%M%S", time.strptime(afternoon_end, "%H:%M:%S"))) or action_tm == '15:00:00':
###time out
QA_util_log_info('##JOB Trading Finished ==================== {}'.format(trading_date), ui_log=None)
send_actionnotice(strategy_id,'Trading Report:{}'.format(trading_date),'Trading Finished',direction = 'Trading',offset='Finished',volume=None)
else:
QA_util_log_info('##JOB Now Update Next MarkTM&ActionTM==== mark_tm: {} action_tm {}'.format(str(mark_tm),str(action_tm)), ui_log = None)
if mark_tm == '09:30:00':
mark_tm = marktm_list[0]
else:
mark_tm = marktm_list[marktm_list.index(mark_tm) + 1]
if marktm_list.index(mark_tm) == len(marktm_list) -1:
action_tm = '15:00:00'
else:
action_tm = action_list[action_list.index(action_tm) + 1]
QA_util_log_info('##JOB Now Update Next MarkTM&ActionTM==== mark_tm: {} action_tm {}'.format(str(mark_tm),str(action_tm)), ui_log = None)
if __name__ == '__main__':
pass
|
[
"chaopaoo12@hotmail.com"
] |
chaopaoo12@hotmail.com
|
2467bdec3fd716f87345d0e6f34cb4d81bdc4fc5
|
fe7d80aa667ea7f34b60fc927e54d279f7bf81cb
|
/cnn_simple_v3/tools/use_isl_tfrecord.py
|
b3060f33736a9a7769b8c6705c72fe2260d59235
|
[] |
no_license
|
qq191513/myRecognize
|
3b20b8ca7f1935d6b177b368eb72f0282db8799e
|
8a183ca1e8ababd4f52b87a86f92c78eda5f4dc5
|
refs/heads/master
| 2020-03-21T18:26:24.286107
| 2019-06-26T08:21:40
| 2019-06-26T08:21:40
| 138,891,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,762
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
import time
import os
import tools.config_isl as cfg
preprocess_paraments={}
example_name = {}
########################## Things to change #######################################
# Path to the tfrecords files
tfrecord_path = '/home/mo/work/data_set/italy_tf/'
# Search tfrecord_path for all related files by keyword
train_keywords = 'train'
test_keywords = 'validation'
labels_txt_keywords = 'labels.txt'
# Decoding: fill in the feature keys and the original image size for restoring
example_name['image'] = 'image/encoded'  # the main one (raw image)
example_name['label'] = 'image/class/label'  # the main one (label)
origenal_size = [32, 32, 1]  # needed to restore the original image size
# Preprocessing options
to_random_brightness = True
to_random_contrast = True
to_resize_images = False
resize_size = [20, 20]
to_random_crop = True
crop_size = [28, 28, 1]
# Multi-queue, multi-thread, batched reading
num_threads = 8
batch_size = cfg.batch_size
shuffle_batch = True
# Set string_input_producer's num_epochs to the number of training epochs,
# otherwise an OutOfRangeError is raised (consumption outpaces production)
num_epochs = cfg.epoch
# Display options
cv2_show = False  # show with opencv or with plt
####################### end ############################################
def ReadTFRecord(tfrecords, example_name):
    if len(tfrecords) == 1:
        # Only one file, so there is nothing to shuffle
        record_queue = tf.train.string_input_producer(tfrecords, num_epochs=num_epochs+1)
    else:
        # With shuffle=True and num_epochs=3, each file is effectively repeated
        # 3 times and the order is shuffled; otherwise the original order is kept
        record_queue = tf.train.string_input_producer(tfrecords, shuffle=True, num_epochs=num_epochs+1)
    reader = tf.TFRecordReader()
    key, value = reader.read(record_queue)
    features = tf.parse_single_example(value,
        features={
            # Pull out the image and label entries; the int width must match exactly!
            example_name['image']: tf.FixedLenFeature([], tf.string),
            example_name['label']: tf.FixedLenFeature([], tf.int64)
        })
    img = tf.decode_raw(features[example_name['image']], tf.uint8)
    # Convert with exactly the int width the data was written with, or errors follow
    if len(origenal_size) == 2:
        w, h = origenal_size[0], origenal_size[1]
        c = 1  # default to a single channel when none is given
    else:
        w, h, c = origenal_size[0], origenal_size[1], origenal_size[2]
    img = tf.reshape(img, [w, h, c])
    # Unclear why this tf.cast makes cv2.imshow render badly (the image turns
    # into a blurry black-and-white binary image)
    img = tf.cast(img, tf.float32)
    label = tf.cast(features[example_name['label']], tf.int64)
    label = tf.cast(label, tf.int32)
    return img, label
def preprocess_data(is_train,image, label):
if is_train:
if to_random_brightness:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
if to_random_contrast:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
if to_resize_images:
# 只有method = 1没有被破坏最严重
image = tf.image.resize_images(image, resize_size,method=1)
if to_random_crop:
image = tf.random_crop(image, crop_size)
else:
if to_resize_images:
image = tf.image.resize_images(image, [28, 28])
if to_random_crop:
image = tf.random_crop(image, crop_size)
return image, label
def feed_data_method(image,label):
if shuffle_batch:
images, labels = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=num_threads,
capacity=batch_size*64,
min_after_dequeue=batch_size*32,
allow_smaller_final_batch=False)
else:
images, labels = tf.train.batch(
[image, label],
batch_size=batch_size,
num_threads=num_threads,
capacity=batch_size*64,
allow_smaller_final_batch=False)
return images, labels
def plt_imshow_data(data):
    # Normalize format and dimensions up front to avoid bugs
data = np.asarray(data)
if data.ndim == 3:
if data.shape[2] == 1:
data = data[:, :, 0]
plt.imshow(data)
plt.show()
time.sleep(2)
def get_files_list(path):
    # Collect the full paths of all files under path
files_list = []
for parent,dirnames,filenames in os.walk(path):
for filename in filenames:
files_list.append(os.path.join(parent,filename))
return files_list
# Filter files under the parent directory by keyword; return all full paths as a list
def search_keyword_files(path,keyword):
keyword_files_list = []
files_list = get_files_list(path)
for file in files_list:
if keyword in file:
keyword_files_list.append(file)
return keyword_files_list
def read_label_txt_to_dict(labels_txt =None):
if os.path.exists(labels_txt):
labels_maps = {}
with open(labels_txt) as f:
while True:
line = f.readline()
if not line:
break
line = line[:-1] # 去掉换行符
line_split = line.split(':')
labels_maps[line_split[0]] = line_split[1]
return labels_maps
return None
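# Expected labels.txt layout, one "index:name" mapping per line, e.g.
# (the class names here are illustrative):
#   0:cat
#   1:dog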
def show_loaded(data_tfrecord=None):
print('load tfrecord:')
for each in data_tfrecord:
print(each)
def create_inputs_isl(is_train):
if is_train:
data_tfrecord = search_keyword_files(tfrecord_path,train_keywords)
else:
data_tfrecord = search_keyword_files(tfrecord_path,test_keywords)
show_loaded(data_tfrecord)
    image, label = ReadTFRecord(data_tfrecord,example_name)  # restore the raw data
    image, label = preprocess_data(is_train,image, label)  # preprocessing
    images,labels = feed_data_method(image, label)  # batching/feeding method
return images,labels
if __name__== '__main__':
images, labels = create_inputs_isl(is_train = True)
labels_txt = search_keyword_files(tfrecord_path, labels_txt_keywords)
    labels_maps = read_label_txt_to_dict(labels_txt=labels_txt[0])  # label mapping
    # Check that the parameters above behave as intended; if they do, other
    # projects can simply call create_inputs_xxx() to feed data
with tf.Session() as sess:
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Output 100 batches for inspection
        batch_size_n = 5  # inspect the n-th image of each batch
if cv2_show:
for i in range(100):
x, y = sess.run([images, labels])
label = y[batch_size_n]
label = str(label)
label_name = labels_maps[label]
title = 'label:{}'.format(label_name)
print('image_shape:',x.shape, 'label_shape:', y.shape,title)
cv2.namedWindow(title, 0)
cv2.startWindowThread()
cv2.imshow(title, x[batch_size_n])
cv2.waitKey(2000)
cv2.destroyAllWindows()
else:
for i in range(100):
x, y = sess.run([images, labels])
label = y[batch_size_n]
label = str(label)
label_name =labels_maps[label]
title = 'label:{}'.format(label_name)
print('image_shape:',x.shape, 'label_shape:', y.shape,title)
plt_imshow_data(x[batch_size_n])
coord.request_stop()
coord.join(threads)
|
[
"1915138054@qq.com"
] |
1915138054@qq.com
|
7b7fe151ae6868ef06c88f81d3b03699cae95014
|
f334b883a00a55703fb023f3e0027970687d23e4
|
/Desafios/des058.py
|
bddb48534b5f9514624d0daebf72c6007375b3f0
|
[] |
no_license
|
RodrigoMorosky/CursoPythonCV
|
7f99ed049e2b33c161f408206de053bc77c636b6
|
ff68d0a9bd399837048ae12309963086820e49ab
|
refs/heads/main
| 2023-08-02T13:52:21.412302
| 2021-09-16T12:11:01
| 2021-09-16T12:11:01
| 407,150,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
import random
n = random.randint(0, 10)
print('I am thinking of a number... try to guess it!')
a = 0
b = 0
while a != n:
    a = int(input('Enter a number: '))
    b = b + 1
print("You tried {} time(s) until you got it!".format(b))
|
[
"80373831+RodrigoMorosky@users.noreply.github.com"
] |
80373831+RodrigoMorosky@users.noreply.github.com
|
84d9b06cb86e28aea0e8281e0b9a358f030fa30a
|
a15d770809eb3ca8b780ec7a20ee0d95275a5c1d
|
/scraper/create_html.py
|
daeacc5f3f175709330a6cc27da567b521921b70
|
[] |
no_license
|
altcoins-code/altcoins
|
924b7d2b7e151820254461466d27797d991677c7
|
e048d42da91f584cd4650a18103a3724b4393631
|
refs/heads/master
| 2021-01-01T17:45:52.642810
| 2017-10-12T06:08:34
| 2017-10-12T06:08:34
| 98,150,336
| 1
| 0
| null | 2017-07-24T04:57:34
| 2017-07-24T04:57:34
| null |
UTF-8
|
Python
| false
| false
| 1,545
|
py
|
import pandas as pd
import pytz
from base64 import b64decode as use
'''
Hacky code for generating web assets and stuff
'''
def create_plots(data):
# iterate of array of raw data and make svg plot
plots = []
for i, row in enumerate(data):
id = 'plot%d' % i
fcn = "plot('%s', %s)" % (id, str(row))
svg_str = '<svg id="%s" onload="%s"/>' % (id, fcn)
plots.append(svg_str)
return plots
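# e.g. create_plots([[1, 2, 3]]) returns
#   ['<svg id="plot0" onload="plot(\'plot0\', [1, 2, 3])"/>']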
def url_from_coin(name):
coin_key = 'aHR0cHM6Ly9jb2lubWFya2V0Y2FwLmNvbS9jdXJyZW5jaWVzLw==\n'
coin_url = use(coin_key).decode() + name.lower()
return '<a href="%s" target="_blank">%s</a>' % (coin_url, name)
def df_to_html(df, ORDER):
pd.set_option('display.max_colwidth', -1)
df = df[ORDER].sort_values('overall score', ascending=False)
df['week data'] = create_plots(df['week data'].values)
df['name'] = df['name'].apply(url_from_coin)
# df = df.drop(['week data'], axis=1)
cols = list(df) # reorder
cols.insert(0, cols.pop(cols.index('img')))
cols.insert(9, cols.pop(cols.index('week data')))
df = df[cols]
return df.to_html(escape=False).replace('class="dataframe"', 'class="sortable"')
def get_df_from_db(db):
entry = db.pop()
date = entry['date'].replace(tzinfo=pytz.UTC)
timestamp = date.astimezone(pytz.timezone('US/Pacific')).strftime('%m-%d-%Y %H:%M')
return pd.DataFrame(entry['data']).T, timestamp
def save_html(html, path):
with open(path, 'w') as f:
f.write(html)
|
[
"jk@jk.com"
] |
jk@jk.com
|
eab7b2f16184650f664061fcc7cb8ee4042760bd
|
eaf4287cf189cf0f87a60aafe4b6935f20a30d22
|
/code/old_stubs/stub_from_start_thresh.py
|
4c5eb277fbc5f0b023ce8cc4c2857a5aac1e6d0e
|
[] |
no_license
|
zatsiorsky/p4
|
bdd3a945cacfb64522e8305bf87eecd3257cf3ab
|
ad88d1203989d6344d336b8014e84541bcc94209
|
refs/heads/master
| 2021-01-20T04:08:05.441804
| 2017-04-30T02:23:11
| 2017-04-30T02:23:11
| 89,639,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,206
|
py
|
# Imports.
import numpy as np
import numpy.random as npr
import pandas as pd
import os.path
import datetime
from itertools import product
from SwingyMonkey import SwingyMonkey
from collections import Counter
class Learner(object):
    '''
    Tabular Q-learning agent with an epsilon-greedy policy and a
    jump-probability threshold for exploration and Q-value ties.
    '''

    def __init__(self, eta = 0.2, gamma = 1, epsilon = 0.2,
                 mx = 50, my = 25, ms = 1, thresh = .08):
        self.n_iters = 0
        self.last_state = None
        self.last_action = None
        self.last_reward = None
        # Store the last velocity independent of the state
        self.last_vel = None
        # Bucket sizes for horizontal distance, vertical distance, and velocity
        self.mx = mx
        self.my = my
        self.ms = ms
        # This is the cutoff for high/low gravity
        self.high_g_thresh = 2
        # Probability of jumping when exploring or when Q-values tie
        # (the original stub never stored this, so choose_action raised a NameError)
        self.thresh = thresh
        # Initialize Q
        # It has keys of the form (state, action)
        self.Q = Counter()
        # Learning rate
        self.eta = eta
        # Discount factor
        self.gamma = gamma
        # Exploration rate (overridden by the decaying schedule in choose_action)
        self.epsilon = epsilon
def reset(self):
self.last_state = None
self.last_action = None
self.last_reward = None
self.last_vel = None
def action_callback(self, state):
'''
Implement this function to learn things and take actions.
Return 0 if you don't want to jump and 1 if you do.
'''
# You might do some learning here based on the current state and the last state.
# You'll need to select and action and return it.
# Return 0 to swing and 1 to jump.
new_state = self.transform_state(state)
new_action = self.choose_action(new_state)
self.last_action = new_action
self.last_state = new_state
return self.last_action
def reward_callback(self, reward):
'''This gets called so you can see what reward you get.'''
### Update Q
        # Get current state and transform it; get_state can fail once the
        # game has ended, in which case fall back to the last seen state
        try:
            current_state = self.transform_state(self.swing.get_state(), update_vel=False)
        except Exception:
            current_state = self.last_state
# Maximize Q from current state
Qmax = max(self.Q[(current_state, 0)], self.Q[(current_state, 1)])
# Last Qsa value
Qsa = self.Q[(self.last_state, self.last_action)]
self.Q[(self.last_state, self.last_action)] = Qsa - self.eta * (Qsa - (reward + self.gamma * Qmax))
self.last_reward = reward
    def choose_action(self, state):
        # Never jump while the monkey is still above the top of the tree gap
        if state[1] > 0:
            return 0
        # Exploration rate decays exponentially with the number of decisions
        epsilon = 0.5 * np.exp(-self.n_iters / float(500))
        self.n_iters += 1
        # With probability epsilon, explore
        if npr.uniform() < epsilon:
            # Don't explore uniformly at random: we should swing far more
            # often than we jump, so jump only with probability self.thresh
            return 1 if npr.uniform() < self.thresh else 0
        # Otherwise, follow the greedy policy based on Q
        else:
            Qs1 = self.Q[(state, 1)]
            Qs0 = self.Q[(state, 0)]
            if Qs0 == Qs1:
                return 1 if npr.uniform() < self.thresh else 0
            else:
                return 1 if Qs1 > Qs0 else 0
    def transform_state(self, state, update_vel = True):
        # Bucketed horizontal distance to the next tree
        # (// preserves the Python 2 integer division the stub relied on)
        dx = state["tree"]["dist"] // self.mx
        # Bucketed vertical distance from the top of the monkey to the top of the tree gap
        dy = (state["monkey"]["top"] - state["tree"]["top"]) // self.my
        # Bucketed velocity of the monkey
        vel = state["monkey"]["vel"] // self.ms
        # Determine if there is high or low gravity;
        # for the first time step, randomly choose high or low
        if self.last_vel is None:
            gravity = npr.choice([0, 1])
        elif np.abs(state["monkey"]["vel"] - self.last_vel) > self.high_g_thresh:
            gravity = 1
        else:
            gravity = 0
        if update_vel:
            self.last_vel = state["monkey"]["vel"]
        return (dx, dy, vel, gravity)
def run_games(learner, iters = 100, t_len = 100):
'''
Driver function to simulate learning by having the agent play a sequence of games.
'''
    # initialize results dataframe
df = pd.DataFrame(columns = ["gravity", "score", "death"])
# run iters games
for ii in range(iters):
# Make a new monkey object.
swing = SwingyMonkey(sound=False, # Don't play sounds.
text="Epoch %d" % (ii), # Display the epoch on screen.
tick_length = t_len, # Make game ticks super fast.
action_callback=learner.action_callback,
reward_callback=learner.reward_callback)
learner.swing = swing
# Loop until you hit something.
while swing.game_loop():
pass
# Save score history.
df.loc[len(df)] = [swing.gravity, swing.score, swing.death]
# Reset the state of the learner.
learner.reset()
return df
def stats(df):
"""Helper function to get stats from df"""
vals = [df.score.mean(), df.score.quantile(0.5),
df.score.quantile(0.8), df.score.max(),
df.death.mode()[0]]
return vals
if __name__ == '__main__':
etas = [0.8, 0.5, 0.1]
gammas = [1]
epsilons = [0.8, 0.5, 0.25, 0.1]
mds = [120, 80, 50, 30]
mys = [60, 35, 20]
mss = [60,58,56,54]
threshs = [0.8,0.4,0.2,0.05]
param_list = [etas, gammas, epsilons, mds, mys, mss, threshs]
params = product(*param_list)
now = datetime.datetime.now()
print "Starting time: {}".format(str(now))
i = 0
    for eta, gamma, epsilon, md, my, ms, thresh in params:
        ### check that this configuration hasn't been run already
        # Bind the values to a new name; the for loop holds its own reference
        # to the `product` iterator, so rebinding `params` would work but is confusing
        param_vals = [eta, gamma, epsilon, md, my, ms, thresh]
        name = "_".join(map(str, param_vals))
        if os.path.isfile('csvs_from_start_thresh/' + name + ".csv"):
            continue
### run games
# Select agent.
agent = Learner(eta, gamma, epsilon, md, my, ms, thresh)
# Run games, account for bug in distro code
while True:
try:
df = run_games(agent, 100, 0)
except UnboundLocalError:
continue
break
### log results
# log all individual scores in csv in folder
df.to_csv('csvs_from_start_thresh/' + name + ".csv", index=0)
# get all summary stats
full_stats = stats(df)
_30_above = stats(df[30:])
_30_above_high = stats(df[30:][df[30:].gravity == 1])
_30_above_low = stats(df[30:][df[30:].gravity == 4])
        combined = param_vals + full_stats + _30_above + _30_above_high + _30_above_low
# append summary stats to grid_csv
with open("grid_results_from_start_thresh.csv", "a") as myfile:
myfile.write(','.join(map(str,combined)) + "\n")
# shout at command line
i += 1
if i % 25 == 0:
elapsed = datetime.datetime.now() - now
print "Combos : {}, Elapsed time: {}".format(i, str(elapsed))
|
[
"RRADOVANOVIC@COLLEGE.HARVARD.EDU"
] |
RRADOVANOVIC@COLLEGE.HARVARD.EDU
|
f87e741810819e9f190a8b3bb658ed080e3c400b
|
5659621ec26b13d0b07b4ceb5055db296c575ac0
|
/task_bubble_sort.py
|
2764839607c73d2e71dd1760863d7058a51376d8
|
[] |
no_license
|
tungyr/itmo-home-work
|
2ce56d9d5932ff97c0898a09ced4f756d7e668ab
|
97e16d3ceec4be86b6ba05e3da93c7605ad6833f
|
refs/heads/master
| 2020-03-11T13:33:18.778798
| 2018-06-17T11:40:53
| 2018-06-17T11:40:53
| 130,028,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
def bubble_sort(lst):  # sort a list in place with bubble sort
    for i in range(len(lst)):  # one pass per element
        for j in range(len(lst) - 1, i, -1):  # walk backwards, swapping out-of-order pairs
            if lst[j] < lst[j-1]:
                lst[j], lst[j-1] = lst[j-1], lst[j]
    return lst

print(bubble_sort([14, 8, 3, 1, 89, 2, 45]))
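# As written, bubble_sort always performs O(n^2) comparisons. A common
# refinement, sketched below (not part of the original exercise), stops early
# once a full pass makes no swaps, giving O(n) on already-sorted input:
def bubble_sort_early_exit(lst):
    for i in range(len(lst)):
        swapped = False
        for j in range(len(lst) - 1, i, -1):
            if lst[j] < lst[j-1]:
                lst[j], lst[j-1] = lst[j-1], lst[j]
                swapped = True
        if not swapped:  # no swaps in this pass: the list is sorted
            break
    return lst

print(bubble_sort_early_exit([1, 2, 3, 8, 14, 45, 89]))  # sorted input: exits after one pass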
|
[
"rotkivem@gmail.com"
] |
rotkivem@gmail.com
|
57d3eb4ff5547a801d23688a7a2426b29909ed15
|
a08aad7b213584198c4e82a10427443b09ff43ee
|
/Problem2.py
|
2e2294281753809b878aa5ef67147ec8f41a0774
|
[] |
no_license
|
shantanu609/DP-1
|
1a1365d741f71d0b8fd4c8f636405e7a60632cf0
|
a27aac431988a22b22fbef5a5908cc98a56a1716
|
refs/heads/master
| 2022-09-20T16:49:37.615034
| 2020-06-04T21:32:04
| 2020-06-04T21:32:04
| 269,257,060
| 0
| 0
| null | 2020-06-04T04:04:54
| 2020-06-04T04:04:54
| null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
# Time Complexity : O(n)
# Space Complexity : O(1)
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : No
# Your code here along with comments explaining your approach
class Solution:
    def rob(self, nums):
        # `not nums` already covers both None and the empty list
        if not nums:
            return 0
        if len(nums) == 1:
            return nums[0]
        # small: best loot up to house i-2; large: best loot up to house i-1
        small = nums[0]
        large = max(nums[0], nums[1])
        for i in range(2, len(nums)):
            current = max(large, small + nums[i])
            small = large
            large = current
        return large
if __name__ == "__main__":
s = Solution()
nums = [2,7,9,3,1]
res = s.rob(nums)
print(res)
|
[
"shantanu_shinde_@mail.fresnostate.edu"
] |
shantanu_shinde_@mail.fresnostate.edu
|
c7c7a0d7d7981c861e381a3f4550b2c9e6a8748b
|
200b52c1fe17cd47e20522b989adb05bd9cc3cfa
|
/airflow/providers/dbt/cloud/sensors/dbt.py
|
5838f6d6247d2550a94b43600f4f72f056937a4f
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
fritzwijaya/airflow
|
cbb70fd8832b23e31e4426e72652c76e0df9fba8
|
5a4106d96e670b82004dd6fa221cf3c29b2496a9
|
refs/heads/master
| 2023-07-07T18:46:09.854983
| 2023-06-22T00:52:20
| 2023-06-22T00:52:20
| 206,764,905
| 1
| 0
|
Apache-2.0
| 2019-09-06T09:53:33
| 2019-09-06T09:53:33
| null |
UTF-8
|
Python
| false
| false
| 5,614
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
import warnings
from typing import TYPE_CHECKING, Any
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.dbt.cloud.hooks.dbt import DbtCloudHook, DbtCloudJobRunException, DbtCloudJobRunStatus
from airflow.providers.dbt.cloud.triggers.dbt import DbtCloudRunJobTrigger
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DbtCloudJobRunSensor(BaseSensorOperator):
"""Checks the status of a dbt Cloud job run.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/operator:DbtCloudJobRunSensor`
:param dbt_cloud_conn_id: The connection identifier for connecting to dbt Cloud.
:param run_id: The job run identifier.
:param account_id: The dbt Cloud account identifier.
:param deferrable: Run sensor in the deferrable mode.
"""
template_fields = ("dbt_cloud_conn_id", "run_id", "account_id")
def __init__(
self,
*,
dbt_cloud_conn_id: str = DbtCloudHook.default_conn_name,
run_id: int,
account_id: int | None = None,
deferrable: bool = False,
**kwargs,
) -> None:
if deferrable:
if "poke_interval" not in kwargs:
# TODO: Remove once deprecated
if "polling_interval" in kwargs:
kwargs["poke_interval"] = kwargs["polling_interval"]
                    warnings.warn(
                        "Argument `polling_interval` is deprecated and will be removed "
                        "in a future release. Please use `poke_interval` instead.",
                        AirflowProviderDeprecationWarning,
                        stacklevel=2,
                    )
else:
kwargs["poke_interval"] = 5
if "timeout" not in kwargs:
kwargs["timeout"] = 60 * 60 * 24 * 7
super().__init__(**kwargs)
self.dbt_cloud_conn_id = dbt_cloud_conn_id
self.run_id = run_id
self.account_id = account_id
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
hook = DbtCloudHook(self.dbt_cloud_conn_id)
job_run_status = hook.get_job_run_status(run_id=self.run_id, account_id=self.account_id)
if job_run_status == DbtCloudJobRunStatus.ERROR.value:
raise DbtCloudJobRunException(f"Job run {self.run_id} has failed.")
if job_run_status == DbtCloudJobRunStatus.CANCELLED.value:
raise DbtCloudJobRunException(f"Job run {self.run_id} has been cancelled.")
return job_run_status == DbtCloudJobRunStatus.SUCCESS.value
def execute(self, context: Context) -> None:
"""Run the sensor.
Depending on whether ``deferrable`` is set, this would either defer to
the triggerer or poll for states of the job run, until the job reaches a
failure state or success state.
"""
if not self.deferrable:
super().execute(context)
else:
end_time = time.time() + self.timeout
if not self.poke(context=context):
self.defer(
timeout=self.execution_timeout,
trigger=DbtCloudRunJobTrigger(
run_id=self.run_id,
conn_id=self.dbt_cloud_conn_id,
account_id=self.account_id,
poll_interval=self.poke_interval,
end_time=end_time,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]) -> int:
"""Callback for when the trigger fires - returns immediately.
This relies on trigger to throw an exception, otherwise it assumes
execution was successful.
"""
if event["status"] in ["error", "cancelled"]:
raise AirflowException("Error in dbt: " + event["message"])
self.log.info(event["message"])
return int(event["run_id"])
class DbtCloudJobRunAsyncSensor(DbtCloudJobRunSensor):
"""This class is deprecated.
Please use :class:`airflow.providers.dbt.cloud.sensor.dbt.DbtCloudJobRunSensor`
with ``deferrable=True``.
"""
def __init__(self, **kwargs: Any) -> None:
warnings.warn(
"Class `DbtCloudJobRunAsyncSensor` is deprecated and will be removed in a future release. "
"Please use `DbtCloudJobRunSensor` and set `deferrable` attribute to `True` instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(deferrable=True, **kwargs)
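# --- Hedged usage sketch (not part of this module) ----------------------------
# How the sensor is typically wired into a DAG; the dag id, connection id, and
# the upstream task producing `run_id` are illustrative assumptions.
# from airflow.providers.dbt.cloud.sensors.dbt import DbtCloudJobRunSensor  # when used outside this module
from datetime import datetime

from airflow import DAG

with DAG(dag_id="example_dbt_cloud_sensor", start_date=datetime(2023, 1, 1), schedule=None) as dag:
    wait_for_job = DbtCloudJobRunSensor(
        task_id="wait_for_dbt_job",
        dbt_cloud_conn_id="dbt_cloud_default",  # DbtCloudHook.default_conn_name
        run_id="{{ ti.xcom_pull(task_ids='trigger_dbt_job') }}",  # run_id is a templated field
        deferrable=True,   # hand polling off to the triggerer instead of holding a worker slot
        poke_interval=30,
        timeout=60 * 60,
    )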
|
[
"noreply@github.com"
] |
fritzwijaya.noreply@github.com
|
c4f5a91d526049e1800127af99d730c61ea08b2b
|
b06d8c770ba343f5c21780f5a47898bc12871d21
|
/source/api/urls.py
|
66fb1f2b1e8077b83ae3a40a68e14440fe637763
|
[] |
no_license
|
neroznik/home_work70
|
e6c897145fcffea08b2e204bdc449afcb6ef8100
|
fe2d3560415addf50304a10d09bac9f93de9fee7
|
refs/heads/main
| 2022-12-27T14:34:22.073983
| 2020-10-15T11:48:41
| 2020-10-15T11:48:41
| 304,308,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
"""main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path

from api.views import add_view, multiply_view, subtract_view, divide_view

app_name = 'api'

urlpatterns = [
    path('add/', add_view),
    path('multiply/', multiply_view),
    path('subtract/', subtract_view),
    path('divide/', divide_view),
]
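# --- Hedged sketch of one referenced view (assumed, not shown in this repo) ---
# api/views.py defines add_view, multiply_view, subtract_view and divide_view;
# a minimal guess at the contract, using ?a=&b= query parameters. The name and
# the JSON response shape are assumptions, not the real implementation.
from django.http import JsonResponse

def add_view_sketch(request):
    try:
        a = float(request.GET.get('a', ''))
        b = float(request.GET.get('b', ''))
    except ValueError:
        return JsonResponse({'error': 'a and b must be numbers'}, status=400)
    return JsonResponse({'result': a + b})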
|
[
"neroznik@yandex.ru"
] |
neroznik@yandex.ru
|