blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c845f6f485a435d3981ca72437d62c5756fe5c00 | 1b62be8a219ac18094fda7957546cb9b7e3f6062 | /gl0/urls.py | 5558dac5b98133d1465ffdfdbf91c383a7491722 | [] | no_license | jguaraz/gla0 | 7749e4cd9668ea0b223eeefacb103a0205a278ea | b7b2002f0eb20e85b0f3fbebcaa4785bfe2ecfce | refs/heads/master | 2020-07-01T12:22:27.732279 | 2017-01-15T19:58:12 | 2017-01-15T19:58:12 | 73,826,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from django.conf.urls import url, include
from django.contrib.auth import views, logout  # NOTE(review): both names unused — `views` is shadowed below and `logout` is never referenced; confirm before removing
from . import views  # the app's own views module (shadows the auth import above)
from .forms import LoginForm

# URL routes for the app; each entry maps a regex to a view callable.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^list/$', views.post_list, name='post_list'),
    url(r'^g_new/$', views.g_new, name='g_new'),
    # Extra-kwargs dict supplies the template and the custom auth form.
    url(r'^login/$', views.login, {'template_name': 'login.html', 'authentication_form': LoginForm}, name='login'),
    url(r'^logout/$', views.logout, {'next_page': '/'}),  # unnamed route; redirects to '/' after logout
    url(r'^chart2$', views.chart2, name='chart2'),
    url(r'^guest$', views.guest, name='guest'),
    url(r'^u_new$', views.u_new, name='u_new'),
    # Catch-all: any other trailing-slash path renders the forbidden view.
    url(r'^.*/$', views.forbidden, name='forbidden'),
]
| [
"jguaraz@openmailbox.org"
] | jguaraz@openmailbox.org |
e53c94360c91db293573eac3213b22e8a4f52306 | f79904b7e16768ef2360ae9ca3f75519d4c4c5d9 | /std_deviation.py | d4983c366f4658ec66a94e545a3119440f048967 | [] | no_license | Yash27112006/C105-std_deviation | 7deaeea48a76ef464a38df7f2e657730cc2c344a | e66fec6d544161b77c134ec3d26a98663eefb6d1 | refs/heads/main | 2023-04-15T00:51:16.084281 | 2021-04-19T20:59:59 | 2021-04-19T20:59:59 | 359,596,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | import csv
import math

# Read the whole CSV; the file's first row is treated as the data series.
# newline="" is the documented way to open files for the csv module.
with open('data.csv', newline="") as f:
    reader = csv.reader(f)
    file_data = list(reader)

# First row only — every value is expected to be numeric (as strings).
data = file_data[0]
def mean(data):
    """Return the arithmetic mean of `data`, coercing items with int()."""
    total = sum(int(item) for item in data)
    return total / len(data)
# Sample standard deviation: mean squared deviation over (n - 1 ).
data_mean = mean(data)  # fix: previously mean(data) was recomputed on every loop pass (O(n^2))
squared_list = []
for number in data:
    deviation = int(number) - data_mean
    squared_list.append(deviation ** 2)
# Accumulate into `total` — the original used `sum`, shadowing the builtin.
total = 0
for sq in squared_list:
    total = total + sq
variance = total / (len(data) - 1)  # Bessel's correction (sample, not population)
std_deviation = math.sqrt(variance)
print(" ")
print("Standard Deviation = ", std_deviation)
print(" ")
| [
"noreply@github.com"
] | noreply@github.com |
556621b3bffb8ae2724e3701c79e7cadc3ba4758 | 746343ec01702270ddb8f101dc42f214bcec3ee8 | /moveit_fake_controller_manager/conf.py | 19a2de46bba656fcd8552277f50adb863930453e | [] | no_license | ros-planning/moveit_plugins | 162829d6dc430e372e1dc80f83ed7f87d01ddd3a | cf0ddc86cf843688c8d172cf233a5d0e63e7f9de | refs/heads/kinetic-devel | 2023-08-30T13:58:57.844330 | 2016-08-05T09:49:31 | 2016-08-05T09:49:31 | 10,380,404 | 13 | 12 | null | 2016-08-25T19:07:29 | 2013-05-30T10:20:05 | C++ | UTF-8 | Python | false | false | 603 | py | import sys, os
extensions = [ 'sphinx.ext.extlinks' ]  # enables the shortcut roles defined in `extlinks` below

# The master toctree document.
master_doc = 'doc/tutorial'

# The suffix of source filenames.
source_suffix = '.rst'

project = u'moveit_rviz_plugin_tutorial'
copyright = u'2016, Bielefeld University'

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# Custom theme shipped inside this package's doc/ directory.
html_theme = "moveit-theme"
html_theme_path = ["doc"]

# %s is replaced with the role content, e.g. :moveit_website:`install`.
extlinks = {'moveit_website': ('http://moveit.ros.org/%s/', '')}
| [
"me@v4hn.de"
] | me@v4hn.de |
def getLinks(respond, printProgressBar):
    """Collect result hyperlinks from a Google results page.

    respond: raw HTML of the results page.
    printProgressBar: callback(done, total, prefix, suffix) for progress UI.
    Returns the list of non-trivial hrefs found under the '.rc .r a' selector.
    """
    from bs4 import BeautifulSoup

    soup = BeautifulSoup(respond, 'lxml')
    anchors = soup.select('.rc .r a')
    collected = []
    for done, anchor in enumerate(anchors, start=1):
        printProgressBar(done, len(anchors), "Collecting Links from google", "Completed")
        href = anchor.get('href')
        if len(href) > 1:
            collected.append(href)
    return collected
"akibsadmanee@gmail.com"
] | akibsadmanee@gmail.com |
cf79b0c7467ad594ae0411f8467196f72af51882 | 5449037cb41fc12eaa30cdf9b4dd9b5ad536848a | /PyBank/main.py | b71b8f6b0d099afe9e0bd751770031264b89ac25 | [] | no_license | dlg410/Python-challenge | 61cf873e5d0f8d43404dd614fe70e808b509f348 | af5eb694a21bba6a5fa47c9ef0059a4ad3609aff | refs/heads/master | 2022-12-17T02:32:42.456055 | 2020-09-21T17:28:04 | 2020-09-21T17:28:04 | 265,031,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | #Import Dependencies
import os
import csv

# Data File Name and Path
budget_path = os.path.join("Resources","budget_data.csv")

with open(budget_path) as csvfile:
    csvreader = csv.reader(csvfile, delimiter= ',')
    print(csvreader)

    # Read Header Row
    csv_header = next(csvreader)

    # Variable
    month = []                    # month labels; len(month) is the month count
    monthly_rev_change = []       # month-over-month deltas (first entry compares vs. 0)
    avg_monthly_revenue_chg = []  # NOTE(review): never used — confirm before removing
    total_revenue = 0
    total_revenue_change = 0      # NOTE(review): never used — confirm before removing
    prev_month_rev = 0

    # Calculate Number of Months, and Sum Up total Profit or Loss
    for row in csvreader:
        month.append(row[0])
        total_revenue = total_revenue + int(row[1])
        revenue_change = int(row[1]) - prev_month_rev
        prev_month_rev = int(row[1])
        monthly_rev_change.append(revenue_change)

# Calculate average monthly change.
# The first delta compares against 0 (not a real change), so it is removed
# from the sum and the divisor is (number of months - 1).
average_monthly_change = round((sum(monthly_rev_change) - monthly_rev_change[0]) / (len(month) -1 ),2)

# Determine the greatest monthly profit increase amount, and identify month
largest_increase = max(monthly_rev_change)
lg_inc_mnth = monthly_rev_change.index(largest_increase)
largest_increase_month = month[lg_inc_mnth]

# Determine the greatest monthly decrease change amount, and identify month
largest_decrease = min(monthly_rev_change)
lg_dec_mnth = monthly_rev_change.index(largest_decrease)
largest_decrease_month = month[lg_dec_mnth]

# Print Results on Terminal
print("Financial Analysis")
print("----------------------------------------------")
print("Total Months: " + str(len(month)))
print("Total Profit: $" + str(total_revenue))
print("Average Change: $" + str(average_monthly_change))
print("Greatest Increase in Profits: " + largest_increase_month + " ($" + str(largest_increase) +")")
print("Greatest Decrease in Profits: " + largest_decrease_month + " ($" + str(largest_decrease) +")")

# Write the same summary to a text report.
with open("Financial_Analysis.txt", "w") as text:
    text.write("----------------------------------------------------------\n")
    text.write(" Financial Analysis"+ "\n")
    text.write("----------------------------------------------------------\n\n")
    text.write(" Total Months: " + str(len(month))+ "\n")
    text.write(" Total Profits: " + "$" + str(total_revenue) +"\n")
    text.write(" Average Change: " + '$' + str((average_monthly_change)) + "\n")
    text.write(" Greatest Increase in Profits: " + str(largest_increase_month) + " ($" + str(largest_increase) + ")\n")
    text.write(" Greatest Decrease in Profits: " + str(largest_decrease_month) + " ($" + str(largest_decrease) + ")\n")
    text.write("----------------------------------------------------------\n")
"noreply@github.com"
] | noreply@github.com |
87ee3ca0437c88123ea9d904ca36e0f460b1e91b | 54c657c96dcfb7f5f8dd32e24c0198312e438600 | /parlai/agents/fairseq/fairseq_py/fairseq/criterions/fairseq_criterion.py | bed7efe1be88eb9f019dd6706fb320c37e91f688 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | urikz/ParlAI | 09ab2024cd623327d0cc7c2fd3fdc3177b00a0e9 | 2813e9fb10562027ffb6c69e05fb1a8127b7141a | refs/heads/master | 2021-04-26T23:54:36.024365 | 2018-04-09T15:19:43 | 2018-05-21T22:42:00 | 123,878,487 | 2 | 1 | BSD-3-Clause | 2018-03-08T20:44:39 | 2018-03-05T07:02:20 | Python | UTF-8 | Python | false | false | 1,177 | py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
from torch.nn.modules.loss import _Loss
class FairseqCriterion(_Loss):
    """Abstract base class for fairseq training criterions (losses).

    Subclasses implement `forward` and `aggregate_logging_outputs`.
    """

    def __init__(self, args, dst_dict):
        super().__init__()
        self.args = args
        # Cache the padding symbol's index so subclasses can mask it out.
        self.padding_idx = dst_dict.pad()

    def forward(self, model, sample):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss, as a Variable
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        raise NotImplementedError

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        raise NotImplementedError

    @staticmethod
    def grad_denom(sample_sizes):
        """Compute the gradient denominator for a set of sample sizes."""
        return sum(sample_sizes)
| [
"kshuster@fb.com"
] | kshuster@fb.com |
a0626e7b3facbb1cfe1a1f7975b5c52787b70e78 | 21eb191e7ba0c6706c3858c0499e2cb659758b61 | /blackjack.py | 69fa84542b8c81cb2dbbfeef96bc81f89a93bf1e | [] | no_license | fortenforge/pwnable.kr | 0f4bb4369c3b63e10762825771f83c9432c787b6 | bf32644ece16e391b9d3b95495e80ac22a9a1a59 | refs/heads/master | 2021-03-12T19:32:05.854490 | 2017-11-14T22:56:57 | 2017-11-14T22:56:57 | 102,881,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from pwn import *
# input
host = 'pwnable.kr'
port = 9009
# Notes
# * Just bet a very large negative amount and then lose.
def attack():
s = remote(host, port)
print s.recv()
s.sendline('Y')
print s.recv()
s.sendline('1')
print s.recv()
s.sendline('-1000000')
print s.recv()
for i in range(5):
s.sendline('H')
print s.recv()
s.interactive()
if __name__ == '__main__':
attack()
| [
"sridhar.rahul@gmail.com"
] | sridhar.rahul@gmail.com |
8c01cd644e3947c8c131e6da091a882cafdb17c4 | 3c4450ccd471f7720ef32cce3b5f5221981547ec | /openapi_client/models/instance_status.py | 7687a269856f24327f99f91d990cc8f7bc90e5ef | [] | no_license | ContatoGrupoOptimus/python-client | eb21d88a5725294609d589474e09463ab659d45b | f84ee64f741c096aadefc1088d1da88e97663fb1 | refs/heads/master | 2022-12-14T10:57:45.041040 | 2020-09-10T21:28:19 | 2020-09-10T21:28:19 | 294,526,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,387 | py | # coding: utf-8
"""
Chat API SDK
The SDK allows you to receive and send messages through your WhatsApp account. [Sign up now](https://app.chat-api.com/) The Chat API is based on the WhatsApp WEB protocol and excludes the ban both when using libraries from mgp25 and the like. Despite this, your account can be banned by anti-spam system WhatsApp after several clicking the \"block\" button. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: sale@chat-api.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class InstanceStatus(object):
    """Auto-generated OpenAPI model for a Chat API instance's state.

    NOTE: originally generated by OpenAPI Generator
    (https://openapi-generator.tech); keep the public surface stable.
    """

    # attribute name -> declared OpenAPI type
    openapi_types = {
        'account_status': 'str',
        'qr_code': 'str',
        'status_data': 'InstanceStatusStatusData'
    }

    # attribute name -> JSON key used on the wire
    attribute_map = {
        'account_status': 'accountStatus',
        'qr_code': 'qrCode',
        'status_data': 'statusData'
    }

    def __init__(self, account_status=None, qr_code=None, status_data=None):  # noqa: E501
        """Build an InstanceStatus; every field is optional."""
        self._account_status = None
        self._qr_code = None
        self._status_data = None
        self.discriminator = None

        # Route non-None values through the property setters so the
        # enum validation below applies.
        if account_status is not None:
            self.account_status = account_status
        if qr_code is not None:
            self.qr_code = qr_code
        if status_data is not None:
            self.status_data = status_data

    @property
    def account_status(self):
        """Instance status string (see allowed values in the setter)."""
        return self._account_status

    @account_status.setter
    def account_status(self, account_status):
        """Set the instance status; rejects values outside the schema enum."""
        allowed_values = ["got qr code", "authenticated", "loading", "init", "not_paid"]  # noqa: E501
        if account_status not in allowed_values:
            raise ValueError(
                "Invalid value for `account_status` ({0}), must be one of {1}"  # noqa: E501
                .format(account_status, allowed_values)
            )
        self._account_status = account_status

    @property
    def qr_code(self):
        """Base64-encoded contents of the QR code."""
        return self._qr_code

    @qr_code.setter
    def qr_code(self, qr_code):
        """Set the base64-encoded QR code payload (no validation applied)."""
        self._qr_code = qr_code

    @property
    def status_data(self):
        """Optional InstanceStatusStatusData with extra state details."""
        return self._status_data

    @status_data.setter
    def status_data(self, status_data):
        """Set the nested status-data model."""
        self._status_data = status_data

    def to_dict(self):
        """Return the model's properties as a plain dict (recursively)."""
        def _convert(value):
            # Recurse into lists, nested models and dicts of models,
            # mirroring the generated serialization order of checks.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when `other` is an InstanceStatus with identical state."""
        if not isinstance(other, InstanceStatus):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
"71100986+ContatoGrupoOptimus@users.noreply.github.com"
] | 71100986+ContatoGrupoOptimus@users.noreply.github.com |
59d1c6d9a4904b9ded3a33877749c4c95b801d4e | 5804abbaa9c54589284a2c14d0071b8348f14f7b | /Python/Cents.py | 4e30141949d687d65ddc32153cb73ac7ac182a0c | [] | no_license | steffanc/Practice | 76f138f849179cf544b567b0d9824b3f86a57122 | 7fb2c47804908ee3d3211482d126dc42eeb3b17e | refs/heads/master | 2020-06-07T04:02:17.895262 | 2020-06-06T21:17:46 | 2020-06-06T21:17:46 | 3,214,914 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | # Given an input number of cents, print all combinations of change that
# would total the number of cents
# Coin table: (value in cents, output symbol, denomination level).
# Levels enforce non-increasing denominations down the recursion, so each
# combination is emitted exactly once.
# o = one dollar, q = quarter, d = dime, n = nickel, p = penny.
_COINS = ((100, "o", 4), (25, "q", 3), (10, "d", 2), (5, "n", 1), (1, "p", 0))

def cents(target):
    """Print every way to make `target` cents from the coin table."""
    doCents(target, [], 0, 4)

def doCents(target, output, current, level):
    """Recursively extend `output` with coin symbols summing to `target`.

    target:  amount to reach, in cents.
    output:  symbols chosen so far (mutated in place, restored on return).
    current: running total of the symbols in `output`.
    level:   largest denomination level still allowed.
    Prints each complete combination; returns the number printed
    (new, backward-compatible return value — the original returned None).
    """
    if current == target:
        # print(x) with a single argument is identical under Python 2 and 3.
        print(output)
        return 1
    # Table-driven replacement for five copy-pasted branches; the original
    # tried coins in the same order (100, 25, 10, 5, 1).
    count = 0
    for value, symbol, lvl in _COINS:
        if current + value <= target and level >= lvl:
            output.append(symbol)
            count += doCents(target, output, current + value, lvl)
            output.pop()
    return count

cents(100)
| [
"schartrand@twitter.com"
] | schartrand@twitter.com |
843331de4602456e0bca3c39c6ddbf5c197b6d96 | ee7631bce162099cbbca0b7aecda0f3ffb3d31e8 | /www/region/models.py | 010e44dc1f7bcdd7ff9d73763e673279cd79d2dd | [] | no_license | poffey21/inventory | b0829b0dacd93e4248ba22efa8b6fa912f3cd76e | e65b9adf99618a1a25accd97781ce874dcb84bda | refs/heads/master | 2020-07-30T16:22:08.946036 | 2016-11-16T04:24:09 | 2016-11-16T04:24:09 | 73,626,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class City(models.Model):
    """ city for stores and taxes """
    # NOTE(review): placeholder model — no fields declared yet.
class Tax(models.Model):
    """ percentage by city """
    # NOTE(review): placeholder model — no fields declared yet.
| [
"poffey21@gmail.com"
] | poffey21@gmail.com |
1c738ef73bdc0768137d85581d244067c1e3ef73 | f9d7036649ff5d64660c33bc295ddf97e316d082 | /blog/settings.py | b9c6c73355a9da6b8c57b7e16e0b4b08e72fe807 | [] | no_license | jocsakesley/blog-jocsa-kesley | 1ebd6c11ad45c98a6b396ddfe58675da5cd113ec | d106a0870636542c08ee7791d971d77a948b3e0a | refs/heads/main | 2023-03-16T00:08:23.688040 | 2021-03-12T15:36:57 | 2021-03-12T15:36:57 | 322,912,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path

# decouple: read configuration from the environment / .env file.
# dj_database_url: parse a DATABASE_URL string into Django's DB dict.
from decouple import config, Csv
from dj_database_url import parse as dburl

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DEBUG", default=False, cast=bool)

# Comma-separated list in the environment, e.g. "localhost,example.com".
ALLOWED_HOSTS = config("ALLOWED_HOSTS", default=[], cast=Csv())

# Application definition

INSTALLED_APPS = [
    # project apps
    'blog.posts',
    'blog.comentarios',
    'blog.categorias',
    'blog.sobre',
    # django contrib
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    # third party
    'widget_tweaks',
    'django_extensions',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'blog.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'blog.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

# Fallback: local SQLite file when DATABASE_URL is not set in the environment.
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = {
    'default': config('DATABASE_URL', default=default_dburl, cast=dburl)
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'pt-BR'

TIME_ZONE = 'America/Recife'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'templates/static'),)

# User-uploaded media.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

# django-summernote appended after the base list; its iframe-based editor
# needs same-origin framing to render.
INSTALLED_APPS += ('django_summernote', )
X_FRAME_OPTIONS = 'SAMEORIGIN'
"jocsadm@gmail.com"
] | jocsadm@gmail.com |
2ff03a3d07da824d69de0a7ba661cbf423b86218 | 7fab3415bae4c70d462c1a43cead5b0aabf67489 | /Research/Dijkstra.py | cd42ca3ffc1bcc6a1f2969fedf31b4701e409acd | [] | no_license | Alex-D-G/R2D2-Research | a495e32c198da6fc25392a45bc3a6f9a3ad7ca81 | 1f154aad22d8753cb824b50672e8d186cf6f277e | refs/heads/main | 2023-05-13T01:47:54.177485 | 2021-05-31T08:35:43 | 2021-05-31T08:35:43 | 372,437,869 | 0 | 0 | null | 2021-05-31T08:36:04 | 2021-05-31T08:36:04 | null | UTF-8 | Python | false | false | 4,630 | py | from matplotlib import pyplot as plt
import numpy as np
import copy
import time
import statistics as stats
# Een zeer generieke manier om een graaf de implementeren is er
# daarwerkelijk twee sets van te maken op basis van twee classes:
class Vertex:
    """Graph vertex identified by `id`; equality and hashing use only the id."""

    def __init__(self, identifier, data_):
        self.id = identifier
        self.data = data_

    def __eq__(self, other):  # required for set membership
        return self.id == other.id

    def __hash__(self):  # required for set membership
        return hash(self.id)

    def __repr__(self):
        return f"{self.id}:{self.data}"
class Edge:
    """Undirected edge between two vertices.

    Endpoints are normalized so the smaller id is stored in v1; that way
    Edge(a, b) and Edge(b, a) compare and hash identically.
    """

    def __init__(self, vertex1, vertex2, data_):
        if (vertex1.id < vertex2.id):
            self.v1 = vertex1
            self.v2 = vertex2
        else:
            self.v1 = vertex2
            self.v2 = vertex1
        self.data = data_

    def __eq__(self, other):  # required for set membership
        # Bug fix: the original compared self.v2.id with itself (always True),
        # so any two edges sharing v1 were considered equal.
        return self.v1.id == other.v1.id and self.v2.id == other.v2.id

    def __hash__(self):  # required for set membership
        return hash(str(self.v1.id) + "," + str(self.v2.id))

    def __repr__(self):
        return "(" + str(self.v1.id) + "," + str(self.v2.id) + "):" + str(self.data)
class CGraph:
    """Container graph: V holds Vertex objects, E holds Edge objects."""

    def __init__(self):
        self.V = set()
        self.E = set()

    def __str__(self):
        return "V: {}\nE: {}".format(self.V, self.E)
def findNeighbours(n, graph):
    """Return the set of vertices that share an edge with `n` in graph.E."""
    neighbours = set()
    for edge in graph.E:
        if edge.v1 == n:
            # n is the first endpoint; record the other one (unless self-loop).
            if edge.v2 != n:
                neighbours.add(edge.v2)
        elif edge.v2 == n:
            neighbours.add(edge.v1)
    return neighbours
def minDist(N):
    """Return a single-entry dict {key: entry} for the node in `N` with the
    smallest tentative distance (entry[1]["dist"]).

    Bug fix: the original accumulated every successive running minimum into
    the result, so callers taking the first key got the first node iterated
    rather than the true minimum — breaking Dijkstra's greedy selection.
    """
    minValue = float('inf')
    minDict = {}
    for key in N:
        dist = N[key][1]["dist"]
        if dist < minValue:
            minValue = dist
            minDict = {key: N[key]}  # reset: keep only the current best
    return minDict
def findNeighboursD(n, graph, n_key):
    """Return {vertex: graph entry} for every neighbour of `n_key`.

    Neighbour ids are the keys of n[n_key]'s adjacency dict, minus the
    bookkeeping keys DPath injects ("dist", "prev", "solved").
    """
    bookkeeping = ("dist", "prev", "solved")
    return {k: graph[k] for k in n[n_key][1] if k not in bookkeeping}
def getPath(node, start, path):
    """Walk "prev" links from `node` back to `start`, appending vertex ids.

    `path` is mutated in place; callers read it afterwards (the return value
    is only meaningful at the base of the recursion).
    """
    key = next(iter(node))
    path.append(key)
    if key == start:
        return path
    getPath(node[key][1]["prev"], start, path)
def DPath(gr2, start, finish):
    """Dijkstra shortest path on an adjacency-map graph.

    gr2: {vertex: (data, {neighbour: weight, ...})} — deep-copied first, so
    the caller's graph is left untouched.
    Returns (distance, [start, ..., finish]).
    """
    graph = copy.deepcopy(gr2)
    # Initialise per-vertex bookkeeping inside each adjacency dict.
    for n in graph:
        graph[n][1]["dist"] = float('inf')
        graph[n][1]["prev"] = None
        graph[n][1]["solved"] = False
    graph[start][1]["dist"] = 0
    S = {}  # settled vertices (record only; never read back)
    N = {}  # frontier: candidate vertices keyed by id
    N[start] = graph[start]
    while (len(N) != 0):
        # NOTE(review): relies on minDist() exposing the minimum-distance
        # entry as the FIRST key of the dict it returns — confirm.
        n = minDist(N)
        n_key = list(n.keys())[0]
        n[n_key][1]["solved"] = True
        S.update(n)
        del N[n_key]
        if n_key == finish:
            break  # early exit: target settled, its distance is final
        neighbours = findNeighboursD(n, graph, n_key)
        for m in neighbours:
            if (neighbours[m][1]["solved"] == False):
                if m not in N:
                    N[m] = graph[m]
                # Relaxation: is the route to m via n shorter?
                altDistance = n[n_key][1]["dist"] + n[n_key][1][m]
                if (neighbours[m][1]["dist"] > altDistance):
                    neighbours[m][1]["dist"] = altDistance
                    neighbours[m][1]["prev"] = n
    # Reconstruct the route by following "prev" links back from finish.
    node = {}
    node[finish] = graph[finish]
    path = []
    getPath(node, start, path)
    path.reverse()
    return node[finish][1]["dist"], path
# NOTE(review): unused alias for the plain-dict graph representation below —
# confirm before removing.
DGraph = dict

# Adjacency map: {vertex_id: (vertex_data, {neighbour_id: edge_weight, ...})}.
# DPath() deep-copies this and adds "dist"/"prev"/"solved" bookkeeping keys
# into each inner dict.
gr2 = {1: ("", {2:9}),
       2: ("", {1:9, 3:11}),
       3: ("", {2:11, 8:8.5}),
       4: ("", {5:6, 6:5}),
       5: ("", {4:6}),
       6: ("", {4:5, 7:6, 11:4}),
       7: ("", {6:6, 14:10}),
       8: ("", {3:8.5, 9:6, 10:2}),
       9: ("", {8:6}),
       10: ("", {8:2, 11:13, 12:6}),
       11: ("", {6:4, 10:13, 13:6}),
       12: ("", {10:6, 15:17}),
       13: ("", {11:6, 14:6, 16:16.5}),
       14: ("", {7:10, 13:6}),
       15: ("", {12:17, 20:13.5, 23:19.5}),
       16: ("", {13:16.5, 17:2, 18:3.5, 19:4}),
       17: ("", {16:2, 18:4, 19:3.5}),
       18: ("", {16:3.5, 17:4, 19:2, 20:2.5}),
       19: ("", {16:4, 17:3.5, 18:2}),
       20: ("", {15:13.5, 18:2.5, 21:8}),
       21: ("", {20:8, 22:6, 24:6}),
       22: ("", {21:6, 28:10}),
       23: ("", {15:19.5, 24:13, 26:2}),
       24: ("", {21:6, 23:13, 27:4}),
       25: ("", {26:6}),
       26: ("", {23:2, 25:6, 30:8}),
       27: ("", {24:4, 28:6, 31:6}),
       28: ("", {22:10, 27:6}),
       29: ("", {30:6}),
       30: ("", {26:8, 29:6, 31:13}),
       31: ("", {27:6, 30:13, 32:4.5, 33:2.5}),
       32: ("", {31:4.5, 33:3.5}),
       33: ("", {31:2.5, 32:3.5})
}

# Demo: shortest route from vertex 21 to vertex 28 and its length.
print(DPath(gr2, 21, 28))
| [
"noreply@github.com"
] | noreply@github.com |
a396adede5315997c758c190ed6df97b6a084566 | 37c53a0d1ab8395c1a0ebf95a1368ca131d1dd81 | /editor.py | 697f43320c4d3afe267eb95d4a4a9f299789111a | [
"Apache-2.0"
] | permissive | JonathanQuang/Wifi-Basher-Windows10 | b871ebf1ee704b44dd4843fff0dfead31a404142 | 951f0f4881ae1ddd4595ea5b9d9b16926b7029d4 | refs/heads/master | 2021-01-24T16:53:34.319292 | 2018-03-28T05:13:42 | 2018-03-28T05:13:42 | 123,218,643 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py |
xmlFileString = "INSERT XML FILE NAME HERE"  # NOTE(review): placeholder — point this at the target wlan-profile XML
# Candidate characters for brute-forcing the passphrase, in increment order.
charset="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789-_"

# Output of the last connectivity check (written by a separate ping step).
f = open("pingResult.txt","r")
pingResultText=f.read()
#print pingResultText
f.close()
def isPingBroken():
    # Prints "yes" (and advances the candidate passphrase in the XML) when
    # the saved ping output indicates failure; prints "no" otherwise.
    # NOTE(review): `> 0` misses a match at index 0 — confirm `!= -1`
    # wasn't the intended test.
    if pingResultText.find("could not find") > 0:
        #print "ping Failed"
        modifyXML()
        print "yes"
    else:
        print "no"
def findModificationIndex(inputString):
    """Pick which character position of the passphrase to increment next.

    Finds the first occurrence of the final charset character; if absent,
    the last position is chosen, otherwise the position just before it.
    """
    marker = inputString.find(charset[-1])
    if marker == -1:
        return len(inputString) - 1
    return marker - 1
def modifyXML():
    # Read the wlan-profile XML and "increment" the passphrase between the
    # <keyMaterial> tags, odometer-style, using the charset defined above.
    e = open(xmlFileString,'r')
    xmlResult = e.read()
    # 13 == len('<keyMaterial>'), so slicing starts right after the open tag.
    beginningIndex=xmlResult.find('<keyMaterial>')
    endIndex = xmlResult.find('</keyMaterial>')
    passPhrase = xmlResult[beginningIndex+13:endIndex]
    #print passPhrase
    #normal case
    modificationIndex = findModificationIndex(passPhrase)
    if modificationIndex == -1:
        # Wrap-around: reset to a string of the first charset character,
        # one character LONGER than the previous passphrase.
        subPhrase = charset[0]
        phraseLength = len(passPhrase)
        index = 0
        while (index < phraseLength):
            subPhrase = subPhrase + charset[0]
            index += 1
        passPhrase = subPhrase
    else:
        #print passPhrase[modificationIndex]
        # Advance the chosen character to its successor in charset.
        # NOTE(review): if workingChar is charset's LAST character this
        # indexes one past the end (IndexError) — confirm that
        # findModificationIndex() makes that case unreachable.
        workingChar = passPhrase[modificationIndex]
        newWorkingChar = charset.find(workingChar)+1
        passPhrase = passPhrase[0:modificationIndex] + charset[newWorkingChar] + passPhrase[modificationIndex+1:len(passPhrase)]
        #print passPhrase
        #print workingChar + charset[newWorkingChar]
    # Splice the new passphrase back between the tags and rewrite the file.
    newXMLResult=xmlResult[0:beginningIndex+13] + passPhrase + xmlResult[endIndex:len(xmlResult)]
    #print newXMLResult
    e.close()
    e = open(xmlFileString,'w')
    e.write(newXMLResult)
    e.close()
isPingBroken() | [
"jquang@stuy.edu"
] | jquang@stuy.edu |
5168e1c2d9374e94ddd524960cd3e1565aa75bfa | 1ce65c3c08691cc3d0733b7b918bfce0a1683ea6 | /testFunction2.py | a99f1c0cf41501fdaa9782f6a617030692f881fd | [] | no_license | lf-xxxyyy/lfpy | db4d521b43d88d129eb2612c17559dcdf5056546 | 30d3ad7534923a2d78efeb2d73c494fc24cb879f | refs/heads/master | 2020-06-12T23:45:16.630034 | 2017-01-16T14:10:43 | 2017-01-16T14:10:43 | 75,477,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | from testFunction import my_abs
from testFunction import my_hello
from testFunction import power
from testFunction import enroll
from testFunction import add_end
from testFunction import calc
print(my_hello('tony'))
print (my_abs(100))
print(my_abs(-200))
print (power(14))
print(power(10, 4))
enroll('tony', "male")
print (add_end())
print (calc((1, 2, 3)))
print(calc(3,4,5)) | [
"liufeng.lf@aliyun.com"
] | liufeng.lf@aliyun.com |
53f875bb56b97a81f14c3124ae6390204fd8cef4 | 6f041cfcadc66206a00eca5eafb1378fe261d2dd | /8x26tools/tools/ramdump-parser/linux_list.py | f7168beb4093aa79089384756f945a3d2469847f | [] | no_license | kalmuthu/slos | bf857aaa80c33f0a59361614702740c46fa20d01 | 7516632037f788b00e1137619b88ecca1ac66fa3 | refs/heads/master | 2021-01-18T03:38:55.509889 | 2017-02-26T15:24:34 | 2017-02-26T15:24:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | # Copyright (c) 2013, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from print_out import *
'''
struct list_head {
struct list_head *next, *prev;
};
'''
def get_list_offsets(ram_dump):
    """Return (next_offset, prev_offset) for the members of struct list_head.

    Registers the struct members with the ram dump's offset table, then
    queries each member's offset.
    """
    offsets = [
        ('((struct list_head *)0x0)', 'next', 0, 0),
        ('((struct list_head *)0x0)', 'prev', 0, 0),
    ]
    ram_dump.setup_offset_table(offsets)
    next_off = ram_dump.get_offset_struct(offsets[0][0], offsets[0][1])
    prev_off = ram_dump.get_offset_struct(offsets[1][0], offsets[1][1])
    return next_off, prev_off
class ListWalker(object):
    '''
    Iterates a kernel `struct list_head` circular list inside a ram dump.

    ram_dump: Reference to the ram dump
    node_addr: The address of the first element of the list
    list_elem_offset: The offset of the list_head in the structure that this list is container for.
    next_offset: The offset for the next pointer in the list
    prev_offset: The offset for the prev pointer in the list
    '''
    def __init__(self, ram_dump, node_addr, list_elem_offset, next_offset, prev_offset):
        self.LIST_OFFSETS = [
            ('((struct list_head *)0x0)', 'next', 0, 0),
            ('((struct list_head *)0x0)', 'prev', 0, 0),
        ]
        self.LIST_NEXT_IDX = 0
        self.LIST_PREV_IDX = 1
        self.ram_dump = ram_dump
        self.next_offset = next_offset
        self.prev_offset = prev_offset
        self.list_elem_offset = list_elem_offset
        self.last_node = node_addr   # walking stops once we loop back here
        self.seen_nodes = []         # visited addresses, for cycle detection

    def walk(self, node_addr, func):
        '''Call func(container_addr) for each node, following `next` links.'''
        if node_addr == 0:
            return
        func(node_addr - self.list_elem_offset)
        successor = self.ram_dump.read_word(node_addr + self.next_offset)
        if successor == self.last_node:
            return
        if successor in self.seen_nodes:
            print_out_str ('[!] WARNING: Cycle found in attach list for IOMMU domain. List is corrupted!')
        else:
            self.seen_nodes.append(node_addr)
            self.walk(successor, func)
| [
"chungae9ri@gmail.com"
] | chungae9ri@gmail.com |
94320151a2602c398d57f65c3e6993dc7b9f706e | 11099a71c685f04a207cdd119031d20ba0e7e670 | /test.py | 57ecb2f07030055d6177ec934a3a338c97a0939c | [] | no_license | zeniconcombres/exercises | d4a0ea8ae82f9b013de0b5b6e47200882566fc7d | ee5c719ea201be34f2272d45211644d72038e7e7 | refs/heads/master | 2023-04-19T00:26:34.595950 | 2021-03-13T06:23:25 | 2021-03-13T06:23:25 | 347,286,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | test = str(input("What is your name?\n"))
print("Hello %s!" % test)
greeting = str(input("How are you?\n")) | [
"email.me.inez@gmail.com"
] | email.me.inez@gmail.com |
cbfe59adc4ccebcfac17c721ef9cd51d45ee2c74 | 68c90ef853ea415a3c43ca10ebc9c23656907e10 | /list/threeSumClosest.py | 143151efa28bb9b18de5a53e1533e3247e79909c | [] | no_license | NJ-zero/LeetCode_Answer | 73889f46f4cd0c08f60a1e556c29114495062b2b | 1040b5dbbe509abe42df848bc34dd1626d7a05fb | refs/heads/master | 2021-08-06T04:33:39.890568 | 2020-04-14T09:59:39 | 2020-04-14T09:59:39 | 145,671,777 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | # coding=utf-8
# Time: 2019-11-18-17:46
# Author: dongshichao
'''
16. 最接近的三数之和
给定一个包括 n 个整数的数组 nums 和 一个目标值 target。
找出 nums 中的三个整数,使得它们的和与 target 最接近。
返回这三个数的和。假定每组输入只存在唯一答案。
例如,给定数组 nums = [-1,2,1,-4], 和 target = 1.
与 target 最接近的三个数的和为 2. (-1 + 2 + 1 = 2).
思路:
先排序 后 双指针
固定的ans = 0 + 1 +2
遍历数组,i + i+1 + r
如果三个和大于 target r-1 小于 则 l=i+1 右移 加一
如果 sum==target 返回target
判断 sum和target 之间距离是不是小于 ans和target的距离,更小则,更新ans = sum
'''
class Solution(object):
    def threeSumClosest(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int

        Sort the array, fix nums[i], then close in on the remainder with a
        two-pointer scan.  Fixes vs. the original: the leftover debug
        ``print`` calls are removed, and the locals are renamed so the
        ambiguous ``l`` no longer appears.
        """
        nums.sort()
        n = len(nums)
        # Best sum seen so far; seeded with the first possible triple.
        ans = nums[0] + nums[1] + nums[2]
        for i in range(n):
            lo, hi = i + 1, n - 1
            while lo < hi:
                res = nums[i] + nums[lo] + nums[hi]
                if abs(target - res) < abs(target - ans):
                    ans = res
                if res > target:
                    hi -= 1
                elif res < target:
                    lo += 1
                else:
                    # Exact hit: nothing can be closer.
                    return target
        return ans
s= Solution()
print(s.threeSumClosest([1,1,1,0],-100))
| [
"dongshichao@qutoutiao.net"
] | dongshichao@qutoutiao.net |
63071d56bf62b4aef145af25d7b33009c19fb9e7 | 58359b8cc618c20f8561779a9e773308befa10b7 | /show_PT.py | 8aad340e343f90adeb3a8502e48666db956183d0 | [] | no_license | Cho-sei/Mystudy | 2cdc4e7c0314148444ce7bc978be85e2d409f445 | d61b82ba3f44f402ff1237fa4b19e9c4b147a45b | refs/heads/master | 2022-03-31T21:57:48.681259 | 2019-12-17T04:20:16 | 2019-12-17T04:20:16 | 196,204,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | import pandas as pd
import matplotlib.pyplot as plt
import sys
# Load the pursuit-time results for the session named on the command line
# and plot the pre -> post change in mean PTime for each hand side by side.
# Fix: the original duplicated the left/right plotting code verbatim; the
# two branches are folded into one loop.
data = pd.read_csv('result/' + sys.argv[1] + '_PT.csv')
for position, hand in enumerate(['left', 'right'], start=1):
    plt.subplot(1, 2, position)
    pre = data[(data.timing == 'pre') & (data.hand == hand)].PTime.mean()
    post = data[(data.timing == 'post') & (data.hand == hand)].PTime.mean()
    # Plot the change relative to the pre-test baseline (0).
    plt.plot(['pre', 'post'], [0, post - pre])
    plt.ylim(-3, 1)
    plt.title(hand)
plt.show()
"knct0420@gmail.com"
] | knct0420@gmail.com |
68844cb9f643ffa4e036fe891ec3864aae7070f0 | bb898753e0de090db0614bad3ff5d6b8b4050fda | /week_2_if_n_while/reverse.py | c6bd409b3e8be32f78bd43fc95ccc5a798e66a29 | [] | no_license | MRS88/python_basics | 4c2fb7fffed3542b0a4739934eb771f49be720e4 | 20995e267c67077e4b78ebd05b4ddd4506295c59 | refs/heads/master | 2023-04-03T18:48:37.524741 | 2021-03-31T18:35:32 | 2021-03-31T18:35:32 | 347,826,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | '''Переставьте цифры числа в обратном порядке.'''
# Read a number from stdin and print its digits in reverse order.
n = input()
print(int(n[::-1]))
| [
"salavat.mr@gmail.com"
] | salavat.mr@gmail.com |
c5ae206215093773715894fb6909c2cb1019ce6a | 221729e86d3cef6af170bd969deddceabbf6c16f | /steam_gsi/events.py | 995b92b4741922962b788a92f3fc0dea74361078 | [] | no_license | Natsku123/steam-gsi.py | dbc53c3bafb19256ed21976e52cf121d2cd85260 | f6f2cebb2ce4a33c2deed6bfebaba682ca9462c2 | refs/heads/main | 2023-05-31T16:31:24.557959 | 2021-06-30T12:53:50 | 2021-06-30T12:53:50 | 378,603,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | from .games import Base
def event_trigger(gs: Base):
    # Stub hook: receives the current game state object; no behavior yet.
    pass
| [
"max@meckl.in"
] | max@meckl.in |
0dff6a1e70b3e0c2cc329b7b201add7b52e52901 | f814b8069fe2f5fe14fe5767db68c2385e504cd1 | /jobsProject/jobsProject/urls.py | 5a5a3c29f565815c55d6bac0efae58ba00b62f0a | [] | no_license | sohaibmansuri/jobs-project | adb2184447db9d2a3274e52829eabe7fddf30f7f | e384aede5646b542cb37dfc3c8d424d6feae4387 | refs/heads/master | 2023-03-20T07:05:17.639843 | 2021-03-22T10:11:47 | 2021-03-22T10:11:47 | 350,303,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | """jobsProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from testApp import views
# One route per city job board, each backed by its own view in testApp.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.home),
    url(r'^jprjobs/', views.jaipur),  # Jaipur listings
    url(r'^hydjobs/', views.hyd),  # Hyderabad listings
    url(r'^banglorejobs/', views.banglore),  # Bangalore listings
    url(r'^delhijobs/', views.delhi),  # Delhi listings
]
| [
"sohaibmansuri2@gmail.com"
] | sohaibmansuri2@gmail.com |
93b77f70dfda6c460a897355c421e575f087f2c7 | d1232b3befce740f9c0a8bfb2834bb05f1094999 | /article/migrations/0003_auto_20170715_1140.py | 38319b43e5de87ac6877c2983d5a9a6ea0b8f3ff | [] | no_license | Samarth08/first | 8a31eb24979172d49a584cb7bb9e25f210bd6107 | 13b73c09c95ea20d64bada9e41251afb0323c7dd | refs/heads/master | 2021-01-23T09:10:58.488652 | 2017-09-06T05:11:43 | 2017-09-06T05:11:43 | 102,564,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-15 06:10
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Art.picture to a FileField stored under /media/photos."""
    dependencies = [
        ('article', '0002_art_picture'),
    ]
    operations = [
        # Auto-generated AlterField; default=0 with preserve_default=False
        # only back-fills existing rows during the migration run.
        migrations.AlterField(
            model_name='art',
            name='picture',
            field=models.FileField(default=0, storage=django.core.files.storage.FileSystemStorage(location='/media/photos'), upload_to=''),
            preserve_default=False,
        ),
    ]
| [
"samarthsah@Samarths-MacBook-Pro.local"
] | samarthsah@Samarths-MacBook-Pro.local |
edff97ca61c1b4e2b30b5dc729f5248998193855 | 9a320ffbd992d09c7767221339153af1494c72f4 | /ecommerce/lib/python3.6/hashlib.py | e43cedba2aec0dc40a199d1c255c2ee174e8528b | [] | no_license | carlosblancosierra/ecommerce-cfe | 520c7baf2a10c7e0a83459de4ff31fedbec983e4 | 9538034ad0e39dae226ebc350b38fbe20ffe4c18 | refs/heads/master | 2022-12-18T16:45:17.645842 | 2018-06-14T01:29:03 | 2018-06-14T01:29:03 | 122,151,270 | 0 | 0 | null | 2022-12-08T00:53:45 | 2018-02-20T03:33:13 | Python | UTF-8 | Python | false | false | 54 | py | /Users/carlosblanco/anaconda3/lib/python3.6/hashlib.py | [
"carlosblancosierra@gmail.com"
] | carlosblancosierra@gmail.com |
47a785f3921367e2e31811da2a583af9fc3f97f4 | bfdbdaec2d61df45098f51dbb55438d67f9767a3 | /states/StateAddRegionalAcc.py | dd29261cc5c7a5c09d9fab198f17b40816cbd0e4 | [] | no_license | doroshenko-online/autobot | 46cc65b070eb86d3e26e63ff29f2862f4ae8724f | 48fa1fa224c6b537e364c8d78fb16ac05960aa63 | refs/heads/main | 2023-06-25T06:17:56.788216 | 2021-07-25T10:05:38 | 2021-07-25T10:05:38 | 382,090,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from aiogram.dispatcher.filters.state import State, StatesGroup
class AddRegionalAcc(StatesGroup):
    # Single-state FSM group: waiting for the user to send a chat id.
    wait_for_chat_id = State()
"doroshenko@tutanota.com"
] | doroshenko@tutanota.com |
0e0b02856e4b9275bbad24a7461c2c793b231d87 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_208/81.py | 5eb844629edbf0f9bad243963bf552da90da0e7c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | for t in range(int(input())):
n, q = (int(i) for i in input().split())
hs = [[int(i) for i in input().split()] for j in range(n)]
ds = [[int(i) for i in input().split()][j + 1] for j in range(n - 1)]
input()
input()
tc = [0] * n
tc[n - 1] = 0
for i in range(n - 2, -1, -1):
min = -1
sd = 0
for j in range(1, n - i):
sd += ds[i + j - 1]
if sd > hs[i][0]:
break
if tc[i + j] == -1:
continue
tm = tc[i + j] + sd / hs[i][1]
if min == -1 or tm < min:
min = tm
tc[i] = min
print("Case #%d: %f" % (t + 1, tc[0])) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
cc0cf070c1d0508310505b023dd2e2a65e030828 | 33a2ef2e0d078656afe2e79ddb941778a5439aa0 | /rent_a_dent_app/rent_a_dent/views.py | aedbcbd23048287aef3f789ff52aa1f04c64a037 | [] | no_license | leszekemil/rent_a_dent-rekrutacja | 8b0b02b0e4f811f740147a5aed1894be12f0a01a | eec4a7075f96941fbb70fd89dfd0e0f243cbc3a1 | refs/heads/master | 2023-02-09T05:47:32.034639 | 2021-01-04T10:34:04 | 2021-01-04T10:34:04 | 326,598,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,155 | py | from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from django.shortcuts import render
from django.views import View
from django.shortcuts import redirect
from django.views.generic import UpdateView, DeleteView
from rest_framework import generics
from rent_a_dent.models import Visit
from rent_a_dent.forms import VisitForm, VisitPerDayForm
from rent_a_dent.serializers import VisitSerializer
# Create your views here.
class LandingPage(View):
    """Render the public landing page."""
    def get(self, request):
        return render(request, "main.html")
class VisitsList(View):
    """List all visits, ordered chronologically by day."""
    def get(self, request):
        visits_lst = Visit.objects.all().order_by('day')
        return render(request, 'visits.html', {'visits': visits_lst})
# ADD VIEW
class AddVisit(View):
    """Show the visit-creation form and handle its submission."""

    def get(self, request):
        # Unbound form for a fresh page load.
        return render(request, 'add_visit.html', {'form': VisitForm()})

    def post(self, request):
        bound_form = VisitForm(request.POST)
        if not bound_form.is_valid():
            # Re-render with the bound form so field errors are shown.
            return render(request, 'add_visit.html', {'form': bound_form})
        bound_form.save()
        return redirect('visits_list')
# UPDATE & DELETE
class UpdateVisitView(PermissionRequiredMixin, UpdateView):
    """Edit an existing visit; gated on the 'update_visit' permission."""
    permission_required = ['rent_a_dent.update_visit']
    model = Visit
    form_class = VisitForm
    template_name = 'add_visit.html'
    success_url = '/visits/'
class DeleteVisitView(PermissionRequiredMixin, DeleteView):
    """Confirm and delete a visit; the form's 'Abort' button cancels."""
    permission_required = ['rent_a_dent.delete_visit']
    model = Visit
    template_name = "delete_view.html"
    success_url = '/visits/'
    def get_context_data(self, **kwargs):
        contex = super().get_context_data(**kwargs)
        # Label used by the template for the confirm button.
        contex.update({'button_name': 'DELETE'})
        return contex
    def post(self, request, *args, **kwargs):
        # 'del' carries the submit button's value; 'Abort' skips deletion
        # and just bounces back to the success URL.
        if request.POST['del'] == 'Abort':
            self.object = self.get_object()
            success_url = self.get_success_url()
            return redirect(success_url)
        return self.delete(request, *args, **kwargs)
# DETAILS VIEW
class VisitDetails(View):
    """Render the detail page for a single visit."""
    def get(self, request, visit_id):
        # NOTE(review): .get() raises Visit.DoesNotExist (HTTP 500) for an
        # unknown id; django.shortcuts.get_object_or_404 would give a 404.
        visit = Visit.objects.get(id=visit_id)
        context = {
            'visit': visit,
        }
        return render(request, 'details_visit.html', context)
# SERIALIZERS
class VisitListViewSerializer(generics.ListCreateAPIView):
    """REST endpoint: list all visits or create a new one."""
    queryset = Visit.objects.all()
    serializer_class = VisitSerializer
class VisitViewSerializer(generics.RetrieveUpdateDestroyAPIView):
    """REST endpoint: retrieve, update or delete a single visit."""
    queryset = Visit.objects.all()
    serializer_class = VisitSerializer
class PerDay(View):
    """Show the visits scheduled on a user-selected day."""

    def get(self, request):
        """Render the empty day-selection form."""
        form = VisitPerDayForm()
        return render(request, 'per_day_visit.html', {'form': form})

    def post(self, request):
        """Validate the selected day and list that day's visits.

        Fixes vs. the original: the dead ``request.method == 'POST'``
        check and the duplicate form instantiation are removed, and an
        invalid form now re-renders the page instead of implicitly
        returning None (which produced an HTTP 500).
        """
        form = VisitPerDayForm(request.POST)
        if form.is_valid():
            date = str(form.cleaned_data['day'])
            date_count = Visit.objects.filter(day=date)
            return render(request, 'per_day_visit.html', {'date_count': date_count})
        return render(request, 'per_day_visit.html', {'form': form})
"nortal@gmail.com"
] | nortal@gmail.com |
6c7d033f0a21fcd56c5a711729f9b0b772a27e13 | 6d7e8a61a85baf858805c1ae47328a27b5bbb4bd | /send_notifs_cron.py | 21a6f9d84b173a26d8e7d4a079ba3f5e9144f2ca | [] | no_license | shannon-heh/TigerSnatch | 8c8f681d7e1d27497cc685b6e560b24b9d732462 | b91415db1035b4709e605336c9221414d565ae6d | refs/heads/main | 2023-06-02T10:54:17.707749 | 2021-06-27T05:41:46 | 2021-06-27T05:41:46 | 341,774,248 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # ----------------------------------------------------------------------
# send_notifs_cron.py
# Manages regular execution of the email notification script using a
# cron wrapper. Disable/enable using admin panel or _set_cron_status.py.
#
# Set execution interval in config: NOTIFS_INTERVAL_SECS
# ----------------------------------------------------------------------
from sys import path
path.append('src') # noqa
from send_notifs import cronjob
from config import NOTIFS_INTERVAL_SECS
from apscheduler.schedulers.blocking import BlockingScheduler
# Run the notification cronjob at the configured fixed interval.
# BlockingScheduler.start() never returns: this process runs until killed.
scheduler = BlockingScheduler()
scheduler.add_job(cronjob, 'interval', seconds=NOTIFS_INTERVAL_SECS)
scheduler.start()
| [
"nicholaspad@gmail.com"
] | nicholaspad@gmail.com |
68152eb0046118464c20ab2b166aa716745123ad | 25194bd77823c25f65553c4400788040e69b1a1f | /okikae_kun.py | a97a70d2ab4031c54d1b64ed4b6830679a738834 | [] | no_license | cabbage63/okikae_kun | 936c1ff7299384a863a359eb4971ee68dee7895b | 00fcbc1c12d7129b5436ac6dcd40aaf7e4276084 | refs/heads/master | 2020-12-24T12:40:00.714906 | 2016-11-06T03:23:24 | 2016-11-06T03:23:24 | 72,967,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import sys
# Rewrite plain-HTTP Amazon image/product URLs in the input file to their
# HTTPS equivalents and save the result to output.txt.
argv = sys.argv
# Context managers replace the original's leaked read handle and manual close.
with open(argv[1]) as src:
    temp_body = src.read()
temp_body = temp_body.replace('http://ecx.images-amazon.com/', 'https://images-fe.ssl-images-amazon.com/')
temp_body = temp_body.replace('http://www.amazon.co.jp/', 'https://www.amazon.co.jp/')
with open('output.txt', 'w') as f:
    f.write(temp_body)
print("finished!")
| [
"e.cabbage63@gmail.com"
] | e.cabbage63@gmail.com |
203559691a9f96446d035b939909c16743b23ade | 1bc1727c17e237c1badafc0233115d5001ff5274 | /audioset/vggish_slim.py | fba0feb693aca9aa24c55ee976ee1ff4eb8d992f | [
"Apache-2.0"
] | permissive | sshleifer/object_detection_kitti | 09b0ae37608f31491f79c537916cea8fd446f698 | 7f8c93c96a7ab5e150f217b7c369bec9d4b8bb81 | refs/heads/master | 2022-10-29T19:13:21.657012 | 2019-05-09T06:11:52 | 2019-05-09T06:11:52 | 103,348,300 | 35 | 18 | Apache-2.0 | 2022-10-26T09:39:39 | 2017-09-13T03:23:16 | Python | UTF-8 | Python | false | false | 5,773 | py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the 'VGGish' model used to generate AudioSet embedding features.
The public AudioSet release (https://research.google.com/audioset/download.html)
includes 128-D features extracted from the embedding layer of a VGG-like model
that was trained on a large Google-internal YouTube dataset. Here we provide
a TF-Slim definition of the same model, without any dependences on libraries
internal to Google. We call it 'VGGish'.
Note that we only define the model up to the embedding layer, which is the
penultimate layer before the final classifier layer. We also provide various
hyperparameter values (in vggish_params.py) that were used to train this model
internally.
For comparison, here is TF-Slim's VGG definition:
https://github.com/tensorflow/models/blob/master/slim/nets/vgg.py
"""
import tensorflow as tf
import vggish_params as params
slim = tf.contrib.slim
def define_vggish_slim(training=False):
  """Defines the VGGish TensorFlow model.
  All ops are created in the current default graph, under the scope 'vggish/'.
  The input is a placeholder named 'vggish/input_features' of type float32 and
  shape [batch_size, num_frames, num_bands] where batch_size is variable and
  num_frames and num_bands are constants, and [num_frames, num_bands] represents
  a log-mel-scale spectrogram patch covering num_bands frequency bands and
  num_frames time frames (where each frame step is usually 10ms). This is
  produced by computing the stabilized log(mel-spectrogram + params.LOG_OFFSET).
  The output is an op named 'vggish/embedding' which produces the activations of
  a 128-D embedding layer, which is usually the penultimate layer when used as
  part of a full model with a final classifier layer.
  Args:
    training: If true, all parameters are marked trainable.
  Returns:
    The op 'vggish/embeddings'.
  """
  # Defaults:
  # - All weights are initialized to N(0, INIT_STDDEV).
  # - All biases are initialized to 0.
  # - All activations are ReLU.
  # - All convolutions are 3x3 with stride 1 and SAME padding.
  # - All max-pools are 2x2 with stride 2 and SAME padding.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_initializer=tf.truncated_normal_initializer(
                          stddev=params.INIT_STDDEV),
                      biases_initializer=tf.zeros_initializer(),
                      activation_fn=tf.nn.relu,
                      trainable=training), \
       slim.arg_scope([slim.conv2d],
                      kernel_size=[3, 3], stride=1, padding='SAME'), \
       slim.arg_scope([slim.max_pool2d],
                      kernel_size=[2, 2], stride=2, padding='SAME'), \
       tf.variable_scope('vggish'):
    # Input: a batch of 2-D log-mel-spectrogram patches.
    features = tf.placeholder(
        tf.float32, shape=(None, params.NUM_FRAMES, params.NUM_BANDS),
        name='input_features')
    # Reshape to 4-D so that we can convolve a batch with conv2d().
    net = tf.reshape(features, [-1, params.NUM_FRAMES, params.NUM_BANDS, 1])
    # The VGG stack of alternating convolutions and max-pools.
    net = slim.conv2d(net, 64, scope='conv1')
    net = slim.max_pool2d(net, scope='pool1')
    net = slim.conv2d(net, 128, scope='conv2')
    net = slim.max_pool2d(net, scope='pool2')
    net = slim.repeat(net, 2, slim.conv2d, 256, scope='conv3')
    net = slim.max_pool2d(net, scope='pool3')
    net = slim.repeat(net, 2, slim.conv2d, 512, scope='conv4')
    net = slim.max_pool2d(net, scope='pool4')
    # Flatten before entering fully-connected layers
    net = slim.flatten(net)
    net = slim.repeat(net, 2, slim.fully_connected, 4096, scope='fc1')
    # The embedding layer.
    net = slim.fully_connected(net, params.EMBEDDING_SIZE, scope='fc2')
    # tf.identity gives the embedding a stable public name: 'vggish/embedding'.
    return tf.identity(net, name='embedding')
def load_vggish_slim_checkpoint(session, checkpoint_path):
  """Loads a pre-trained VGGish-compatible checkpoint.
  This function can be used as an initialization function (referred to as
  init_fn in TensorFlow documentation) which is called in a Session after
  initializing all variables. When used as an init_fn, this will load
  a pre-trained checkpoint that is compatible with the VGGish model
  definition. Only variables defined by VGGish will be loaded.
  Args:
    session: an active TensorFlow session.
    checkpoint_path: path to a file containing a checkpoint that is
      compatible with the VGGish model definition.
  """
  # Get the list of names of all VGGish variables that exist in
  # the checkpoint (i.e., all inference-mode VGGish variables).
  # The model is built in a throwaway graph so the caller's default
  # graph is left untouched.
  with tf.Graph().as_default():
    define_vggish_slim(training=False)
    vggish_var_names = [v.name for v in tf.global_variables()]
  # Get the list of all currently existing variables that match
  # the list of variable names we just computed.
  vggish_vars = [v for v in tf.global_variables() if v.name in vggish_var_names]
  # Use a Saver to restore just the variables selected above.
  saver = tf.train.Saver(vggish_vars, name='vggish_load_pretrained')
  saver.restore(session, checkpoint_path)
| [
"plakal@google.com"
] | plakal@google.com |
1041f3b038fd79462b48848f5cbf72db5b84b583 | aeb33e5eeaf9c77167f1af433c2b000de4aafe07 | /helloworld.py | 146695b88402bc954bc05310ddc88da2592d81f5 | [] | no_license | jchapman68/pyworld | 0a9167975b3479371cc44217d3e0510e7c4dc89e | b2d6cbe000091e8827da19071a585590527f0371 | refs/heads/master | 2020-03-22T15:50:56.187094 | 2018-07-12T07:41:53 | 2018-07-12T07:41:53 | 140,283,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | import os
#os.environ["GREETING"] = "Hello Rob"
#myvar = os.environ["GREETING"]
#if myvar == "Hello Rob":
# print ("success")
#else:
# print("fail")
#print os.environ["GREETING"]
#print("Goodbye, World!")
def test_greeting(message):
if message == "foobar":
return message
else:
return "Hello {}".format(message)
| [
"joshua.chapman@ons.gov.uk"
] | joshua.chapman@ons.gov.uk |
5eae4b091b6eef83c71055a0e4d6c073a7746749 | 34c6384d4137003317a5c3ac9d3ef34b831bbf44 | /code/qrcode.py | af5d7b57e7cc69890ca18983ad567125859ede63 | [] | no_license | brekooname/License-Detection-using-QR-Code | c59fe35967f48335d8c138738b1c05ef1ca72930 | 9a7eedaa903a89030127d3ea5c5e16625307ad82 | refs/heads/master | 2023-05-01T04:29:45.884905 | 2021-05-15T12:54:33 | 2021-05-15T12:54:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | import cv2
import numpy as np
from pyzbar.pyzbar import decode
from tabulate import tabulate
import csv
def data_list():
    """Return the 'Name' column of hello.csv as a list of strings."""
    with open('hello.csv', 'r', newline='') as cs:
        return [row['Name'] for row in csv.DictReader(cs)]
def print_data(target=None):
    """Pretty-print the hello.csv row whose first column matches *target*.

    Generalization: *target* is a new optional parameter.  When omitted it
    falls back to the module-global ``myData`` (set by the scanning loop),
    preserving the original zero-argument call sites while letting new
    callers pass the name explicitly.
    """
    if target is None:
        target = myData
    with open('hello.csv', 'r', newline='') as cs:
        reader = csv.DictReader(cs)
        for r in reader:
            li = list(r.values())
            keys = list(r.keys())
            if li[0] == target:
                table = [keys, li]
                print(tabulate(table, headers='firstrow',
                               tablefmt='fancy_grid'))
# Main loop: stream video from an IP camera, decode any QR codes in each
# frame, and mark them green (authorized) or red (unknown).
url = "http://192.168.1.100:8080/video"
#! img = cv2.imread('qrcodes/Megha.png')
cap = cv2.VideoCapture(url)
#! Size of the output window
# NOTE(review): OpenCV frame width/height are properties 3 and 4
# (CAP_PROP_FRAME_WIDTH/HEIGHT); ids 1 and 2 are different properties,
# so these calls may not actually resize anything — confirm.
cap.set(1, 250)
cap.set(2, 250)
myDataList = data_list()
while True:
    #! Reading data on QR code
    success, img = cap.read()
    for code in decode(img):
        #! Decoding the unicode 8-bit data
        myData = code.data.decode('utf-8')
        #! Printing the decoded data
        print(myData)
        #! If data is in the list will display authorized above polygon
        if myData in myDataList:
            myOutput = 'Authorized'
            myColor = (0, 255, 0)
            print(myOutput)
            print_data()
        #! If data is not in the list will display un-authorized above polygon
        else:
            myOutput = 'Un-Authorized'
            print(myOutput)
            myColor = (0, 0, 255)
        #! This is for polygon(green/red for authorized/un-authorized)
        pts = np.array([code.polygon], np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.polylines(img, [pts], True, myColor, 5)
        pts2 = code.rect
        cv2.putText(img, myOutput, (pts2[0], pts2[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, myColor, 2)
        #! End of polygon
    #! Image display
    cv2.imshow('Result', img)
    #! To quit
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"hms@290926.github.com"
] | hms@290926.github.com |
c536b9fd5c1e73cc295090ed7b3acb50d109db16 | fec863b67ec1ae65da7111bd8c77d0ab2ef1f6ce | /movie recommendation system/.history/moviemodel_20210503171215.py | 065bab6744cb5a59f9f2bcad99cc217a20cecea4 | [] | no_license | kannan768/movie-recommendation-system | e6cf71620e25a0185fed3b37896137f1f39b0801 | 7460d440d44e77390e459ab10c535b6971c9c3ab | refs/heads/main | 2023-05-14T02:21:50.930672 | 2021-06-09T05:02:30 | 2021-06-09T05:02:30 | 375,225,316 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,004 | py | import pandas as pd
import numpy as np
from zipfile import ZipFile
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from pathlib import Path
import matplotlib.pyplot as plt
"""##Dataset"""
df = pd.read_csv('ratings.csv', sep=',', encoding='latin-1', usecols=['userId','movieId','rating','timestamp'])
movie_df=df
user_ids = df["userId"].unique().tolist()
user2user_encoded = {x: i for i, x in enumerate(user_ids)}
userencoded2user = {i: x for i, x in enumerate(user_ids)}
movie_ids = df["movieId"].unique().tolist()
movie2movie_encoded = {x: i for i, x in enumerate(movie_ids)}
movie_encoded2movie = {i: x for i, x in enumerate(movie_ids)}
df["user"] = df["userId"].map(user2user_encoded)
df["movie"] = df["movieId"].map(movie2movie_encoded)
num_users = len(user2user_encoded)
num_movies = len(movie_encoded2movie)
df["rating"] = df["rating"].values.astype(np.float32)
min_rating = min(df["rating"])
max_rating = max(df["rating"])
# print(
# "Number of users: {}, Number of Movies: {}, Min rating: {}, Max rating: {}".format(
# num_users, num_movies, min_rating, max_rating
# )
# )
df = df.sample(frac=1, random_state=42)
x = df[["user", "movie"]].values
y = df["rating"].apply(lambda x: (x - min_rating) / (max_rating - min_rating)).values
train_indices = int(0.9 * df.shape[0])
x_train, x_val, y_train, y_val = (
x[:train_indices],
x[train_indices:],
y[:train_indices],
y[train_indices:],
)
EMBEDDING_SIZE = 50
class RecommenderNet(keras.Model):
    """Matrix-factorization recommender: sigmoid(user . movie + biases)."""
    def __init__(self, num_users, num_movies, embedding_size, **kwargs):
        super(RecommenderNet, self).__init__(**kwargs)
        self.num_users = num_users
        self.num_movies = num_movies
        self.embedding_size = embedding_size
        self.user_embedding = layers.Embedding(
            num_users,
            embedding_size,
            embeddings_initializer="he_normal",
            embeddings_regularizer=keras.regularizers.l2(1e-6),
        )
        self.user_bias = layers.Embedding(num_users, 1)
        self.movie_embedding = layers.Embedding(
            num_movies,
            embedding_size,
            embeddings_initializer="he_normal",
            embeddings_regularizer=keras.regularizers.l2(1e-6),
        )
        self.movie_bias = layers.Embedding(num_movies, 1)
    def call(self, inputs):
        # Column 0 is the encoded user id, column 1 the encoded movie id
        # (matching the x = df[["user", "movie"]].values training array).
        user_vector = self.user_embedding(inputs[:, 0])
        user_bias = self.user_bias(inputs[:, 0])
        movie_vector = self.movie_embedding(inputs[:, 1])
        movie_bias = self.movie_bias(inputs[:, 1])
        dot_user_movie = tf.tensordot(user_vector, movie_vector, 2)
        # Add all the components (including bias)
        x = dot_user_movie + user_bias + movie_bias
        # Sigmoid squashes the score into [0, 1], matching the scaled ratings.
        return tf.nn.sigmoid(x)
model = RecommenderNet(num_users, num_movies, EMBEDDING_SIZE)
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(), optimizer=keras.optimizers.Adam(lr=0.001)
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=64,
epochs=5,
verbose=1,
validation_data=(x_val, y_val),
)
# plt.plot(history.history["loss"])
# plt.plot(history.history["val_loss"])
# plt.title("model loss")
# plt.ylabel("loss")
# plt.xlabel("epoch")
# plt.legend(["train", "test"], loc="upper left")
# plt.show()
movie_df = pd.read_csv('movies.csv', sep=',', encoding='latin-1', usecols=['movieId','title','genres'])
def Display(User_id):
    """Print the top-10 movie recommendations for the given user id.

    Fixes vs. the original:
    - the function ignored its ``User_id`` argument and sampled a random
      user via ``df.userId.sample(1)``; the argument is now used.
    - the loop that printed the recommended titles had been commented
      out, leaving only the header; it is restored.
    - the unused ``top_movies_user`` / ``movie_df_rows`` computation
      (which only fed commented-out prints) is removed.
    """
    user_id = User_id
    movies_watched_by_user = df[df.userId == user_id]
    # Candidate movies: everything the user has not rated yet ...
    movies_not_watched = movie_df[~movie_df["movieId"].isin(movies_watched_by_user.movieId.values)]["movieId"]
    # ... restricted to ids known to the encoder (seen during training).
    movies_not_watched = list(
        set(movies_not_watched).intersection(set(movie2movie_encoded.keys())))
    movies_not_watched = [[movie2movie_encoded.get(x)] for x in movies_not_watched]
    user_encoder = user2user_encoded.get(user_id)
    # One (user, movie) row per candidate movie for batch prediction.
    user_movie_array = np.hstack(
        ([[user_encoder]] * len(movies_not_watched), movies_not_watched))
    ratings = model.predict(user_movie_array).flatten()
    top_ratings_indices = ratings.argsort()[-10:][::-1]
    recommended_movie_ids = [
        movie_encoded2movie.get(movies_not_watched[x][0]) for x in top_ratings_indices]
    print("----" * 8)
    print("Top 10 movie recommendations")
    print("----" * 8)
    recommended_movies = movie_df[movie_df["movieId"].isin(recommended_movie_ids)]
    for row in recommended_movies.itertuples():
        print(row.title, ":", row.genres)
# Read the target user id from the command line.
# Fix: the original referenced sys without importing it anywhere in the
# file, which raised NameError at runtime.
import sys

# user_id=input("Please Enter User id")
user_id = int(sys.argv[1])
Display(user_id)
"kannanbsk1609080@gmail.com"
] | kannanbsk1609080@gmail.com |
cf160eb097ceba05d1280e889a1944044246a65f | 65b604f19644628791f7f286538d2ac80deba225 | /user_app/urls.py | ce570b5f957810e87479e74aa48ad75a876ebc84 | [] | no_license | Anish8/instaclone | e68d6de1de0e21cb4560ce4f84d52222d0903ba9 | c8c831776f231c4f09052fe87b6eb8ba91fc272d | refs/heads/master | 2023-04-30T07:05:47.167340 | 2019-12-23T03:13:53 | 2019-12-23T03:13:53 | 229,521,803 | 0 | 0 | null | 2023-04-21T20:43:37 | 2019-12-22T05:18:10 | HTML | UTF-8 | Python | false | false | 929 | py | """Tasbir URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from user_app.views import display,login,logout
# Namespace so routes reverse as 'user:display', 'user:login', 'user:logout'.
app_name= 'user'
urlpatterns = [
    # path('', index),
    path('display/',display, name='display'),
    path('login/',login, name="login"),
    path('logout/',logout, name="logout")
]
| [
"anishsubedi9@gmail.com"
] | anishsubedi9@gmail.com |
128c8218b9b250556682a1630279f115ed7218c6 | 922dba5cbe7b261cad13f0aa520d365307a053cd | /string.py | 712a7aeacefcbe60fe9424e05f2477f0ed15b8b7 | [] | no_license | dtwin/shiyanlou_code | 2cf7364896270b63543560d77be75ce90a4541b8 | 5db23b577f11eb4ca81564176a6bf8a7312db1a1 | refs/heads/master | 2020-07-01T14:54:41.123468 | 2019-11-21T08:16:45 | 2019-11-21T08:16:45 | 201,202,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | s='I am Chinese'
s="Here is line \...split in two lines"
print(s)
print("""Usage: thingy [OPTIONS]
-h Display
-H hostname hostname
""")
a= "shi yan lou"
print(a.title())
print(a.upper())
print(a.lower())
print(a.swapcase()) #jiaohuan daxiaoxie
print(a.isalnum()) #jiancha shifou zhiyou zimu he shuzi
print(a.isalpha()) #jiancha shifou zhiyou zimu
print(a.isdigit()) #检查是否全是数字
print(a.islower())
print(a.istitle()) #检查是否全是标题样式
print(a.isupper())
b="We all love python"
print(s.split())
c=" shiyanlou:is:waiting"
# Fix: the original line read `print(c.spilt(':')` — a misspelled method
# name plus a missing closing parenthesis (a SyntaxError at load time).
print(c.split(':'))
#print(c.rstrip('abs')
# "-".join("GNU/Linux is great".split()) #用-连接字符 但是不成功。。。
# d= ab c\n
#print(c.strip()) #剥离首尾空格和换行符
#print(f.lstrip("cwsd.") #删除在字符串左边出现的 c w s d . 字符
#print(f.rstrip("cnwdi.")) #删除在字符串右边出现的 c n w d i 字符
#g="abcdefedcba"
#print(g.lstrip("abc")
#print(g.rstrip("abc")
#p="faulty for a reason
#p.find("for")
#p.find("fora")
#p.startswith("fa")
#p.endswith("reason")
| [
"495271301@qq.com"
] | 495271301@qq.com |
4c592d51f61cf481cc775b42cd08c2ac8509d63a | d2f50124ff3bec70b9b3139ecb063b06e526781d | /biable/migrations/0063_auto_20170209_1210.py | b1f585b6e2133d7294f9972748a301e53108e589 | [] | no_license | odecsarrollo/odecopack-componentes | e8d993f089bf53bbf3c53d1265e70ac5c06b59b8 | b583a115fb30205d358d97644c38d66636b573ff | refs/heads/master | 2022-12-12T00:33:02.874268 | 2020-08-13T18:45:01 | 2020-08-13T18:45:01 | 189,262,705 | 0 | 0 | null | 2022-12-08T11:23:46 | 2019-05-29T16:37:21 | Python | UTF-8 | Python | false | false | 470 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-09 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 1.10.5): alters
    # `grupocliente.nombre` to CharField(max_length=120, unique=True).
    # Generated code -- avoid hand-editing the operations.

    # Must be applied after migration 0062 of the `biable` app.
    dependencies = [
        ('biable', '0062_auto_20170209_1032'),
    ]

    operations = [
        migrations.AlterField(
            model_name='grupocliente',
            name='nombre',
            field=models.CharField(max_length=120, unique=True),
        ),
    ]
| [
"fabio.garcia.sanchez@gmail.com"
] | fabio.garcia.sanchez@gmail.com |
03b32bd18ba48df51ff3afa7c4c8d2f65e28caeb | 7b9b7367a98fc29ef173b37abd611065bfda289e | /placa.py | 8c1d03d54f92059d28baab780275f9f3d7154e8a | [] | no_license | smorap/Proyecto_Proc._Imagenes | 30e98273921a0b1b1fcc80ac156463ef1b95499d | eca0344e0079f02edda300fc62b0105ae6e3d2f4 | refs/heads/main | 2023-01-07T20:48:14.090713 | 2020-11-03T01:35:52 | 2020-11-03T01:35:52 | 309,530,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | import cv2
if __name__ == '__main__':
    # Locate the licence-plate-sized contour in a sample photo and save every
    # intermediate stage (grayscale, threshold, annotated contours) as a JPEG.
    source_path = "D:/Datos/sergio/UNIVERSIDAD/2020/Proc_ Imagens/Poryecto/Imagenes_fuente/Foto_2.PNG"
    original = cv2.imread(source_path)
    annotated = original.copy()
    grayscale = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
    cv2.imwrite('placa_grises.jpg', grayscale)
    ret2, binary = cv2.threshold(grayscale, 150, 200, cv2.THRESH_BINARY)
    cv2.imwrite('placa_umbral.jpg', binary)
    contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Outline only the contours whose area falls inside the plate-sized band.
    for idx, contour in enumerate(contours):
        if 4000 < cv2.contourArea(contour) < 5050:
            cv2.drawContours(annotated, contours, idx, (200, 0, 255), 1)
    cv2.imshow("Image", annotated)
    cv2.imwrite('placa_contorno.jpg', annotated)
    cv2.waitKey(0)
"noreply@github.com"
] | noreply@github.com |
9d63e54252a9268a3d5a8ae57963c481936b808d | 1f8d7bcbd223e86ed862a89e0cd6b2112f9112a3 | /habataku300checker.py | 20455cfb82ff3d34d9bba501ab948db2e1cfdfef | [] | no_license | howther111/utility | c94d7a1036e7365bbeed3b3c454351752242bdb1 | b074a67b8aa9fe5593a80071b1ef98ce193d835b | refs/heads/master | 2021-10-16T18:20:37.136517 | 2019-02-12T13:27:50 | 2019-02-12T13:27:50 | 113,938,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,632 | py | import openpyxl
import os # osモジュールのインポート
def year_change(gengo, year):
    """Convert a Japanese era name plus era year into a Gregorian year string.

    gengo: era name -- one of '明治' (Meiji), '大正' (Taisho),
           '昭和' (Showa), '平成' (Heisei).
    year:  era year as a string; '元' (gannen, the first year) and the empty
           string both map to the era's first Gregorian year.
    Returns the Gregorian year as a string, or None for an unknown era
    (the original returned None implicitly in that case).
    """
    # Per era: (offset added to a numeric era year, first Gregorian year).
    eras = {
        '明治': (1867, '1868'),  # fixed: gannen previously returned '1988'
                                 # (copy/paste from Heisei); Meiji 1 is 1868
        '大正': (1911, '1912'),
        '昭和': (1925, '1926'),
        '平成': (1988, '1989'),
    }
    if gengo not in eras:
        return None
    offset, first_year = eras[gengo]
    if year != '元' and year != '':
        return str(int(year) + offset)
    return first_year
if __name__ == '__main__':
    # Proofreading checker for "Habataku" company-profile Excel sheets: opens
    # every workbook under `work_folder` and prints a message (cell address +
    # Japanese instruction) for each style/format rule violation it finds.
    # os.listdir(path) returns a list of every file and directory directly
    # under the given path.
    work_folder = 'C:\\Users\\atsuk\\PycharmProjects\\study\\utility\\work'
    files = os.listdir(work_folder)
    for file_name in files:
        wb = openpyxl.load_workbook(work_folder + '\\' + file_name)
        sheet = wb.get_sheet_by_name(wb.get_sheet_names()[0])
        print(file_name + ' チェック開始')
        # (1) No space allowed between the company name and 株式会社.
        # NOTE(review): the four branches appear to differ only in full-width
        # vs half-width spaces -- confirm the literals survived copy/paste.
        company_name = sheet['C11'].value
        if '株式会社 ' in company_name:
            print('C11:社名と株式会社の間は詰める')
        elif '株式会社　' in company_name:
            print('C11:社名と株式会社の間は詰める')
        elif ' 株式会社' in company_name:
            print('C11:社名と株式会社の間は詰める')
        elif '　株式会社' in company_name:
            print('C11:社名と株式会社の間は詰める')
        if '株式会社' not in company_name:
            print('C11:「株式会社」の表記なし')
        # (2) Phone and FAX numbers must contain hyphens; URL must be present.
        tel = sheet['C15'].value
        if '-' not in tel:
            print('C15:電話番号にハイフン入れる')
        fax = sheet['I15'].value
        if '-' not in fax:
            print('I15:FAX番号にハイフン入れる')
        url = sheet['C16'].value
        if 'http' not in url:
            print('C16:URL記載なし')
        # (3) Representative's family and given name must be separated by a
        # full-width space.
        name = sheet['C13'].value
        if '　' not in name:
            print('C13:代表者の姓と名の間に全角スペース入れる')
        # (4) Capital amount should contain "," thousand separators
        # (marked "not working correctly" in the original comment).
        capital = sheet['K18'].value
        if len(str(capital)) > 3 and ',' not in str(capital):
            print('K18:資本金の表記「,」入れる ' + str(capital))
        # (5) (undecidable -- left unimplemented in the original)
        # (6) Request a fix when a heading is identical to the matching
        # summary ("point") text.
        point = [sheet['D22'].value, sheet['D23'].value, sheet['D24'].value]
        midashi_list = ['ア', 'イ', 'ウ', 'エ', 'オ', 'カ', 'キ', 'ク', 'ケ', 'コ']
        midashi_position_list = ['D27', 'D30', 'D34', 'D37', 'D41', 'D44', 'D49', 'D53', 'D57', 'D61']
        for i in range(len(midashi_list)):
            if point[0] == midashi_list[i]:
                if sheet[midashi_position_list[i]].value == sheet['E22'].value:
                    print(midashi_position_list[i] + ':見出しがポイント1と同一です')
            if point[1] == midashi_list[i]:
                if sheet[midashi_position_list[i]].value == sheet['E23'].value:
                    print(midashi_position_list[i] + ':見出しがポイント2と同一です')
            if point[2] == midashi_list[i]:
                if sheet[midashi_position_list[i]].value == sheet['E24'].value:
                    print(midashi_position_list[i] + ':見出しがポイント3と同一です')
        # (7) Wording rules:
        #   unify "自社"/"弊社" -> "同社",
        #   drop honorific "様" after client names,
        #   unify "です"/"ます" (polite) -> "である" (plain) style,
        #   convert era dates (明治/大正/昭和/平成) to Western years.
        # Only the sections actually published are checked:
        text_position_list = ['C21', 'E22', 'E23', 'E24', 'C25']
        midashi_num_list = []
        naiyo_num_list = []
        for j in point:
            if j == 'ア':
                text_position_list.append('D27')
                text_position_list.append('D28')
                midashi_num_list.append('D27')
                naiyo_num_list.append('D28')
            if j == 'イ':
                text_position_list.append('D30')
                text_position_list.append('D31')
                midashi_num_list.append('D30')
                naiyo_num_list.append('D31')
            if j == 'ウ':
                text_position_list.append('D34')
                text_position_list.append('D35')
                midashi_num_list.append('D34')
                naiyo_num_list.append('D35')
            if j == 'エ':
                text_position_list.append('D37')
                text_position_list.append('D38')
                midashi_num_list.append('D37')
                naiyo_num_list.append('D38')
            if j == 'オ':
                text_position_list.append('D41')
                text_position_list.append('D42')
                midashi_num_list.append('D41')
                naiyo_num_list.append('D42')
            if j == 'カ':
                text_position_list.append('D44')
                text_position_list.append('D45')
                midashi_num_list.append('D44')
                naiyo_num_list.append('D45')
            if j == 'キ':
                text_position_list.append('D49')
                text_position_list.append('D50')
                midashi_num_list.append('D49')
                naiyo_num_list.append('D50')
            if j == 'ク':
                text_position_list.append('D53')
                text_position_list.append('D54')
                midashi_num_list.append('D53')
                naiyo_num_list.append('D54')
            if j == 'ケ':
                text_position_list.append('D57')
                text_position_list.append('D58')
                midashi_num_list.append('D57')
                naiyo_num_list.append('D58')
            if j == 'コ':
                text_position_list.append('D61')
                text_position_list.append('D62')
                midashi_num_list.append('D61')
                naiyo_num_list.append('D62')
        # Words/fragments to flag; the two-digit strings catch year-like
        # numbers so era dates in the text get reported.
        ng_word = ['自社', '弊社', '様', 'です', 'ます', '明治', '大正', '昭和', '平成'
            , '10', '20', '30', '40', '50', '60', '70', '80', '90', '00'
            , '11', '21', '31', '41', '51', '61', '71', '81', '91', '01'
            , '12', '22', '32', '42', '52', '62', '72', '82', '92', '02'
            , '13', '23', '33', '43', '53', '63', '73', '83', '93', '03'
            , '14', '24', '34', '44', '54', '64', '74', '84', '94', '04'
            , '15', '25', '35', '45', '55', '65', '75', '85', '95', '05'
            , '16', '26', '36', '46', '56', '66', '76', '86', '96', '06'
            , '17', '27', '37', '47', '57', '67', '77', '87', '97', '07'
            , '18', '28', '38', '48', '58', '68', '78', '88', '98', '08'
            , '19', '29', '39', '49', '59', '69', '79', '89', '99', '09']
        # Scan each checked cell for every NG word, reporting each hit with a
        # little surrounding context (up to 5 chars before, 10 after).
        for text in text_position_list:
            for ng in ng_word:
                ng_find = str(sheet[text].value).find(ng)
                text_val = str(sheet[text].value)
                check_flg = True
                while check_flg:
                    ng_find = text_val.find(ng)
                    if ng_find != -1:
                        front = 0
                        if (ng_find > 5):
                            front = 5
                        print(text + ':「' + ng + '」あり ' + text_val[ng_find - front: ng_find + 10])
                        # Convert an era date to a Western year: look for 年
                        # 1-3 characters after the era name (1-3 digit years).
                        if ng == '明治' or ng == '大正' or ng == '昭和' or ng == '平成':
                            if text_val[ng_find + 3] == '年':
                                year_pos = ng_find + 3
                                gengo_year = text_val[ng_find + 2:year_pos]
                                seireki_year = year_change(ng, gengo_year)
                                print(ng + gengo_year + '年→' + seireki_year + '年')
                            elif text_val[ng_find + 4] == '年':
                                year_pos = ng_find + 4
                                gengo_year = text_val[ng_find + 2:year_pos]
                                seireki_year = year_change(ng, gengo_year)
                                print(ng + gengo_year + '年→' + seireki_year + '年')
                            elif text_val[ng_find + 5] == '年':
                                year_pos = ng_find + 5
                                gengo_year = text_val[ng_find + 2:year_pos]
                                seireki_year = year_change(ng, gengo_year)
                                print(ng + gengo_year + '年→' + seireki_year + '年')
                        # NOTE(review): always advances 2 chars past the hit
                        # regardless of len(ng); for the 1-char word '様' this
                        # can jump over an immediately adjacent second hit.
                        text_val = text_val[ng_find + 2:]
                    else:
                        check_flg = False
        # Character-count checks.
        # Catch phrase: between 30 and 57 characters.
        if len(sheet['C21'].value) < 30:
            print('C21:キャッチフレーズの字数が少なすぎます')
        elif len(sheet['C21'].value) > 57:
            print('C21:キャッチフレーズの字数が多すぎます')
        # Each of the three initiative summaries: between 30 and 48 characters.
        if len(sheet['E22'].value) < 30:
            print('E22:取組の要約1の字数が少なすぎます')
        elif len(sheet['E22'].value) > 48:
            print('E22:取組の要約1の字数が多すぎます')
        if len(sheet['E23'].value) < 30:
            print('E23:取組の要約2の字数が少なすぎます')
        elif len(sheet['E23'].value) > 48:
            print('E23:取組の要約2の字数が多すぎます')
        if len(sheet['E24'].value) < 30:
            print('E24:取組の要約3の字数が少なすぎます')
        elif len(sheet['E24'].value) > 48:
            print('E24:取組の要約3の字数が多すぎます')
        # Company profile: between 160 and 228 characters.
        if len(sheet['C25'].value) < 160:
            print('C25:会社概要の字数が少なすぎます')
        elif len(sheet['C25'].value) > 228:
            print('C25:会社概要の字数が多すぎます')
        # Headings: between 12 and 28 characters.
        for i in midashi_num_list:
            if len(sheet[i].value) < 12:
                print(i + ':見出しの字数が少なすぎます')
            elif len(sheet[i].value) > 28:
                print(i + ':見出しの字数が多すぎます')
        # Body text: between 180 and 228 characters.
        for i in naiyo_num_list:
            if len(sheet[i].value) < 180:
                print(i + ':本文の字数が少なすぎます')
            elif len(sheet[i].value) > 228:
                print(i + ':本文の字数が多すぎます')
"atsukiexfeel@yahoo.co.jp"
] | atsukiexfeel@yahoo.co.jp |
e7d10f8320db3c2f560b7875b1bb254593aca879 | 5ffa05429f1278455cd02e759cc64f376813ce20 | /html_form_builder/__openerp__.py | 1e8592471c11867f3ba1a29645d05d25c8cae4e7 | [] | no_license | tonihr/Odoo9 | 217f483993c4a49d5c14ad93ec2594e0a46bef5d | 93e0d3de55714e34229cb5273400a6ebc1f6e3e0 | refs/heads/9.0 | 2021-01-19T04:02:57.407271 | 2017-03-08T05:35:47 | 2017-03-08T05:35:47 | 84,426,868 | 0 | 0 | null | 2017-03-09T10:02:45 | 2017-03-09T10:02:45 | null | UTF-8 | Python | false | false | 681 | py | {
    # Odoo/OpenERP addon manifest for the HTML Form Builder module.
    'name': "HTML Form Builder",
    'version': "1.8.9",
    'author': "Sythil Tech",
    'category': "Tools",
    'support': "steven@sythiltech.com.au",
    'summary': "Manage both internal and external forms",
    'description': "Manage both internal and external forms",
    'license':'LGPL-3',
    # XML views, seed data and access rules loaded on install/update.
    'data': [
        'views/html_form.xml',
        'views/html_form_builder_templates.xml',
        'data/html.form.captcha.csv',
        'data/html.form.field.type.csv',
        'data/html.form.action.type.csv',
        'security/ir.model.access.csv',
    ],
    'demo': [],          # no demo data shipped
    'images':[
        'static/description/1.jpg',   # app-store screenshot
    ],
    'depends': [],       # no other addons required
    'installable': True,
}
"steven@sythiltech.com"
] | steven@sythiltech.com |
2d939764dcd1a8446c88df11df99c240bdf0e89d | 72d17b9ef29724a314650f95bb247180bea5e39a | /q1.py | 29c53490a359b51cd97bdf3a11b3965b91035ce5 | [] | no_license | kanglif2/ke5205-text-mining-project | a5dda9cad13dd3e9671daa644ef492b3e6febcf0 | 6338a6e157ae5b5b915e5c80ffe19618c04efd67 | refs/heads/master | 2020-05-04T23:40:39.120345 | 2016-10-15T12:42:40 | 2016-10-15T12:42:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,107 | py | # -*- coding: utf-8 -*-
import string
#import unicodedata
import pandas as pd
import matplotlib.pyplot as plt
#import numpy as np
import nltk
from nltk import pos_tag, word_tokenize, sent_tokenize
from nltk.corpus import stopwords, wordnet
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import VotingClassifier
# Load the labelled Malaysian accident cases (training data) and the
# unlabelled OSHA reports (to be classified later in this script).
msia = pd.read_csv('data/MsiaAccidentCases_clean.csv')
osha = pd.read_csv('data/osha_clean.csv', header=None)
# Shared NLP helpers: WordNet lemmatizer and the English stop-word set.
wnl = nltk.WordNetLemmatizer()
stop = set(stopwords.words('english'))
def get_wordnet_pos(treebank_tag):
    """Translate a Penn Treebank POS tag into the matching WordNet POS constant.

    Tags beginning with J/V/R map to adjective/verb/adverb; every other tag
    (including the empty string) is treated as a noun.
    """
    prefix_to_pos = {'J': wordnet.ADJ, 'V': wordnet.VERB, 'R': wordnet.ADV}
    return prefix_to_pos.get(treebank_tag[:1], wordnet.NOUN)
#stop |= set(['die', 'kill'])
def lemmatize_df_col(df, col):
res=[]
for index, row in df.iterrows():
if type(row[col]) is float:
print index
return
text = row[col].lower()
sents = sent_tokenize(text)
pos = [pos_tag(word_tokenize(s)) for s in sents] #map
pos = reduce(lambda x, y: x + y, pos) #flatten
pos = filter(lambda (w, t): w.isalpha() and w not in stop, pos)
text_lem = ' '.join([wnl.lemmatize(w, get_wordnet_pos(t)) for (w, t) in pos])
res.append(text_lem)
return res
print 'Distribution of causes for Msia Accident Cases dataset:'
msia_cause_count = msia.groupby('cause').size().sort_values(ascending=False)
msia_cause_count.plot(kind='barh')
plt.gca().invert_yaxis()
plt.show()
print msia_cause_count
print
print 'Training models based on Msia Accident Cases...'
print 'Prediction score based on Title:'
text_lem_list = lemmatize_df_col(msia, 'title_case')
vectorizer = TfidfVectorizer(max_df=0.9)
X = vectorizer.fit_transform(text_lem_list)
y = msia.cause
seed = 32
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
dt = DecisionTreeClassifier(random_state=seed).fit(X_train, y_train)
print '\tDecision Tree:\t\t\t%f' % dt.score(X_test, y_test)
knn = KNeighborsClassifier(n_neighbors = 11, weights = 'distance', \
metric = 'cosine', algorithm = 'brute').fit(X_train, y_train)
print '\tDecision Tree:\t\t\t%f' % knn.score(X, y)
mnb = MultinomialNB().fit(X_train, y_train)
print '\tNaive Bayesian:\t\t\t%f' % mnb.score(X_test, y_test)
svm = SVC(C=1000000.0, gamma='auto', kernel='rbf').fit(X_train, y_train)
print '\tSVM:\t\t\t\t%f' % svm.score(X_test, y_test)
lr = LogisticRegression().fit(X_train, y_train)
print '\tLogistic Regression:\t\t%f' % lr.score(X_test, y_test)
vc = VotingClassifier(estimators=[ \
('dt', dt), ('knn', knn), ('mnb', mnb), ('svm', svm), ('lr', lr) \
], voting='hard').fit(X_train, y_train)
print '\tEnsemble (Majority Vote):\t%f' % vc.score(X_test, y_test)
print 'Prediction score based on Summary:'
text_lem_list2 = lemmatize_df_col(msia, 'summary_case')
vectorizer2 = TfidfVectorizer(max_df=0.9)
X2 = vectorizer2.fit_transform(text_lem_list2)
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y, test_size=0.2, random_state=seed)
vc2 = VotingClassifier(estimators=[ \
('dt', dt), ('knn', knn), ('mnb', mnb), ('svm', svm), ('lr', lr) \
], voting='hard').fit(X_train, y_train)
vc2.fit(X2_train, y2_train)
print '\tEnsemble (Majority Vote):\t%f' % vc2.score(X2_test, y2_test)
print
print 'Using Ensemble Model based on Titles of Msia dataset to predice Causes for osha dataset...'
print
text_lem_list_osha = lemmatize_df_col(osha, 1) #title
#vocab = set(reduce(lambda x, y: x + y, [l.split() for l in text_lem_list]))
#vectorizer_osha = TfidfVectorizer(max_df=0.9, vocabulary=vectorizer.get_feature_names())
X_osha = vectorizer.transform(text_lem_list_osha)
osha_pred = vc.predict(X_osha)
print osha_pred
osha
| [
"yan9za1@gmail.com"
] | yan9za1@gmail.com |
b6cd59becb4fb605ea25d1342073d03073b486b8 | 54d60cf707139ae3eefac7d4136fc4b57a33c6cb | /Json_webScrapping/scrapping_bs4.py | ec1ac2a38f3be8a24a6e8ed8147531488b68e05a | [] | no_license | rakeshgowdan/Python_DataAnalysis | 7814eb13274ca95d5ffc405dff1b9093b9c6d3bf | d911b9f3bde57b615d28eab142dde2720f066d16 | refs/heads/master | 2020-03-31T12:45:59.356181 | 2019-03-15T10:31:51 | 2019-03-15T10:31:51 | 152,228,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | import requests
from bs4 import BeautifulSoup
data=requests.get('https://www.w3schools.com/tags/tag_table.asp')
soup=BeautifulSoup(data.text,'html.parser')
for tr in soup.find_all('tr'):
for td in tr.find_all('td'):
print(td.text)
| [
"rakeshgowda3101@gmail.com"
] | rakeshgowda3101@gmail.com |
cd14d101a34e2fb93abf67a6f5d7818b15d89544 | e3565e1ce607f60745f2a045aae8026661a6b99b | /resources/Onyx-1.0.511/sandbox/malach/filefix.py | b32a126b489cba53a0958df1d590cbbd7e027b16 | [
"Apache-2.0"
] | permissive | eternity668/speechAD | 4c08d953b2ed06b3357b1c39d8709dd088a2471c | f270a1be86372b7044615e4fd82032029e123bc1 | refs/heads/master | 2021-01-12T22:10:33.358500 | 2014-02-03T16:03:28 | 2014-02-03T16:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,982 | py | ###########################################################################
#
# File: filefix.py
# Date: Tue 28 Apr 2009 14:51
# Author: Ken Basye
# Description: Some general tools for fixing files
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
from __future__ import with_statement
import os.path
import re
"""
>>> problem_line_re = re.compile("^(.*)<(Trans|Episode|Speaker|Speakers|Turn|Who|Sync)(.*)>(.*)$")
"""
def apply_line_transform_to_dir(transform, dirname, newdirname, glob_str='*'):
"""
Apply the callable *transform* to each line of each file in *dirname* that
matches *glob_str* (default '*'), creating new files with the same basename
in *newdirname*.
"""
import glob
import os
fnames = glob.glob(dirname + os.sep + glob_str)
print("Reading %d files in %s" % (len(fnames), dirname))
for fname in fnames:
dir,base = os.path.split(fname)
newfname = os.path.join(newdirname, base)
with open(fname) as f:
with open(newfname, 'w') as newf:
for lineno,line in enumerate(f):
newf.write(transform(line, lineno))
def fix_all_malach_files(transform, stage):
"""
Process the given transform on all Malach transcription files. *stage*
should be a positive integer; its value will be used to determine both the
source and target directory names.
"""
dirname = "./transcriptions%d" % (stage,)
newdirname = "./transcriptions%d" % (stage + 1,)
os.mkdir(newdirname)
apply_line_transform_to_dir(transform, dirname, newdirname, glob_str='*.trs')
dirname = "./transcriptions%d/additional" % (stage,)
newdirname = "./transcriptions%d/additional" % (stage + 1,)
os.mkdir(newdirname)
apply_line_transform_to_dir(transform, dirname, newdirname, glob_str='*.trs')
def fix_encoding_in_header(line, lineno):
"""
This line transform fixes a problem in the first line of the file where the
encoding attribute had been formatted incorrectly.
"""
_CORRECT = """<?xml version="1.0" encoding="ISO-8859-1"?>\n"""
if lineno == 0 and line.find("encoding") == -1:
return _CORRECT
else:
return line
def fix_version_date(line, lineno):
"""
This line transform fixes a problem in the third line of the file where the
version_date attribute had been misspelt.
"""
if lineno == 2 and line.find("version_data") != -1:
return line.replace("version_data", "version_date")
else:
return line
def fix_common_bad_tags(line, lineno):
"""
This line transform fixes a problem in several files where <> was used to
indicate certain transcription tokens, e.g. '<pause>' Since <> is the XML
tag syntax, this causes XML parsing to fail in many places. This transform
identifies the problem regions and replaces <XXX> with >XXX< which
will then be parsed correctly. This transform is limited to replacing a few
common bad tags just to reduce the remaining problems to a manageable size.
"""
bases = ("noise", "pause", "um", "UH", "breath", "inhale", "uh", "cough", "laugh", "HM", "emotion", "UH-UH", "UM",
"unintelligible", "mouth", "silence", "lead_silence", "hm", "uh_hum", "sniff", "exhale", "UH-UH-UH", "uh-uh",
"cross_talk_begin", "cross_talk_end", "cross_talk_begins", "cross_talk_ends",
"bkgrd_noise", "cross_talk", "long_pause", "UH_HUH", "uh_huh", "UH_HUM", "UH-HUH", "uh-huh", "UH-HUM", "EH",
"laugh-laugh", "noise-noise", "cough-cough", "ap-", "uf-", "spk#1", "spk#2")
pairs = [("<%s>" % (token,), "<%s>" % (token,)) for token in bases]
for problem, fix in pairs:
if line.find(problem) != -1:
line = line.replace(problem, fix)
return line
def fix_bad_tags1(line, lineno):
"""
This line transform fixes a problem in several files where <> was used to
indicate certain transcription tokens, e.g. '<pause>' Since <> is the XML
tag syntax, this causes XML parsing to fail in many places. This transform
identifies the problem regions and replaces <XXX> with >XXX< which
will then be parsed correctly. This transform is limited to replacing
tokens in <>s with only lower-case letters, and underscores, and will
only replace one such instance in a line. This covers many error cases, and
later transforms can do more work on fewer instances.
"""
import re
problem_line_re = re.compile("^(.*)<([a-z_]*)>(.*)$")
match = problem_line_re.match(line)
if match is None:
return line
else:
groups = match.groups()
assert len(groups) == 3
newline = groups[0] + '<' + groups[1] + '>' + groups[2] + '\n'
return newline
def fix_bad_tags2(line, lineno):
"""
This line transform fixes a problem in several files where <> was used to
indicate certain transcription tokens, e.g. '<pause>' Since <> is the XML
tag syntax, this causes XML parsing to fail in many places. This transform
identifies the problem regions and replaces <XXX> with >XXX< which
will then be parsed correctly. Limited to any <> with an a-z character
immediately after the <.
"""
import re
problem_line_re = re.compile("^(.*)<([a-z].*)>(.*)$")
match = problem_line_re.match(line)
if match is None:
return line
else:
groups = match.groups()
assert len(groups) == 3
newline = groups[0] + '<' + groups[1] + '>' + groups[2] + '\n'
return newline
def fix_bad_tags3(line, lineno):
"""
This line transform fixes a problem in several files where <> was used to
indicate certain transcription tokens, e.g. '<pause>' Since <> is the XML
tag syntax, this causes XML parsing to fail in many places. This transform
identifies the problem regions and replaces <XXX> with >XXX< which
will then be parsed correctly. This transform deals with tokens in <>s
which consist only of capital letters, underscores, and hyphens.
"""
import re
problem_line_re = re.compile("^(.*)<([A-Z_/-]*)>(.*)$")
match = problem_line_re.match(line)
if match is None:
return line
else:
groups = match.groups()
assert len(groups) == 3
newline = groups[0] + '<' + groups[1] + '>' + groups[2] + '\n'
return newline
def fix_bad_tags4(line, lineno):
"""
This line transform fixes remaining bad tags, which is anything in <>s that
doesn't start with a tag we know about. It prints the line it is fixing,
and is meant to be used when almost everything has been fixed.
"""
import re
ok_line_re = re.compile(r"^(.*)</?(Trans|Episode|Speaker|Speakers|Turn|Who|Sync|Section|\?xml|!DOCTYPE)(.*)>(.*)$")
ok_match = ok_line_re.match(line)
problem_line_re = re.compile("^(.*)<(.*)>(.*)$")
problem_match = problem_line_re.match(line)
if ok_match is not None:
return line
if problem_match is None:
return line
else:
groups = problem_match.groups()
assert len(groups) == 3
newline = groups[0] + '<' + groups[1] + '>' + groups[2] + '\n'
print line
return newline
def check_for_bad_tags0(line, lineno):
"""
This line transform just checks for bad tags, which is anything in <>s that
doesn't start with a tag we know about. It prints any line which has more than one < in it.
"""
import re
ok_line_re = re.compile(r"^(.*)</?(Trans|Episode|Speaker|Speakers|Turn|Who|Sync|Section|\?xml|!DOCTYPE)(.*)>(.*)$")
ok_match = ok_line_re.match(line)
problem_line_re = re.compile("^(.*)<(.*)>(.*)$")
problem_match = problem_line_re.match(line)
if ok_match is not None:
return line
if problem_match is None:
return line
else:
groups = problem_match.groups()
if line.count('<') > 1:
print line
return line
def check_for_bad_tags(line, lineno):
"""
This line transform just checks for bad tags, which is anything in <>s that
doesn't start with a tag we know about.
"""
import re
ok_line_re = re.compile(r"^(.*)</?(Trans|Episode|Speaker|Speakers|Turn|Who|Sync|Section|\?xml|!DOCTYPE)(.*)>(.*)$")
ok_match = ok_line_re.match(line)
problem_line_re = re.compile("^(.*)<(.*)>(.*)$")
problem_match = problem_line_re.match(line)
if ok_match is not None:
return line
if problem_match is None:
return line
else:
groups = problem_match.groups()
print line
return line
if __name__ == '__main__':
fix_all_malach_files(fix_encoding_in_header, 1)
fix_all_malach_files(fix_version_date, 2)
fix_all_malach_files(fix_common_bad_tags, 3)
# We do two rounds of the next fix since there are several begin/end pairs
# and each round will only clean up one tag
fix_all_malach_files(fix_bad_tags1, 4)
fix_all_malach_files(fix_bad_tags1, 5)
fix_all_malach_files(fix_bad_tags2, 6)
fix_all_malach_files(fix_bad_tags3, 7)
fix_all_malach_files(check_for_bad_tags0, 8)
fix_all_malach_files(fix_bad_tags4, 9)
fix_all_malach_files(check_for_bad_tags, 10)
| [
"nassos@n12mavra.cs.ntua.gr"
] | nassos@n12mavra.cs.ntua.gr |
74cdf0828e11c756fec67a5b80bde0e5b418453d | f0063ed82e9f6c71485796826b1cb8878b2d50f5 | /lab2/lab2c.py | b08170a118b6cc91a7402b32e9eff08b4f16c758 | [] | no_license | pyao7-code/ops435-Python | d6ca6c9651c9426d010e8f15450d43920e3bd706 | 488b382d28e2530baf8da5a1f25a4bd88f471dd2 | refs/heads/master | 2021-10-26T02:38:31.576764 | 2019-04-09T22:00:56 | 2019-04-09T22:00:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | #!/usr/bin/env python3
import sys
# Greet the user named on the command line: argv[1] is the name, argv[2] the age.
person = sys.argv[1]
years = sys.argv[2]
print('Hi {}, you are {} years old.'.format(person, years))
"vlha@myseneca.ca"
] | vlha@myseneca.ca |
f562bc0096ec80473c16957f03b4c070b782bab7 | 99280ee4672420b43bdcedb9c6f5c93a5fe182f0 | /API/backend_3/todo_project/todo_project/settings.py | 297321c3ae1b7a167c333d4af61b2cc4b333d714 | [] | no_license | kamral/test_1 | f8674a075d51fc94630df7d6a5cf55b11d086db0 | a10ce3337463d1cb9b56876d0566798740c0b42f | refs/heads/master | 2023-08-06T23:50:45.519935 | 2020-06-07T09:27:43 | 2020-06-07T09:27:43 | 265,688,683 | 0 | 0 | null | 2021-09-22T19:23:15 | 2020-05-20T21:21:21 | Python | UTF-8 | Python | false | false | 3,293 | py | """
Django settings for todo_project project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two directory levels up from this settings.py file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '=_jwj$8oi08uu8m)5170xe#@o_aqjjpyhy(5d-fq=^k-^!f9ui'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 3rd party
    'rest_framework',
    # local
    'todos.apps.TodosConfig',
]

# Django REST Framework: every endpoint is open to anonymous callers.
REST_FRAMEWORK={
    'DEFAULT_PERMISSION_CLASSES':[
        'rest_framework.permissions.AllowAny',
    ]
}

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'todo_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'todo_project.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

# Local SQLite database file stored alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
| [
"kamral010101@gmail.com"
] | kamral010101@gmail.com |
878bd389051dba8d38357329c923e6f9ea202a2c | 8b197dcaa22b867c101dd4d13267949f2962e94c | /json_test.py | a99883115ed69dd65998f0b23faa31653d4f17a9 | [] | no_license | tomasmenito/PythonTests | 45670a5439906fb3e6ba2b350d1a474d12c0d811 | c21e38f2c8722c17cf8f8ec904df3cae82518e7a | refs/heads/master | 2022-11-07T06:17:05.922191 | 2020-06-16T17:45:35 | 2020-06-16T17:45:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import json,requests,sys
url ='https://api.github.com//users//mralexgray//repos'
response = requests.get(url)
response.raise_for_status()
data = json.loads(response.text)
w = data['list']
print(w)
| [
"tomaslm@hotmail.com"
] | tomaslm@hotmail.com |
9abfdc5a2c0729518fddf65bbefeae6317b8b9a0 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2283-2366/right-branch-2366/twisted/internet/tksupport.py | 19dcf48b56a21fe81e5d2e00d290099a36bdac51 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | """This module integrates Tkinter with twisted.internet's mainloop.
API Stability: semi-stable
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
To use, do::
| tksupport.install(rootWidget)
and then run your reactor as usual - do *not* call Tk's mainloop(),
use Twisted's regular mechanism for running the event loop.
Likewise, to stop your program you will need to stop Twisted's
event loop. For example, if you want closing your root widget to
stop Twisted::
| root.protocol('WM_DELETE_WINDOW', reactor.stop)
"""
import Tkinter, tkSimpleDialog, tkMessageBox
from twisted.python import log
from twisted.internet import task
_task = None
def install(widget, ms=10, reactor=None):
"""Install a Tkinter.Tk() object into the reactor."""
installTkFunctions()
global _task
_task = task.LoopingCall(widget.update)
_task.start(ms / 1000.0, False)
def uninstall():
"""Remove the root Tk widget from the reactor.
Call this before destroy()ing the root widget.
"""
global _task
_task.stop()
_task = None
def installTkFunctions():
import twisted.python.util
twisted.python.util.getPassword = getPassword
def getPassword(prompt = '', confirm = 0):
    """Prompt for a password with a Tk dialog.

    If `confirm` is truthy, ask twice and loop until both entries match,
    showing an error dialog on each mismatch.
    """
    while True:
        first = tkSimpleDialog.askstring('Password Dialog', prompt, show='*')
        if not confirm:
            return first
        second = tkSimpleDialog.askstring('Password Dialog', 'Confirm Password', show='*')
        if first == second:
            return first
        tkMessageBox.showerror('Password Mismatch', 'Passwords did not match, starting over')
__all__ = ["install", "uninstall"]
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
42c170cdc7ec40157a5fe37fc17a43224af43c96 | ed9fe676f645263c7e05242fd2c18dd06286296b | /bfs_and_dfs.py | 1133ced2837d1e9154e64517aaa90e694923f95d | [] | no_license | nadavleh/Undirected-unweighted-graph-search | 1938e6a52d7d10a2ec12d203892f68455e779655 | 0c99ea31b653503fa3a88367e3192539fce3a94e | refs/heads/master | 2020-12-06T05:35:34.150689 | 2020-01-07T16:03:35 | 2020-01-07T16:03:35 | 232,361,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,237 | py | """
In this script I implement the Breadth First Seach Algorithm or BFS and
Depth First Seach Algorithm or DFS, to determine a route or "traversal"
between nodes in a graph (if such traversal even exists)
In order to do so, i first define a graph on which i implement this seach.
This graph is a grid of size-by-size, where each square is a node, and is
connected to its 8 surrounding squares (unless its in the edges of the grid than
we have some special cases)
We stat with BFS: this search aswell as the DFS alg. can be implemened recursivly, however
it is not common to do so. The way it is ofen implemented is by using a Queue
(q) in which we stack the nodes we want to visit next. If for each node we stack
its adjacent nodes (if already in the list) on top of the previous stack, we will
implement BFS by definition. In DFS we mearly satck the nodes at the beginning of the list
rather than its end. This explenation can be seen in [1] and [2,3]
MIT 6.034 Artificial Intelligence, Fall 2010 lecture 4 min 22:
[1] https://www.youtube.com/watch?v=j1H3jAAGlEA&list=PLUl4u3cNGP63gFHB6xb-kVBiQHYe_4hSi&index=5&t=1974s
Joe James's YouTube and GitHub:
[2] https://www.youtube.com/watch?v=-uR7BSfNJko
[3] https://github.com/joeyajames/Python/blob/master/bfs.py
"""
# we will use the graph class which uses te vertex class, thus we import both
from Adj_List_graph import vertex, graph
import numpy as np
# We define a class that manifests a grid as a graph. to initialize an object
# of this class we only need to input its size (e.e. 10 will result in a 10 by 10
# grid with 100 cells as nodes)
class grid_graph:
    """A size-by-size grid modelled as an undirected graph.

    Cells are numbered 1..size**2 row by row; each cell connects to its
    (up to 8) neighbours, handled case by case below.  Relies on the
    project-local `graph`/`vertex` classes.
    """
    def __init__(self, size):
        # to see the graph an its nodes in a grid, we do the following:
        self.grid = np.array([range(1,size**2+1)])
        self.grid = self.grid.reshape((size,size))
        self.size=size
        print(self.grid)
        self.graph=graph()
        # to define the grid as an object of class "graph", we need to initialize
        # each of the size^2 nodes as a "vertex" object and add it to the graph.
        # Then afer we determine each nodes' edges and add it to the list of egdes.
        # I brute forced it, so the list will contain pairs like (node1, node2)
        # and (node2, node1) whis is the same in an undirected graph as we implement
        # (the add_edge method in class "graph" adds a connection/edge between
        # both nodes regardless of their order in the tuple).
        # In order to see the logic of how to determine which nodes are connected,
        # simply write the numbers 1 to size^2 in on a grid, starting from the
        # top right and filling in each row.
        # If i dont want to allow connections between diagonal nodes just delete the
        # tuples that are of the form (i,i+-size+-1)
        #
        # NOTE: the elif order matters -- corners must be matched before the
        # column/row cases below them.
        edges=list()
        for i in range(1, size**2+1):
            self.graph.add_node(vertex(i))
            if i==1:
                # top-left corner
                edges+=[(1,2),(1,size+1),(1,size+2)]
            elif i==size:
                # top-right corner
                edges+=[(size,size-1),(size,2*size-1),(size,2*size)]
            elif i==(size-1)*size+1:
                # bottom-left corner
                edges+=[(i,i+1),(i,i-size+1),(i,i-size)]
            elif i==size**2:
                # bottom-right corner
                edges+=[(i,i-1),(i,i-size-1),(i,i-size)]
            elif i%size==1:
                # left column (non-corner)
                edges+=[(i,i+1),(i,i-size+1),(i,i-size),(i,i+size+1),(i,i+size)]
            elif i%size==0:
                # right column (non-corner)
                edges+=[(i,i-1),(i,i-size-1),(i,i-size),(i,i+size-1),(i,i+size)]
            elif i in list(range(2,size)):
                # top row interior cells
                edges+=[(i,i+1),(i,i-1),(i,i+size),(i,i+size+1),(i,i+size-1)]
            elif i in list(range(size+1,size**2)):
                # all remaining cells (interior and bottom row)
                edges+=[(i,i+1),(i,i-1),(i,i-size),(i,i-size-1),(i,i-size+1)]
        self.graph.add_edge(edges)
def bfs(graph, start=22, goal=88):
    """Breadth-first search from `start` to `goal` over graph.AdjList.

    Returns a tuple (connection, traversal): `connection` is True when a
    path exists, and `traversal` is the node sequence from start to goal
    (empty when no path was found).  Also prints the traversal.
    """
    queue = [start]
    # Track which nodes have already been queued, and which node queued them
    # (a parent map used afterwards to reconstruct the path).
    seen = {node: False for node in graph.AdjList}
    parent = {node: [] for node in graph.AdjList}
    seen[start] = True
    found = False
    tick = 0
    while queue and not found:
        head = queue.pop(0)
        # Enqueue every not-yet-seen neighbour of the current node; because new
        # nodes go to the *end* of the queue this explores breadth-first.
        for neighbour in graph.AdjList[head]:
            if not seen[neighbour]:
                queue.append(neighbour)
                seen[neighbour] = True
                parent[neighbour] = head
                if neighbour == goal:
                    found = True
        tick += 1
    # Walk the parent map backwards from the goal to recover the path.
    traversal = []
    connection = False
    if found:
        connection = True
        tick = goal
        traversal = [goal]
        while tick != start:
            traversal.append(parent[tick])
            tick = parent[tick]
        traversal = traversal[::-1]
    print(traversal, 'itterations=', tick)
    return connection, traversal
"""
Now we move to DFS:
This is very simmylar to BFS however here, we insert the djacent nodes in each itteration,
at the beginning of the queue. this is explained reasonably good by [1]
MIT 6.034 Artificial Intelligence, Fall 2010 lecture 4 min 22:
[1] https://www.youtube.com/watch?v=j1H3jAAGlEA&list=PLUl4u3cNGP63gFHB6xb-kVBiQHYe_4hSi&index=5&t=1974s
"""
def dfs(graph,start=22,goal=88):
    """Depth-first search from `start` to `goal` over graph.AdjList.

    Returns (connection, traversal); prints the traversal when found.
    """
    q=[start];
    visited={}
    route={};
    for key in graph.AdjList:
        visited[key]=False
        route[key]=[]
    visited[start]=True
    found=False
    i=0
    ############ The only part that differs from BFS ############
    # now, instead of enqueing at the end of the q, we enqueue in the beginning.
    # NOTE(review): if `goal` is unreachable from `start` this loop never
    # terminates, because the iteration guard is commented out -- confirm.
    while not found: #and i<=graph.size**2:
        enq=graph.AdjList[q[i]]
        k=0
        for j in enq:
            if j not in q[0:i+1+k]:
                # NOTE(review): the trailing `[:-1]` slice silently drops the
                # last element of q on every insertion; `q[i+1+k:]` looks like
                # the intended tail -- verify before relying on this routine.
                q=q[0:i+1+k]+[j]+q[i+1+k:-1]
                route[j]=q[i]
                k+=1
                if j==goal:
                    found=True
        i+=1
    ##############################################################
    # Reconstruct the path by walking the `route` parent map backwards.
    traversal=[]
    connection=False
    if found:
        connection=True
        done=False
        i=goal
        traversal=[goal]
        while not done and i!=start:
            traversal+=[route[i]]
            i=route[i]
        traversal=traversal[::-1]
        print(traversal,'itterations=',i)
    return connection,traversal
# here i tried implementing dfs more ellegantly, like in bfs, however
# the results are exactly he same as BFS, so something is clearly wrong
def dfs2(graph, start=22, goal=88):
    """Depth-first search from `start` to `goal` over graph.AdjList.

    Nodes are pushed onto the *front* of the queue, so the most recently
    discovered node is expanded first (depth-first order).  Returns a tuple
    (connection, traversal): True plus the start-to-goal node sequence when
    a path exists, otherwise (False, []).  Also prints the traversal.
    """
    q = [start]
    visited = {key: False for key in graph.AdjList}
    route = {key: [] for key in graph.AdjList}
    found = False
    i = 0
    while q and not found:
        if visited[q[0]]:
            # BUG FIX: a node can sit in the queue more than once (it is only
            # marked visited when expanded).  The original code did `continue`
            # without removing the duplicate, so the same visited node stayed
            # at the front forever -- an infinite loop whenever the goal was
            # unreachable past that point.  Discard the duplicate instead.
            q.pop(0)
            continue
        curr_node = q.pop(0)
        visited[curr_node] = True
        for j in graph.AdjList[curr_node]:
            if not visited[j]:
                # Push at the front so the search dives deeper first.
                q.insert(0, j)
                route[j] = curr_node
                if j == goal:
                    found = True
        i += 1
    # Reconstruct the path by walking the `route` parent map backwards.
    traversal = []
    connection = False
    if found:
        connection = True
        i = goal
        traversal = [goal]
        while i != start:
            traversal.append(route[i])
            i = route[i]
        traversal = traversal[::-1]
    print(traversal, 'itterations=', i)
    return connection, traversal
def main():
    """Build a 10x10 grid graph and run the three search routines on it,
    printing each route from cell 22 to cell 88."""
    grid=grid_graph(10)
    print('DFS route and itterations')
    dfs(grid.graph,22,88)
    print('DFS2 route and itterations')
    dfs2(grid.graph,22,88)
    print('BFS route and itterations')
    bfs(grid.graph,22,88)
if __name__ == "__main__":
    # execute only if run as a script
    main()
| [
"noreply@github.com"
] | noreply@github.com |
92e592f69aa42552367251f1e4374870fcf374cf | b77c767fe684e1263bf5a9b40e679fed993c56d5 | /manage.py | 8f2f8db8e50c8d4b03c0e19034bb507f6b7c370e | [] | no_license | OMCloud/SecurityAuditPlatform | 53990e4580ad14d8dc1706181db507ab7ab8155e | c16fb6f28de8c13e78367d67003ebe878bf842e4 | refs/heads/master | 2020-06-07T02:40:21.232755 | 2019-06-20T10:53:08 | 2019-06-20T10:53:08 | 192,902,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before any framework import.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SecurityAuditPlatform.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch to the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"omcloudw@gmail.com"
] | omcloudw@gmail.com |
7129342bc82be34d2f6921de2f6dfee14ef9e637 | d719e636edefb96d43383245c9c36dd701605092 | /setup.py | dd7440e7ec0034f1f44853ee11b55e11bbaa250a | [
"MIT"
] | permissive | Spider8801/conllu | a5c6c6b128f4ee1989449f8dbc8474763d61ea16 | d3f72686961376ac63e7b0e6ae47776bc6e50c55 | refs/heads/master | 2022-07-29T13:07:39.171546 | 2020-06-07T14:53:48 | 2020-06-07T14:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | # -*- coding: utf-8 -*-
import os
from setuptools import setup
# Single source of truth for the released package version.
VERSION = '3.0'
setup(
    name='conllu',
    packages=["conllu"],
    version=VERSION,
    description='CoNLL-U Parser parses a CoNLL-U formatted string into a nested python dictionary',
    # NOTE(review): open() uses the platform default encoding here; the README
    # contains non-ASCII text, so an explicit encoding='utf-8' may be needed on
    # some platforms -- left unchanged, confirm before modifying.
    long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
    long_description_content_type="text/markdown",
    author=u'Emil Stenström',
    author_email='em@kth.se',
    url='https://github.com/EmilStenstrom/conllu/',
    # Pure-python, no runtime dependencies.
    install_requires=[],
    keywords=['conllu', 'conll', 'conll-u', 'parser', 'nlp'],
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
    ],
)
| [
"em@kth.se"
] | em@kth.se |
9f3e522f5eaf4bccca6718e97418876a8b0a517a | 3b3e6c246bb61681fe083fff76f7867fdc713d24 | /fedot/core/operations/evaluation/regression.py | 245bfad74323c488eca5c7b063941182fb75732b | [
"BSD-3-Clause"
] | permissive | houmanb/FEDOT | 2f57a60f9c78e879cf3a97ce9c4bfa53c5cf1a0e | b7123d586616fbd1cb067ea61e1b688829b11685 | refs/heads/master | 2023-06-17T02:47:58.190501 | 2021-06-30T10:34:59 | 2021-06-30T10:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,286 | py | import warnings
from typing import Optional
from fedot.core.data.data import InputData
from fedot.core.operations.evaluation.evaluation_interfaces import EvaluationStrategy, SkLearnEvaluationStrategy
from fedot.core.operations.evaluation.operation_implementations.data_operations.decompose \
import DecomposerRegImplementation
from fedot.core.operations.evaluation.operation_implementations. \
data_operations.sklearn_filters import LinearRegRANSACImplementation, NonLinearRegRANSACImplementation
from fedot.core.operations.evaluation.operation_implementations. \
data_operations.sklearn_selectors import LinearRegFSImplementation, NonLinearRegFSImplementation
from fedot.core.operations.evaluation.operation_implementations.models.knn import CustomKnnRegImplementation
warnings.filterwarnings("ignore", category=UserWarning)
class SkLearnRegressionStrategy(SkLearnEvaluationStrategy):
    """Evaluation strategy that delegates regression prediction to a fitted
    scikit-learn estimator."""

    def predict(self, trained_operation, predict_data: InputData,
                is_fit_chain_stage: bool):
        """
        Predict method for regression task

        :param trained_operation: model object
        :param predict_data: data used for prediction
        :param is_fit_chain_stage: is this fit or predict stage for chain
        :return: prediction converted to the framework's output format
        """
        raw_prediction = trained_operation.predict(predict_data.features)
        # Wrap the raw prediction into the framework output (if it is required)
        return self._convert_to_output(raw_prediction, predict_data)
class CustomRegressionPreprocessingStrategy(EvaluationStrategy):
    """Strategy for applying FEDOT's own preprocessing operations (RANSAC
    filters, feature selectors, decomposer) for the regression task."""

    __operations_by_types = {
        'ransac_lin_reg': LinearRegRANSACImplementation,
        'ransac_non_lin_reg': NonLinearRegRANSACImplementation,
        'rfe_lin_reg': LinearRegFSImplementation,
        'rfe_non_lin_reg': NonLinearRegFSImplementation,
        'decompose': DecomposerRegImplementation
    }

    def __init__(self, operation_type: str, params: Optional[dict] = None):
        # Resolve the implementation class first so an unknown type fails fast.
        self.operation_impl = self._convert_to_operation(operation_type)
        super().__init__(operation_type, params)

    def fit(self, train_data: InputData):
        """Fit the preprocessing operation on the provided data.

        :param InputData train_data: data used for operation training
        :return: trained data operation
        """
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        # params_for_fit may be None or empty; **{} equals calling with no args.
        operation = self.operation_impl(**(self.params_for_fit or {}))
        operation.fit(train_data)
        return operation

    def predict(self, trained_operation, predict_data: InputData,
                is_fit_chain_stage: bool):
        """Apply the fitted preprocessing operation to new data.

        :param trained_operation: fitted operation object
        :param predict_data: data used for prediction
        :param is_fit_chain_stage: is this fit or predict stage for chain
        :return: transformed data converted to the framework's output format
        """
        transformed = trained_operation.transform(predict_data, is_fit_chain_stage)
        return self._convert_to_output(transformed, predict_data)

    def _convert_to_operation(self, operation_type: str):
        # Map the string identifier onto its implementation class.
        try:
            return self.__operations_by_types[operation_type]
        except KeyError:
            raise ValueError(f'Impossible to obtain Custom Regression Preprocessing Strategy for {operation_type}')
class CustomRegressionStrategy(EvaluationStrategy):
    """Strategy for applying FEDOT's own regression models to make
    predictions."""

    __operations_by_types = {
        'knnreg': CustomKnnRegImplementation
    }

    def __init__(self, operation_type: str, params: Optional[dict] = None):
        # Resolve the implementation class first so an unknown type fails fast.
        self.operation_impl = self._convert_to_operation(operation_type)
        super().__init__(operation_type, params)

    def fit(self, train_data: InputData):
        """Train the regression model on the provided data."""
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        # params_for_fit may be None or empty; **{} equals calling with no args.
        model = self.operation_impl(**(self.params_for_fit or {}))
        model.fit(train_data)
        return model

    def predict(self, trained_operation, predict_data: InputData,
                is_fit_chain_stage: bool):
        """Predict with the fitted custom regression model."""
        raw_prediction = trained_operation.predict(predict_data, is_fit_chain_stage)
        # Wrap the raw prediction into the framework output (if it is required)
        return self._convert_to_output(raw_prediction, predict_data)

    def _convert_to_operation(self, operation_type: str):
        # Map the string identifier onto its implementation class.
        try:
            return self.__operations_by_types[operation_type]
        except KeyError:
            raise ValueError(f'Impossible to obtain Custom Regression Strategy for {operation_type}')
| [
"noreply@github.com"
] | noreply@github.com |
533661d3c3dc30a457cf43ee40b712444cab32f4 | c5033ec18d537b26db80fd2fa6a6b61d30bcd313 | /stocket/models.py | 4ee0d8ba3551e4d697ae817ba7297eafd35d5a19 | [] | no_license | dannyphan2910/stocket | 2b8068bad3c74a5ac56f76a22b07e55463a9fb94 | ab4eec6e8a3b73da6752924bb409e2307741d8ed | refs/heads/main | 2023-06-16T15:25:29.307687 | 2021-07-10T04:27:51 | 2021-07-10T04:27:51 | 350,432,403 | 0 | 0 | null | 2021-06-17T02:29:05 | 2021-03-22T17:32:15 | Python | UTF-8 | Python | false | false | 2,378 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# (db value, human-readable label) choices for Transaction.transaction_type.
TRANSACTION_TYPES = [
    (0, 'BUY'),
    (1, 'SELL')
]
class Account(models.Model):
    """A trading account: exactly one per Django auth user."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    balance = models.DecimalField(max_digits=12, decimal_places=2, default=0) # max = 9 999 999 999 . 99
    # created_at is set once on insert; updated_at refreshes on every save().
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return f"{self.user.first_name} {self.user.last_name} (balance: {self.balance})"
class Portfolio(models.Model):
    """A named portfolio belonging to an Account; titles are unique per account."""
    account = models.ForeignKey(Account, on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    is_active = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"user #{self.account_id}: {self.title}"
    class Meta:
        unique_together = ['account_id', 'title']
class Transaction(models.Model):
    """A single buy (0) or sell (1) order recorded against a portfolio."""
    portfolio = models.ForeignKey(Portfolio, on_delete=models.CASCADE)
    transaction_type = models.IntegerField(choices=TRANSACTION_TYPES)
    ticker_symbol = models.CharField(max_length=10)
    amount = models.DecimalField(max_digits=11, decimal_places=5) # max = 999 999 . 99999
    order_price = models.DecimalField(max_digits=8, decimal_places=2) # max = 999 999 . 99
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"portfolio #{self.portfolio_id}: {'BUY' if self.transaction_type == 0 else 'SELL'} {self.amount} " \
               f"of {self.ticker_symbol} at {self.order_price} per share"
    @property
    def total_change(self):
        # Signed cash effect of the order, rounded to cents:
        # positive for a sell, negative for a buy.
        price = round(self.amount * self.order_price, 2)
        return price if self.transaction_type == 1 else -price # sell -> +amount*price; buy -> -amount*price
    class Meta:
        order_with_respect_to = 'portfolio'
class Snapshot(models.Model):
    """The market value of a portfolio at a point in time (one row per instant)."""
    portfolio = models.ForeignKey(Portfolio, on_delete=models.CASCADE)
    market_value = models.DecimalField(max_digits=8, decimal_places=2)
    time_record = models.DateTimeField()
    def __str__(self):
        return f"{self.portfolio_id} - valued {self.market_value} at {self.time_record}"
    class Meta:
        unique_together = ['portfolio', 'time_record']
        order_with_respect_to = 'portfolio'
| [
"phanhaidang29102000@gmail.com"
] | phanhaidang29102000@gmail.com |
b247c4def0f45dc6866abfd6944a0a96789d5be0 | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /examples/plot_kernel_approximation.py | 735ed9238223d5243a53a245a95a7e1b780db417 | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,687 | py | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: mrex.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`mrex.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
"""
###########################################################################
# Python package and dataset imports, load dataset
# ---------------------------------------------------
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
print(__doc__)
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from mrex import datasets, svm, pipeline
from mrex.kernel_approximation import (RBFSampler,
Nystroem)
from mrex.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
##################################################################
# Timing and accuracy plots
# --------------------------------------------------
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
# Pixel values are 0..16; scale to [0, 1] and center each feature.
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = (data[:n_samples // 2],
                             digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2:],
                           digits.target[n_samples // 2:])
# data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
                                        ("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
                                         ("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
# Sweep the number of approximation components: 30, 60, ..., 270.
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
    fourier_approx_svm.set_params(feature_map__n_components=D)
    nystroem_approx_svm.set_params(feature_map__n_components=D)
    # Time the fit of each approximate pipeline at this dimensionality.
    start = time()
    nystroem_approx_svm.fit(data_train, targets_train)
    nystroem_times.append(time() - start)
    start = time()
    fourier_approx_svm.fit(data_train, targets_train)
    fourier_times.append(time() - start)
    fourier_score = fourier_approx_svm.score(data_test, targets_test)
    nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
    nystroem_scores.append(nystroem_score)
    fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(16, 4))
accuracy = plt.subplot(121)
# second y axis for timings
timescale = plt.subplot(122)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
               label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
               label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
plt.tight_layout()
plt.show()
############################################################################
# Decision Surfaces of RBF Kernel SVM and Linear SVM
# --------------------------------------------------------
# The second plot visualized the decision surfaces of the RBF kernel SVM and
# the linear SVM with approximate kernel maps.
# The plot shows decision surfaces of the classifiers projected onto
# the first two principal components of the data. This visualization should
# be taken with a grain of salt since it is just an interesting slice through
# the decision surface in 64 dimensions. In particular note that
# a datapoint (represented as a dot) does not necessarily be classified
# into the region it is lying in, since it will not lie on the plane
# that the first two principal components span.
# The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
# in :ref:`kernel_approximation`.
# visualize the decision surface, projected down to the first
# two principal components of the dataset
# Project the training data onto its principal components for visualization.
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
          'SVC (linear kernel)\n with Fourier rbf feature map\n'
          'n_components=100',
          'SVC (linear kernel)\n with Nystroem rbf feature map\n'
          'n_components=100']
plt.figure(figsize=(18, 7.5))
plt.rcParams.update({'font.size': 14})
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
                         fourier_approx_svm)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(1, 3, i + 1)
    Z = clf.predict(flat_grid)
    # Put the result into a color plot
    Z = Z.reshape(grid.shape[:-1])
    plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
    plt.axis('off')
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired,
                edgecolors=(0, 0, 0))
    plt.title(titles[i])
plt.tight_layout()
plt.show()
| [
"shkolanovaya@gmail.com"
] | shkolanovaya@gmail.com |
8c895338f7d0efd258a225c32321d524a8813768 | 9cddc6000431b88e78e8f953855db135b62dd490 | /question_generation/framework/simulation.py | 7a0c31cd598a22f2a1f0e9cdd036fd387717eb28 | [
"MIT"
] | permissive | hucvl/craft | 5aae1c3e3e6eac23e0c2c9ef4c18fc1a51d01d61 | 85c50b26eb8941781dc4bb93bce61201aff4643d | refs/heads/main | 2023-05-15T00:57:37.502562 | 2021-06-10T13:29:34 | 2021-06-10T13:29:34 | 372,904,182 | 12 | 0 | MIT | 2021-06-10T13:29:35 | 2021-06-01T17:00:44 | C | UTF-8 | Python | false | false | 14,309 | py | import copy
import json
import os
import subprocess
import sys
from pathlib import Path
from loguru import logger
from framework.utils import FileIO
from svqa.causal_graph import CausalGraph
import svqa.generate_questions as QuestionGeneratorScript
class SimulationRunner(object):
    """Thin wrapper that launches the simulation executable for a given
    controller JSON, optionally running all variation simulations too."""

    def __init__(self, exec_path: str, working_directory: str = None):
        # Path of the simulation executable.
        self.exec_path = exec_path
        # Default working directory is <4 levels above exec_path>/Testbed,
        # expressed in POSIX form.
        self.working_directory = working_directory if working_directory is not None \
            else Path(exec_path).parents[4].joinpath("Testbed").absolute().as_posix()

    def run_simulation(self, controller_json_path: str, debug_output_path=None):
        """Run one simulation, sending its stdout to `debug_output_path`
        (or discarding it when None)."""
        if debug_output_path is None:
            out_path, out_mode = os.devnull, 'wb'
        else:
            out_path, out_mode = debug_output_path, 'w'
        # BUG FIX: the stdout file was previously opened inline and never
        # closed (a file-handle leak per call); the context manager
        # guarantees it is released.
        with open(out_path, out_mode) as stdout_target:
            # NOTE(review): shell=True with an interpolated path breaks on
            # paths containing spaces/shell metacharacters -- consider
            # subprocess.call([self.exec_path, controller_json_path], ...).
            subprocess.call(f"{self.exec_path} {controller_json_path}",
                            shell=True,
                            universal_newlines=True,
                            cwd=self.working_directory,
                            stdout=stdout_target)

    def run_variations(self, controller_json_path: str, variations_output_path: str, debug_output_path=None):
        """Run the simulation's variations via a VariationRunner bound to self."""
        variation_runner = VariationRunner(self)
        variation_runner.run_variations(controller_json_path, variations_output_path, debug_output_path)
class SimulationInstance:
    """One simulation dataset instance: bundles the controller/variation/question
    file paths with the runner that executes them."""
    def __init__(self, instance_id: int,
                 controller_json_path: str,
                 variations_output_path: str,
                 questions_file_path: str,
                 runner: SimulationRunner):
        self.__runner = runner
        self.__controller_json_path = controller_json_path
        self.__variations_output_path = variations_output_path
        self.__questions_file_path = questions_file_path
        self.instance_id = instance_id

    def run_simulation(self, debug_output_path=None):
        # Delegate to the runner with this instance's controller file.
        self.__runner.run_simulation(self.__controller_json_path, debug_output_path)

    def run_variations(self, debug_output_path=None):
        # Delegate to the runner; results land in this instance's variations file.
        self.__runner.run_variations(self.__controller_json_path, self.__variations_output_path, debug_output_path)

    def generate_questions(self,
                           simulation_config,
                           output_file_path=None,
                           instances_per_template=1,
                           metadata_file_path: str = '../svqa/metadata.json',
                           synonyms_file_path: str = '../svqa/synonyms.json',
                           templates_dir: str = '../svqa/SVQA_1.0_templates'):
        """Generate SVQA questions from this instance's variation outputs.

        Writes to `output_file_path` when given, otherwise to the instance's
        own questions file.

        NOTE(review): `QuestionGenerator` is not among the imports visible at
        the top of this module -- presumably defined elsewhere in it; confirm.
        """
        question_generator = QuestionGenerator(self.__variations_output_path,
                                               self.__questions_file_path if output_file_path is None else output_file_path,
                                               simulation_config,
                                               metadata_file_path=metadata_file_path,
                                               synonyms_file_path=synonyms_file_path,
                                               templates_dir=templates_dir,
                                               instances_per_template=instances_per_template)
        question_generator.execute()
class VariationRunner(object):
    """Re-runs a simulation once per removable (dynamic) object.

    For each dynamic object in the original output, a variation scene is
    written with that object removed, a matching controller JSON is created,
    the simulation is re-run, and the per-object outputs are merged into a
    single variations JSON together with derived "enables"/"prevents" lists.
    """

    def __init__(self, runner: SimulationRunner):
        self.__runner = runner

    def __new_output_json(self, output: dict, i: int) -> dict:
        """Return a deep copy of *output* with object *i* removed.

        The causal graph is dropped (it is recomputed by the re-run) and only
        the initial (step == 0) scene states are kept.
        """
        ret = copy.deepcopy(output)
        del ret["scene_states"][0]["scene"]["objects"][i]
        del ret["causal_graph"]
        # Bug fix: the original deleted items from the list while indexing it
        # over its original length, which skips states or raises IndexError.
        ret["scene_states"] = [state for state in ret["scene_states"]
                               if state["step"] == 0]
        return ret

    def __create_variations(self, path: str, controller: dict, output: dict) -> list:
        """Write one variation scene + controller JSON per dynamic object.

        Returns a list of (object id, controller file path) tuples.
        """
        start_scene_state = output["scene_states"][0]  # best to check step count
        objects = start_scene_state["scene"]["objects"]
        variations = [(objects[i]["uniqueID"], self.__new_output_json(output, i))
                      for i in range(len(objects))
                      if objects[i]["bodyType"] != 0]  # 0 for static objects
        controller_paths = []
        # Renamed loop variable: the original reused the name `output`,
        # shadowing the method parameter.
        for object_id, variation_output in variations:
            name = f"{os.path.splitext(path)[0]}_var_{object_id}"
            with open(f"{name}.json", "w") as f:
                json.dump(variation_output, f)
            controller_paths.append(
                (object_id, self.__create_controller_variations(controller, name)))
        return controller_paths

    def __create_controller_variations(self, controller: dict, name: str) -> str:
        """Write a controller JSON for the variation *name*; return its path."""
        controller = copy.deepcopy(controller)
        controller["outputVideoPath"] = f"{name}_out.mpg"
        controller["outputJSONPath"] = f"{name}_out.json"
        controller["inputScenePath"] = f"{name}.json"
        name = f"{name}_controller.json"
        with open(name, "w") as f:
            json.dump(controller, f)
        return name

    def __get_variation_output(self, controller: str):
        """Load the simulation output JSON referenced by a controller file."""
        with open(controller) as controller_json_file:
            controller_data = json.load(controller_json_file)
        with open(controller_data["outputJSONPath"]) as output_json_file:
            output_data = json.load(output_json_file)
        return output_data

    def __is_equal_without_step(self, event1, event2):
        """True when two events share the same object set and type (step ignored)."""
        return set(event1["objects"]) == set(event2["objects"]) and event1["type"] == event2["type"]

    def __get_different_event_list(self, causal_graph_src: CausalGraph,
                                   causal_graph_compare: CausalGraph,
                                   object_props: dict,
                                   discarded_object_id: int):
        """IDs of events present in *src* but absent from *compare*.

        Events touching the removed object or any 'platform'-shaped object
        are ignored.
        """
        src_events = causal_graph_src.events
        compare_events = causal_graph_compare.events
        discarded_shapes = ['platform']
        # `obj` instead of the original `object`, which shadowed the builtin.
        objects_ids_discarded = [obj['uniqueID'] for obj in object_props
                                 if obj['shape'] in discarded_shapes]
        res = []
        for src_event in src_events:
            objects_of_event = src_event['objects']
            # discard events including object to be discarded
            if discarded_object_id in objects_of_event:
                continue
            # discard events involving any platform-shaped object
            if any(obj in objects_ids_discarded for obj in objects_of_event):
                continue
            # keep only events with no (step-insensitive) counterpart
            if not any(self.__is_equal_without_step(src_event, compare_event)
                       for compare_event in compare_events):
                res.append(src_event["id"])
        return res

    def __write_enables_prevents(self, output_dict: dict):
        """Attach "enables"/"prevents" event-id lists derived from each variation.

        "enables": events in the original run that disappear when the object is
        removed; "prevents": events that appear only once the object is removed.
        """
        original_causal_graph = CausalGraph(output_dict["original_video_output"]["causal_graph"])
        variation_outputs = output_dict["variations_outputs"]
        output_dict_enables = []
        output_dict_prevents = []
        scene_objects = output_dict['original_video_output']['scene_states'][0]['scene']['objects']
        for removed_object_key in variation_outputs:
            removed_object_id = int(removed_object_key)
            variation_causal_graph = CausalGraph(variation_outputs[removed_object_key]["causal_graph"])
            enables = self.__get_different_event_list(original_causal_graph,
                                                      variation_causal_graph,
                                                      scene_objects,
                                                      removed_object_id)
            prevents = self.__get_different_event_list(variation_causal_graph,
                                                       original_causal_graph,
                                                       scene_objects,
                                                       removed_object_id)
            output_dict_enables.extend({removed_object_key: event_id} for event_id in enables)
            output_dict_prevents.extend({removed_object_key: event_id} for event_id in prevents)
        output_dict["enables"] = output_dict_enables
        output_dict["prevents"] = output_dict_prevents

    def run_variations(self, controller_json_path: str, variations_output_path: str, debug_output_path: str):
        """Run every object-removal variation and write the merged output JSON."""
        final_output_json = {}
        controller_json = FileIO.read_json(controller_json_path)
        original_output_path: str = controller_json["outputJSONPath"]
        original_output: dict = FileIO.read_json(original_output_path)
        final_output_json["original_video_output"] = original_output
        variation_outputs = {}
        controller_paths = self.__create_variations(original_output_path,
                                                    controller_json,
                                                    original_output)
        for object_id, controller_path in controller_paths:
            self.__runner.run_simulation(controller_path, debug_output_path)
            variation_outputs[str(object_id)] = self.__get_variation_output(controller_path)
        final_output_json["variations_outputs"] = variation_outputs
        self.__write_enables_prevents(final_output_json)
        with open(variations_output_path, "w") as f:
            json.dump(final_output_json, f)
class Perturbator:
    """Utilities for re-answering questions against perturbed simulation
    outputs and for measuring agreement between two answer sets."""

    @staticmethod
    def regenerate_answers(original_variations_output_file_path,
                           perturbed_variations_output_path,
                           original_questions_path,
                           new_perturbed_qa_file_path,
                           metadata_path):
        """Recompute every original question's answer against the perturbed
        variations output and write the resulting QA file."""
        variations_output = FileIO.read_json(perturbed_variations_output_path)
        metadata = FileIO.read_json(metadata_path)
        original_questions = FileIO.read_json(original_questions_path)
        original_variations_output = FileIO.read_json(original_variations_output_file_path)
        regenerated = {"info": original_questions["info"], "questions": []}
        for qa in original_questions["questions"]:
            functional_program = qa["program"]
            scene_states = original_variations_output["original_video_output"]["scene_states"]
            causal_graph = CausalGraph(original_variations_output["original_video_output"]["causal_graph"])
            initial_scene = [state['scene'] for state in scene_states if state['step'] == 0][0]
            final_scene = [state['scene'] for state in scene_states if state['step'] != 0][0]
            answer = None
            try:
                answer = QuestionGeneratorScript.answer_question_offline(variations_output,
                                                                         [initial_scene, final_scene],
                                                                         causal_graph,
                                                                         functional_program,
                                                                         metadata)
            except Exception as e:
                logger.error(f"Answer could not be generated: {str(e)}")
            regenerated_qa = copy.deepcopy(qa)
            regenerated_qa["answer"] = answer
            regenerated["questions"].append(regenerated_qa)
        # Because of parallelization, results are written to a file instead of
        # returned, to keep things simple and process-safe.
        FileIO.write_json(regenerated, new_perturbed_qa_file_path)

    @staticmethod
    def measure_similarity(questions_original, questions_perturbed):
        """Compare two question lists by question text and video index.

        Returns (report dict, total originals, matched count, accuracy).
        """
        matched_correct = []
        mismatched = []
        missing = []
        for src_question in questions_original:
            match = None
            for candidate in questions_perturbed:
                same_text = src_question["question"] == candidate["question"]
                same_video = str(src_question["video_index"]) == str(candidate["video_index"])
                if same_text and same_video:
                    match = candidate  # keep scanning: last match wins, as before
            if match is None:
                missing.append(src_question)
                continue
            if str(src_question["answer"]) == str(match["answer"]):
                matched_correct.append(src_question)
            else:
                mismatched.append({"original": src_question, "perturbed": match})
        found_total = len(matched_correct) + len(mismatched)
        report = {"correct": matched_correct,
                  "wrong": mismatched,
                  "not_found_in_perturbed_questions": missing}
        accuracy = len(matched_correct) / found_total if found_total != 0 else 0
        return report, len(questions_original), found_total, accuracy
class QuestionGenerator:
    """Thin wrapper that drives the SVQA question-generation script via its
    argparse interface."""

    def __init__(self,
                 input_scene_file_path: str,
                 output_file_path: str,
                 simulation_config: dict,
                 metadata_file_path: str = '../svqa/metadata.json',
                 synonyms_file_path: str = '../svqa/synonyms.json',
                 templates_dir: str = '../svqa/SVQA_1.0_templates',
                 instances_per_template=1):
        # Excluded task ids come from the simulation config when provided.
        excluded_task_ids = simulation_config["excluded_task_ids"] \
            if simulation_config is not None else []
        cli_args = ['--input-scene-file', input_scene_file_path,
                    '--output-questions-file', output_file_path,
                    '--metadata-file', metadata_file_path,
                    '--synonyms-json', synonyms_file_path,
                    '--template-dir', templates_dir,
                    '--instances-per-template', str(instances_per_template),
                    '--restrict-template-count-per-video', False,
                    '--print-stats', False,
                    '--excluded-task-ids', excluded_task_ids]
        self.__args = QuestionGeneratorScript.parser.parse_args(cli_args)

    def execute(self):
        """Run question generation with the arguments prepared in __init__."""
        QuestionGeneratorScript.main(self.__args)
| [
"m.samilatesoglu@gmail.com"
] | m.samilatesoglu@gmail.com |
a51419acde7f1d082a6e3f58a35f250b8ce99b1b | 73d4f10770b30eb5014fe6f12ad11656f08b5a15 | /venv/Scripts/pip3-script.py | cc4a41a0d49198bb572b233539d2b003babfdb75 | [] | no_license | wksiazak/machinelearning_practicing | 95ed9ac8de2c0a270ade9fde148b8237b30618f0 | b7b747c293329390efc68cf847fc4c4e4692256a | refs/heads/master | 2022-06-20T22:16:55.264199 | 2020-05-09T10:22:35 | 2020-05-09T10:22:35 | 258,751,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | #!C:\machinelearning_1904\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# Auto-generated setuptools console-script shim: do not edit by hand.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"weronikaksiazak@gmail.com"
] | weronikaksiazak@gmail.com |
b6e342601739bf2ac9de2c64dcc912651a121c30 | f5cb63b795246acb80e1de74f4218a449b05bd6f | /with Sentiment (3.25 hours)/LSTM-Sentiment.py | 0ff53d978750534ce2857f6597dfff0a597e0645 | [] | no_license | Kreuz-und-QuerHan/Bitcoin-Trend-Prediction | 7a0e00c07e074846c70aa25ebdcd0ccd3df74da1 | bd97e7b7ea471215ceec64a49e08edf18572f14b | refs/heads/main | 2023-06-02T12:18:09.245775 | 2021-06-23T20:52:22 | 2021-06-23T20:52:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,183 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Libraries
from pandas import DataFrame, read_csv, concat
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, Bidirectional, GRU,ConvLSTM2D, Flatten
from matplotlib import pyplot as plt
from numpy import concatenate, reshape
from sklearn.metrics import mean_squared_error, mean_absolute_error
from math import sqrt
from sklearn.preprocessing import MinMaxScaler
from sys import argv
import csv
import datetime
import time
# In[2]:
# Series to Supervised Learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning dataset.

    Each output row pairs ``n_in`` lag observations (t-n_in .. t-1) with
    ``n_out`` forecast observations (t .. t+n_out-1), one column per input
    variable, named ``var<j>(t-k)`` / ``var<j>(t+k)``.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols = []
    names = []
    # Input sequence: columns for t-n_in ... t-1.
    for lag in range(n_in, 0, -1):
        cols.append(df.shift(lag))
        names.extend('var%d(t-%d)' % (var + 1, lag) for var in range(n_vars))
    # Forecast sequence: columns for t ... t+n_out-1.
    for step in range(n_out):
        cols.append(df.shift(-step))
        if step == 0:
            names.extend('var%d(t)' % (var + 1) for var in range(n_vars))
        else:
            names.extend('var%d(t+%d)' % (var + 1, step) for var in range(n_vars))
    agg = concat(cols, axis=1)
    agg.columns = names
    # Shifting introduces NaN rows at the edges; drop them by default.
    if dropnan:
        agg.dropna(inplace=True)
    return agg
# In[39]:
# Read Data and Extract Values: the CSV is expected to have a 'time' column,
# feature columns from `startIndex` onward, and the crypto price last.
fileName = 'ETH' #write crypto name
dataset = read_csv(fileName+'.csv', parse_dates=['time'])
startIndex = 3
nrows = dataset.shape[0]
values = dataset.iloc[:,startIndex:].values #Getting values - Total Sentiment and BTC Values
valuesCrypto = dataset.iloc[:,-1:].values #Getting values - C Values
# For predicting with just Cryptocurrency values, we have just 1 input variable.
# Incorporating sentiment values will make input variables=2
# Comment the below line if there are multiple features / input variable.
# values = values.reshape(-1,1) #Only do this if you have 1 input variable
# In[40]:
# Scaling all features to [0, 1] before framing as supervised learning.
scaler = MinMaxScaler(feature_range = (0,1))
scaler = scaler.fit(values)
scaled = scaler.fit_transform(values)
# In[41]:
# Input and Output Sequence Length
input_sequence = 1
output_sequence = 1
# Call Series to Supervised Function
reframed = series_to_supervised(scaled, input_sequence, output_sequence)
# In[42]:
# Drop current sentiment/any other feature that might be added in the future(at time t),
# keeping only the target variable's time-t column.
dropColumns = []
for i in range(values.shape[1]-1):
    dropColumns.append('var{}(t)'.format(i+1))
reframed=reframed.drop(columns=dropColumns)
# Drop current sentiment
# reframed=reframed.drop(columns=['var1(t)'])
reframed
# In[7]:
# Ignore the headers
reframedValues = reframed.values
reframedValues
# In[8]:
#Splitting data into train and test sets (chronological split, no shuffling)
n_train_days = int(0.9*nrows) #90% data is train, 10% test
train = reframedValues[:n_train_days, :]
test = reframedValues[n_train_days:nrows, :]
# valuesCrypto = reframed.iloc[:,-1:].values #Getting values - C Values
#Assigning inputs and output datasets: last column is the target
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
#Reshaping input to be 3 dimensions (samples, timesteps, features)
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
#Building LSTM Neural Network model:
# Bidirectional GRU -> LSTM -> Dropout -> single-unit Dense regressor.
model = Sequential()
model.add(Bidirectional(GRU(50, activation='relu', return_sequences=True, input_shape=(train_X.shape[1], train_X.shape[2]))))
model.add(LSTM(50, activation='tanh'))
model.add(Dropout(0.4))
model.add(Dense(1, activation='relu'))
model.compile(optimizer='adam', loss='mse',metrics=['acc'])
# Uncomment below line to get summary of the model
# print(model.summary(line_length=None, positions=None, print_fn=None))
#Fitting model (shuffle=False preserves the time ordering of samples)
history = model.fit(train_X, train_y, epochs = 200, batch_size=25, validation_data=(test_X, test_y), verbose=2, shuffle=False) #Best so far: 100 neurons, epochs = 400, batch_size = 53
print(history.history)
# In[11]:
# Prediction on the held-out test window
model_prediction = model.predict(test_X)
# In[12]:
# Inverse Scale: map predictions back to price units using a scaler fitted
# on the crypto price column only.
scalerCrypto = MinMaxScaler(feature_range = (0,1))
# NOTE(review): the next two lines call fit on `scaler` (the feature scaler)
# and reassign its return value to `scalerCrypto` — presumably
# `scalerCrypto.fit(valuesCrypto)` was intended; confirm.
scalerCrypto = scaler.fit(valuesCrypto)
scaledCrypto = scaler.fit_transform(valuesCrypto)
# valuesCrypto = reframedValues[n_train_days:nrows, -1].tolist()
model_prediction_unscale = scalerCrypto.inverse_transform(model_prediction)
predictedValues = reshape(model_prediction_unscale, model_prediction_unscale.shape[0])
# actualValues = valuesCrypto[n_train_days:] #test_y+input_sequence:
actualValues = valuesCrypto[n_train_days+input_sequence:] #test_y+input_sequence:
# print(len(actualValues))
# print(input_sequence)
# print("----")
# print(len(predictedValues))
# print(len(test))
# print(len(test_X))
# print(type(reframedValues))
actualValues = reshape(actualValues, actualValues.shape[0])
# In[13]:
#Plotting training loss vs validation loss
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.legend()
plt.show()
# In[19]:
#Visualising Results (Actual vs Predicted)
plt.plot(actualValues, color = 'red', label = 'Actual '+ fileName + ' Value')
plt.plot(predictedValues, color = 'blue', label = 'Predicted '+ fileName + ' Value') #[1:38]
plt.title(fileName+' Trend Prediction')
plt.xlabel('Time Interval (1 interval = 3.5 hours)')
plt.ylabel('Price')
plt.legend()
# Uncomment below line to save the figure
# plt.savefig('Trend Graph.png', dpi=700)
plt.show()
# In[15]:
# Wrap the two series in single-column DataFrames for alignment and export.
actual= DataFrame(actualValues, columns= ['Actual Value'])
predicted=DataFrame(predictedValues, columns= ['Predicted Value'])
# In[16]:
#Calculating RMSE and MAE on rows where both series are present
errorDF=concat([actual,predicted], axis=1)
errorDF.dropna(inplace=True)
rmse = sqrt(mean_squared_error(errorDF.iloc[:,0], errorDF.iloc[:,1]))
mae = mean_absolute_error(errorDF.iloc[:,0], errorDF.iloc[:,1])
print('Test MAE: %.3f' % mae)
print('Test RMSE: %.3f' % rmse)
# In[17]:
# Write to csv: timestamp, actual, predicted (consumed by Analyzer 2 below)
writeFileName = "--Results.csv"
timestamp = DataFrame(dataset['time'][n_train_days:], columns= ['time'])
timestamp.reset_index(drop=True, inplace=True)
results=concat([timestamp,actual,predicted], axis=1)
print("Head: ",results.head())
print("Tail: ",results.tail())
results.dropna(inplace=True)
results.to_csv(fileName+writeFileName, index= False)
# In[18]:
# Analyzer 2: replay the predictions as naive directional trades and report
# hit counts and a profit tally (fixed 10000 stake per trade).
with open(fileName+writeFileName, newline='') as read_file:
    results = csv.reader(read_file, delimiter=',', quotechar='|')
    next(results)  # skip the header row
    # NOTE(review): `time` shadows the imported `time` module from here on.
    time = []
    actual = []
    predicted = []
    n_percent= 2
    # Pull data from CSV and store in arrays.
    for row in results:
        # print(row)
        time.append(row[0])
        actual.append(float(row[1]))
        predicted.append(float(row[2]))
    actual_delta = []
    predicted_delta = []
    correct_long = 0
    incorrect_long = 0
    correct_short = 0
    incorrect_short = 0
    profit = []
    profit_agg = 0
    # Process predictions, print analytics. A "long" is counted when the
    # model predicts a rise, a "short" when it predicts a fall; correctness
    # is judged against the actual next-interval move.
    for i in range(2, len(actual) - 1):
        a_delta = (actual[i + 1] - actual[i]) / actual[i + 1]
        p_delta = (predicted[i + 1] - predicted[i]) / predicted[i + 1]
        # print("time_start", time[i])
        # print("time_ended", time[i+1])
        # print("actual_price", actual[i], "->", actual[i+1])
        # print("actual_delta", a_delta * 100)
        # print("predicted_price", predicted[i], "->", predicted[i + 1])
        # print("predicted_delta", p_delta * 100)
        # print("difference", abs(a_delta - p_delta) * 100)
        if p_delta > 0 and a_delta > 0:# and abs(a_delta)*100>n_percent:
            correct_long += 1
            profits = 10000 * abs(a_delta)
            profit.append(profits)
            profit_agg += profits
            #print('gain/loss', profits)
        if p_delta > 0 > a_delta:# and abs(a_delta)*100>n_percent:
            incorrect_long += 1
            profits = -10000 * abs(a_delta)
            profit.append(profits)
            profit_agg += profits
            #print('gain/loss', profits)
        # To ignore the "short" predictions, comment out the following 2 if-blocks.
        if p_delta < 0 and a_delta < 0:# and abs(a_delta)*100>n_percent:
            correct_short += 1
            profits = 10000 * abs(a_delta)
            profit.append(profits)
            profit_agg += profits
            #print('gain/loss', profits)
        if p_delta < 0 < a_delta:# and abs(a_delta)*100>n_percent:
            incorrect_short += 1
            profits = -10000 * abs(a_delta)
            profit.append(profits)
            profit_agg += profits
            #print('gain/loss', profits)
        # print()
    print('Analyzer 2 ')
    print("------------")
    print('correct_long', correct_long)
    print('incorrect_long', incorrect_long)
    print('correct_short', correct_short)
    print('incorrect_short', incorrect_short)
    print('profit_tracker', profit_agg)
# In[ ]:
| [
"79607020+TandonAnanya@users.noreply.github.com"
] | 79607020+TandonAnanya@users.noreply.github.com |
9cacc499ad1d427ec8d2592fa29bfeab94a32130 | bb18f19a00e01e94652bcbe1aeb5d5da70f58f8f | /setup.py | 7ba523b7a8795415a2d1f494060fe99a81697a6d | [
"MIT"
] | permissive | MLDERES/isys599v | 02b8e4d573bccbbf06ab31b307649f99c03c334c | 078097f23c3d636a1472546ff4384abbe641a28d | refs/heads/master | 2022-04-08T14:20:14.646766 | 2020-02-23T20:50:25 | 2020-02-23T20:50:25 | 234,643,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from setuptools import find_packages, setup
# Minimal packaging metadata; packages are discovered automatically.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='Repo for work in 599v',
    author='Michael Dereszynski',
    license='MIT',
)
| [
"mlderes@hotmail.com"
] | mlderes@hotmail.com |
d5a1208ab0db01560b65193a0939850725825dbe | 2fdabdbf4a1cce9708e5b194331908e95172fe4f | /crudapplication/migrations/0002_auto_20190518_1557.py | 4387d58084c3fe23d14d122065aca11e9d0468ab | [] | no_license | patilganeshv/DjangoSampleProject_webapplication | b1245d7291d080f1b5e3ecbcd43231803a096d25 | 116b8f5b6fa19945d23f090786b2e426f805450b | refs/heads/main | 2023-03-24T23:32:34.094132 | 2021-03-08T07:03:36 | 2021-03-08T07:03:36 | 345,560,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | # Generated by Django 2.2.1 on 2019-05-18 10:27
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames the Employee model's DB table."""
    dependencies = [
        ('crudapplication', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='employee',
            table='employee11111',
        ),
    ]
| [
"patilganeshv@gmail.com"
] | patilganeshv@gmail.com |
cdf4f9f0feb9581b551fcc12879a25bdc4bcd735 | 5eb91e9c60f79d401a8580768885d227dfb75cf1 | /sahara/plugins/mapr/services/oozie/oozie.py | e11c18f6ba0d56552e99f7e89422558105efea85 | [
"Apache-2.0"
] | permissive | hamzehkhazaei/sahara | 3b2375e1a6602fc359766abdf5b7453a407a164b | 58faa2d62e30390732dc1be4be5bc9220960fcf0 | refs/heads/master | 2021-05-29T00:19:24.588397 | 2015-05-05T14:41:35 | 2015-05-05T14:41:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,903 | py | # Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
import sahara.plugins.mapr.domain.configuration_file as bcf
import sahara.plugins.mapr.domain.node_process as np
import sahara.plugins.mapr.domain.service as s
import sahara.plugins.mapr.services.mysql.mysql as mysql
import sahara.plugins.mapr.util.general as g
import sahara.plugins.mapr.util.validation_utils as vu
LOG = logging.getLogger(__name__)
# Oozie node process definition; port 11000 is opened for its web UI/API.
OOZIE = np.NodeProcess(
    name='oozie',
    ui_name='Oozie',
    package='mapr-oozie',
    open_ports=[11000]
)
@six.add_metaclass(s.Single)
class Oozie(s.Service):
    """MapR plugin service definition for Oozie 4.0.1 (singleton service)."""

    def __init__(self):
        super(Oozie, self).__init__()
        self._name = 'oozie'
        self._ui_name = 'Oozie'
        self._version = '4.0.1'
        self._node_processes = [OOZIE]
        self._dependencies = [('mapr-oozie-internal', self.version)]
        self._cluster_defaults = ['oozie-default.json']
        # Exactly one Oozie process is allowed per cluster.
        self._validation_rules = [vu.exactly(1, OOZIE)]
        self._ui_info = [('Oozie', OOZIE, 'http://%s:11000/oozie')]

    def get_config_files(self, cluster_context, configs, instance=None):
        """Build oozie-site.xml from user configs plus derived properties."""
        oozie_site = bcf.HadoopXML("oozie-site.xml")
        oozie_site.remote_path = self.conf_dir(cluster_context)
        if instance:
            oozie_site.fetch(instance)
        oozie_site.load_properties(configs)
        oozie_site.add_properties(self._get_oozie_site_props(cluster_context))
        return [oozie_site]

    def _get_oozie_site_props(self, context):
        """Return the MySQL-backed JPA and Hadoop properties for oozie-site."""
        oozie_specs = mysql.MySQL.OOZIE_SPECS
        return {
            'oozie.db.schema.name': oozie_specs.db_name,
            'oozie.service.JPAService.create.db.schema': True,
            'oozie.service.JPAService.jdbc.driver': mysql.MySQL.DRIVER_CLASS,
            'oozie.service.JPAService.jdbc.url': self._get_jdbc_uri(context),
            'oozie.service.JPAService.jdbc.username': oozie_specs.user,
            'oozie.service.JPAService.jdbc.password': oozie_specs.password,
            'oozie.service.HadoopAccessorService.hadoop.configurations':
                '*=%s' % context.hadoop_conf
        }

    def _get_jdbc_uri(self, context):
        """Build the JDBC URL pointing at the cluster's MySQL instance."""
        jdbc_uri = ('jdbc:mysql://%(db_host)s:%(db_port)s/%(db_name)s?'
                    'createDatabaseIfNotExist=true')
        jdbc_args = {
            'db_host': mysql.MySQL.get_db_instance(context).fqdn(),
            'db_port': mysql.MySQL.MYSQL_SERVER_PORT,
            'db_name': mysql.MySQL.OOZIE_SPECS.db_name,
        }
        return jdbc_uri % jdbc_args

    def post_install(self, cluster_context, instances):
        """Copy the MySQL connector jar into the Oozie server's lib dir."""
        oozie_inst = cluster_context.get_instance(OOZIE)
        oozie_service = cluster_context.get_service(OOZIE)
        if oozie_service:
            oozie_version = oozie_service.version
            symlink_cmd = ('cp /usr/share/java/mysql-connector-java.jar '
                           '/opt/mapr/oozie/oozie-%s'
                           '/oozie-server/lib/') % oozie_version
            with oozie_inst.remote() as r:
                LOG.debug('Installing MySQL connector for Oozie')
                # Best-effort: failure is tolerated (raise_when_error=False).
                r.execute_command(symlink_cmd, run_as_root=True,
                                  raise_when_error=False)

    def _install_share_libs(self, cluster_context):
        """Upload the Oozie sharelib to MapR-FS if not already present."""
        check_sharelib = 'sudo -u mapr hadoop fs -ls /oozie/share/lib'
        create_sharelib_dir = 'sudo -u mapr hadoop fs -mkdir /oozie'
        is_yarn = cluster_context.cluster_mode == 'yarn'
        upload_args = {
            'oozie_home': self.home_dir(cluster_context),
            # share2 for YARN clusters, share1 otherwise.
            'share': 'share2' if is_yarn else 'share1'
        }
        upload_sharelib = ('sudo -u mapr hadoop fs -copyFromLocal '
                           '%(oozie_home)s/%(share)s /oozie/share')
        oozie_inst = cluster_context.get_instance(OOZIE)
        with oozie_inst.remote() as r:
            LOG.debug("Installing Oozie sharelibs")
            # Only mkdir+upload when the ls check fails (sharelib missing).
            command = '%(check)s || (%(mkdir)s && %(upload)s)'
            args = {
                'check': check_sharelib,
                'mkdir': create_sharelib_dir,
                'upload': upload_sharelib % upload_args,
            }
            r.execute_command(command % args, raise_when_error=False)

    def post_start(self, cluster_context, instances):
        """After start: install sharelibs and (re)build the Oozie web UI."""
        instances = cluster_context.filter_instances(instances, OOZIE)
        self._install_share_libs(cluster_context)
        self._install_ui(cluster_context, instances)

    @g.remote_command(1)
    def _rebuild_oozie_war(self, remote, cluster_context):
        """Download ExtJS and repackage the Oozie WAR with it on *remote*."""
        extjs_url = 'http://dev.sencha.com/deploy/ext-2.2.zip'
        extjs_file = '/tmp/extjs.zip'
        g.download(remote, extjs_url, extjs_file)
        cmd = '%(home)s/bin/oozie-setup.sh prepare-war -extjs %(ext)s'
        args = {'home': self.home_dir(cluster_context), 'ext': extjs_file}
        remote.execute_command(cmd % args, run_as_root=True)

    def update(self, cluster_context, instances=None):
        """Rebuild the UI on the cluster's Oozie instances."""
        instances = instances or cluster_context.get_instances()
        instances = cluster_context.filter_instances(instances, OOZIE)
        self._install_ui(cluster_context, instances)

    def _install_ui(self, cluster_context, instances):
        """Stop running Oozie processes, rebuild the WAR, then start them."""
        # NOTE(review): filter() is a lazy iterator on Python 3 — confirm
        # OOZIE.stop accepts an iterator rather than a list.
        OOZIE.stop(filter(OOZIE.is_started, instances))
        g.execute_on_instances(
            instances, self._rebuild_oozie_war, cluster_context)
        OOZIE.start(instances)
| [
"aosadchiy@maprtech.com"
] | aosadchiy@maprtech.com |
04610e70877f5e0bfca5d4ab9e745ffc7706df50 | b80d6ad08fa5dc8c21bd08f99249c8df37af3126 | /setup.py | 4c2b3e596423b6ecd2f2e967c8a2462c801cfed6 | [] | no_license | quentin-auge/hilbertpiet | 717295e344ede803885fac2262816e2c115c4fb2 | 304c52129013c46ba71886be792039fc0a6a25be | refs/heads/master | 2022-07-13T10:16:53.801796 | 2020-05-10T02:15:56 | 2020-05-10T02:18:29 | 257,011,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | # coding: utf8
from setuptools import find_packages, setup
setup(name='hilbertpiet',
      version='0.1',
      description='Hilbert-curve-shaped Piet programs generation',
      author='Quentin Augé',
      author_email='quentin.auge@gmail.com',
      license='closed',
      packages=find_packages(),
      # Ship the precomputed Piet number programs with the package.
      package_data={'hilbertpiet': ['data/piet_numbers.pkl']},
      python_requires='>=3.7',
      classifiers=['Programming Language :: Python :: 3 :: Only',
                   'Operating System :: Unix'],
      install_requires=['pillow'],
      extras_require={
          'testing': ['coverage', 'mock', 'pytest', 'pytest-cov']
      },
      entry_points={
          'console_scripts': [
              'hilbertpiet = hilbertpiet.cli.main:main',
              'optimize-piet-numbers = hilbertpiet.cli.optimize_numbers:main'
          ]
      })
| [
"quentin.auge@gmail.com"
] | quentin.auge@gmail.com |
4cec37fbef63716ae1ffe812f59e2cb1b4f0e9fa | 469eb9256ade8913a454d9e59bb11710239c9ab1 | /test/test_Identify.py | 9067654f9abd852594d855c81297232f1c76363c | [] | no_license | mokko/oai | 8b564b3bba0561278a34e7fdad26d35b9f931ee1 | 5d5ed4c0e2a792c82c1b6b3daa042c1bb9890c80 | refs/heads/main | 2023-04-13T17:15:59.891005 | 2021-05-01T10:16:34 | 2021-05-01T10:16:34 | 363,194,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import sys
sys.path.append ("../src")
from Identify import Identify
def test_init():
    """Constructor stores the three required attributes verbatim."""
    kwargs = {
        "baseURL": "www.mmm.com/oai",
        "deletedRecord": "transient",
        "repositoryName": "M3OAI",
    }
    ident = Identify(**kwargs)
    assert ident
    assert ident.baseURL == "www.mmm.com/oai"
    assert ident.deletedRecord == "transient"
    assert ident.repositoryName == "M3OAI"
def test_fromFile():
    """An Identify object can be loaded from an XML file on disk."""
    loaded = Identify.fromFile(path="identify.xml")
    assert loaded
def test_fromXML():
    """Round-trip: serialize an Identify to XML and parse it back."""
    original = Identify(
        baseURL="www.mmm.com",
        deletedRecord="transient",
        repositoryName="M3OAI",
    )
    xml = original.toString()
    reparsed = Identify.fromXML(xml=xml)
    assert reparsed
def test_validate():
    """A freshly built Identify (with an admin email) validates and serializes."""
    record = Identify(
        baseURL="www.mmm.com",
        deletedRecord="transient",
        repositoryName="M3OAI",
    )
    record.adminEmail.append("m3@gmail.com")
    record.toFile(path="identify2.xml")
    print(record.toString())
    record.validate()
def test_validate2():
    """An Identify loaded from file passes validation."""
    record = Identify.fromFile(path="identify.xml")
    record.validate()
"mauricemengel@gmail.com"
] | mauricemengel@gmail.com |
7f62acfb8b4c1fece3fa958fc6321f174a5dc480 | bcdbf414e06115a7f57c551886850ce95fc14a51 | /tzfinderapp/main.py | 5734737bd4557bb9ac10aade36e2ae18b796d3f6 | [
"MIT"
] | permissive | mblackgeo/timezone-finder-webapp | e5d7272618a67d65e8acc4b72fe434b6ee48bb70 | 4aa32874eb91ea7d3b1225fe7fc78c74388cc375 | refs/heads/main | 2023-08-03T07:20:49.778356 | 2021-09-15T20:13:53 | 2021-09-15T20:13:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | from typing import Tuple, List
import dash_html_components as html
import dash_leaflet as dl
from dash import Dash
from dash.dependencies import Input, Output
from flask import Flask
from timezonefinder import TimezoneFinderL
# Flask server wrapped by Dash so the app can be served by a WSGI host.
server = Flask(__name__)
app = Dash(prevent_initial_callbacks=True, server=server)
# Shared timezone lookup instance, loaded into memory once at startup.
tf = TimezoneFinderL(in_memory=True)
url = "https://tiles.stadiamaps.com/tiles/alidade_smooth_dark/{z}/{x}/{y}{r}.png"
attribution = '© <a href="https://stadiamaps.com/">Stadia Maps</a> '
app.title = "Timezone Finder"
# Full-viewport map with a dark basemap and an (initially empty) marker
# layer that the map_click callback populates.
app.layout = html.Div(
    [
        dl.Map(
            [
                dl.TileLayer(url=url, maxZoom=20, attribution=attribution),
                dl.LayerGroup(id="layer"),
            ],
            id="map",
            style={
                "width": "100%",
                "height": "98vh",
                "margin": "none",
                "display": "block",
            },
        ),
    ]
)
@app.callback(Output("layer", "children"), [Input("map", "click_lat_lng")])
def map_click(click_lat_lng: Tuple[float, float]) -> List[dl.Marker]:
    """Drop a tooltip marker naming the timezone at the clicked location."""
    latitude, longitude = click_lat_lng
    zone = tf.timezone_at(lat=latitude, lng=longitude)
    label = f"Timezone: {zone} ({latitude:.3f}, {longitude:.3f})"
    marker = dl.Marker(
        position=click_lat_lng,
        children=dl.Tooltip(label, permanent=True),
    )
    return [marker]
if __name__ == "__main__":
    # Local development entry point; production serves `server` via WSGI.
    app.run_server(port=8080)
| [
"18327836+mblack20@users.noreply.github.com"
] | 18327836+mblack20@users.noreply.github.com |
ab7cff661520295ebd66cdf67181839a9f2b1a66 | c547c293c101cd245d4219475587a312b53e5f6c | /KNN/kNN3.py | dbe213cb7483d3d427db003713340f2be47868ea | [] | no_license | MaxTian666/machine-learning | 4c977593d618526ff03525a38ed96b89e86997e7 | b0c1799c735a0acba8a517f79757f12d27bb312b | refs/heads/master | 2020-03-08T03:47:42.018667 | 2018-04-03T12:25:16 | 2018-04-03T12:25:16 | 127,901,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,621 | py | # -*- coding:utf-8 -*-
"""
@author:Tian Sir
@file:kNN3.py
@time:2018/3/2214:16
"""
'''
实施KNN算法的步骤:
1)计算已知分类数据集点与当前点的距离;
2)安装距离递增次序排序
3)选取与当前点距离最小的k个点;
4)确定前k个点出现的频率;
5)返回前k个点出现频率最高的类别作为当前点的预分类。
'''
from numpy import *
import operator
def createDataSet():
    """Return a toy labelled dataset for kNN demos.

    Returns:
        group: (4, 2) numpy array of 2-D points.
        labels: class label ('A' or 'B') for each row of group.
    """
    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels
def classify0(inX, dataSet, labels, k):
    """Classify *inX* with the k-nearest-neighbours algorithm.

    Steps: compute distances from inX to every known point, sort them
    ascending, take the k nearest, count their labels, and return the most
    frequent label.

    Args:
        inX: point to classify (sequence of feature values).
        dataSet: (n, m) array of known points.
        labels: class label for each row of dataSet.
        k: number of nearest neighbours that vote.

    Returns:
        The label occurring most often among the k nearest points.
    """
    dataSetSize = dataSet.shape[0]  # number of known points
    # Euclidean distance from inX to every row of dataSet.
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)  # sum squared differences per row
    distances = sqDistances ** 0.5
    # Indices of the points sorted by increasing distance.
    sortedDistIndicies = distances.argsort()
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # Most frequent label among the k nearest neighbours wins.
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
# Smoke test: classify the point (0, 0.1) against the toy dataset.
group, labels = createDataSet()
a = classify0([0,0.1],group,labels, 3)
print(a)
"409855255@qq.com"
] | 409855255@qq.com |
e1e15445a7861fad680daa90f449659e805d60da | f65c89f0cf09912dbf55ee077d583085b0a607dc | /keyboards.py | 2254e464a3e9b1960761d2dcba5a88755ceb385a | [] | no_license | Hacker12lamer/HackerPisya | 02ffe7bc106edcfab89a9afe8795c3a63ff445e0 | c2fec30831a232e8ee4e997587faa50c69c68fb1 | refs/heads/master | 2020-12-10T00:49:26.337633 | 2020-01-12T21:41:57 | 2020-01-12T21:41:57 | 233,462,870 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 25,726 | py | # -*- coding: utf-8 -*-
import telebot
keyboardMain = telebot.types.ReplyKeyboardMarkup(True)
keyboardMain.row('🔹 Москва')
keyboardMain.row('🔹 Санкт-Петербург')
keyboardMain.row('🔹 Ростов-на-Дону')
keyboardMain.row('🔹 Екатеринбург')
keyboardMain.row('🔹 Челябинск')
keyboardMain.row('🔹 Новосибирск')
keyboardMain.row('🔹 Нижний Новгород')
keyboardMain.row('🔹 Казань')
keyboardMain.row('🔹 Омск')
keyboardMain.row('🔹 Самара')
keyboardMain.row('🔹 Краснодар')
keyboardMain.row('🔹 Саратов')
keyboardMain.row('🔹 Тюмень')
keyboardMain.row('🔹 Барнаул')
keyboardMain.row('🔹 Иркутск')
keyboardMain.row('🔹 Ярославль')
keyboardMain.row('🔹 Владивосток')
keyboardMain.row('🔹 Оренбург')
keyboardMain.row('🔹 Томск')
keyboardMain.row('🔹 Воронеж')
keyboardMain.row('🔹 Пермь')
keyboardMain.row('🔹 Волгоград')
keyboardMain.row('🔹 Уфа')
keyboardMain.row('🔹 Красноярск')
keyboardMain.row('🔹 Тольятти')
keyboardMain.row('🔹 Феодосия')
keyboardMain.row('🔹 Тимашевск')
moskow_rayons = telebot.types.ReplyKeyboardMarkup(True)
moskow_rayons.row('🔹 Измайлово')
moskow_rayons.row('🔹 Сокольники')
moskow_rayons.row('🔹 Внуково')
moskow_rayons.row('🔹 Кунцево')
moskow_rayons.row('🔹 Крюково')
moskow_rayons.row('🔹 Щукино')
moskow_rayons.row('🔹 Лефортово')
moskow_rayons.row('🔹 Выхино-Жулебина')
moskow_rayons.row('🔹 Медведково')
moskow_rayons.row('🔹 Якиманка')
moskow_rayons.row('🔹 Отрадное')
moskow_rayons.row('🔹 Главная')
moskow_tovar = telebot.types.ReplyKeyboardMarkup(True)
moskow_tovar.row('СК (син.крис) {0.3г/900 RUB}',)
moskow_tovar.row('СК (син.крис) {0.5г/1300 RUB}')
moskow_tovar.row('СК (син.крис) {1г/2200 RUB}')
moskow_tovar.row('Гашиш EURO {1г/1100 RUB}')
moskow_tovar.row('Гашиш EURO {2г/2000 RUB}')
moskow_tovar.row('Гашиш EURO {5г/4000 RUB}')
moskow_tovar.row('MEPHEDRONE крис {1г/2100 RUB}')
moskow_tovar.row('Амфетамин HQ {2г/2400 RUB}')
moskow_tovar.row('Шишки OG Kush {1г/1200 RUB}')
moskow_tovar.row('Шишки OG Kush {2г/2200 RUB}')
moskow_tovar.row('Шишки OG Kush {5г/4200 RUB}')
moskow_tovar.row('РОСС {5г/3000 RUB}')
moskow_tovar.row('Героин HQ {0.5г/1700 RUB}')
sankt_rayons = telebot.types.ReplyKeyboardMarkup(True)
sankt_rayons.row('🔹 Центральный район')
sankt_rayons.row('🔹 Невский район')
sankt_rayons.row('🔹 Кировский район')
sankt_rayons.row('🔹 Петроградский район')
sankt_rayons.row('🔹 Московский район')
sankt_rayons.row('🔹 Василеостровской район')
sankt_rayons.row('🔹 Главная')
sankt_tovar = telebot.types.ReplyKeyboardMarkup(True)
sankt_tovar.row('СК (син.крис) {0.3г/700 RUB')
sankt_tovar.row('СК (син.крис) {0.5г/1200 RUB}')
sankt_tovar.row('СК (син.крис) {1г/2200 RUB}')
sankt_tovar.row('Гашиш EURO {1г/1100 RUB}')
sankt_tovar.row('Гашиш EURO {2г/2000 RUB}')
sankt_tovar.row('Гашиш EURO {5г/4000 RUB}')
sankt_tovar.row('MEPHEDRONE крис {1г/2100 RUB}')
sankt_tovar.row('Амфетамин HQ {2г/2000 RUB}')
sankt_tovar.row('Шишки OG Kush {1г/1200 RUB}')
novosubirsk_rayons = telebot.types.ReplyKeyboardMarkup(True)
novosubirsk_rayons.row('🔹 Кировский')
novosubirsk_rayons.row('🔹 Дзержинский')
novosubirsk_rayons.row('🔹 Советский')
novosubirsk_rayons.row('🔹 Центральный')
novosubirsk_rayons.row('🔹 Железнодорожный')
novosubirsk_rayons.row('🔹 Калининский')
novosubirsk_rayons.row('🔹 Ленинский')
novosubirsk_rayons.row('🔹 Главная')
novosubirsk = telebot.types.ReplyKeyboardMarkup(True)
novosubirsk.row('СК (син.крис) {0.3г/700 RUB')
novosubirsk.row('СК (син.крис) {0.5г/1200 RUB}')
novosubirsk.row('СК (син.крис) {1г/2200 RUB}')
novosubirsk.row('Гашиш EURO {1г/1100 RUB}')
novosubirsk.row('Гашиш EURO {2г/2000 RUB}')
novosubirsk.row('Гашиш EURO {5г/4000 RUB}')
novosubirsk.row('РОСС {3г/1900 RUB}')
novosubirsk.row('Амфетамин HQ {2г/2000 RUB}')
novosubirsk.row('Шишки OG Kush {1г/1200 RUB}')
ekb_rayons = telebot.types.ReplyKeyboardMarkup(True)
ekb_rayons.row('🔹 Верх-Исетский')
ekb_rayons.row('🔹 Железнодорожный')
ekb_rayons.row('🔹 Кировский')
ekb_rayons.row('🔹 Ленинский')
ekb_rayons.row('🔹 Октябрьский')
ekb_rayons.row('🔹 Главная')
ekb = telebot.types.ReplyKeyboardMarkup(True)
ekb.row('СК (син.крис) {0.3г/900 RUB}')
ekb.row('СК (син.крис) {0.5г/1300 RUB}')
ekb.row('СК (син.крис) {1г/2200 RUB}')
ekb.row('Гашиш EURO {1г/1100 RUB}')
ekb.row('Гашиш EURO {2г/2000 RUB}')
ekb.row('Гашиш EURO {5г/4000 RUB}')
ekb.row('MEPHEDRONE крис {1г/2100 RUB}')
ekb.row('Амфетамин HQ {2г/2400 RUB}')
ekb.row('Шишки OG Kush {1г/1200 RUB}')
ekb.row('Шишки OG Kush {2г/2200 RUB}')
ekb.row('Шишки OG Kush {5г/4200 RUB}')
ekb.row('РОСС {5г/3000 RUB}')
ekb.row('Героин HQ {0.5г/1700 RUB}')
novgorod_rayons = telebot.types.ReplyKeyboardMarkup(True)
novgorod_rayons.row('🔹 Автозаводский')
novgorod_rayons.row('🔹 Богородский')
novgorod_rayons.row('🔹 Бутурлинский')
novgorod_rayons.row('🔹 Гагинский')
novgorod_rayons.row('🔹 Вознесенский')
novgorod_rayons.row('🔹 Ветлужский')
novgorod_rayons.row('🔹 Варнавинский')
novgorod_rayons.row('🔹 Вачский')
novgorod_rayons.row('🔹 Главная')
novgorod = telebot.types.ReplyKeyboardMarkup(True)
novgorod.row('СК (син.крис) {0.3г/800 RUB}')
novgorod.row('СК (син.крис) {0.5г/1200 RUB}')
novgorod.row('СК (син.крис) {1г/2000 RUB}')
novgorod.row('Гашиш EURO {1г/1100 RUB}')
novgorod.row('Гашиш EURO {2г/2000 RUB}')
novgorod.row('Гашиш EURO {5г/4000 RUB}')
novgorod.row('MEPHEDRONE крис {1г/2100 RUB}')
novgorod.row('Амфетамин HQ {2г/2400 RUB}')
novgorod.row('Шишки OG Kush {1г/1200 RUB}')
novgorod.row('Шишки OG Kush {2г/2200 RUB}')
novgorod.row('Шишки OG Kush {5г/4200 RUB}')
novgorod.row('РОСС {5г/3000 RUB}')
novgorod.row('Героин HQ {0.5г/1700 RUB}')
kazan_rayons = telebot.types.ReplyKeyboardMarkup(True)
kazan_rayons.row('🔹 Советский')
kazan_rayons.row('🔹 Приволжский')
kazan_rayons.row('🔹 Ново-Савиновский')
kazan_rayons.row('🔹 Московский')
kazan_rayons.row('🔹 Кировский')
kazan_rayons.row('🔹 Вахитовский')
kazan_rayons.row('🔹 Главная')
kazan = telebot.types.ReplyKeyboardMarkup(True)
kazan.row('СК (син.крис) {0.3г/800 RUB}')
kazan.row('СК (син.крис) {0.5г/1200 RUB}')
kazan.row('СК (син.крис) {1г/2000 RUB}')
kazan.row('Гашиш EURO {1г/1100 RUB}')
kazan.row('Гашиш EURO {2г/2000 RUB}')
kazan.row('Гашиш EURO {5г/4000 RUB}')
kazan.row('MEPHEDRONE крис {1г/2100 RUB}')
kazan.row('Амфетамин HQ {1г/1500 RUB}')
kazan.row('Шишки OG Kush {1г/1200 RUB}')
kazan.row('Шишки OG Kush {2г/2200 RUB}')
kazan.row('Шишки OG Kush {5г/4200 RUB}')
kazan.row('РОСС {5г/3000 RUB}')
kazan.row('Героин HQ {0.5г/1700 RUB}')
chelabinsk_rayons = telebot.types.ReplyKeyboardMarkup(True)
chelabinsk_rayons.row('🔹 Ленинский')
chelabinsk_rayons.row('🔹 ЧМЗ')
chelabinsk_rayons.row('🔹 Северо-Запад')
chelabinsk_rayons.row('🔹 АМЗ')
chelabinsk_rayons.row('🔹 Центральный')
chelabinsk_rayons.row('🔹 Ново-Синеглазово')
chelabinsk_rayons.row('🔹 Главная')
chelabinsk = telebot.types.ReplyKeyboardMarkup(True)
chelabinsk.row('СК (син.крис) {0.3г/900 RUB}')
chelabinsk.row('СК (син.крис) {0.5г/1300 RUB}')
chelabinsk.row('СК (син.крис) {1г/2200 RUB}')
chelabinsk.row('Гашиш EURO {1г/1100 RUB}')
chelabinsk.row('Гашиш EURO {2г/2000 RUB}')
chelabinsk.row('Гашиш EURO {5г/4000 RUB}')
chelabinsk.row('MEPHEDRONE крис {1г/2100 RUB}')
chelabinsk.row('Амфетамин HQ {2г/2400 RUB}')
chelabinsk.row('Шишки OG Kush {1г/1200 RUB}')
chelabinsk.row('Шишки OG Kush {2г/2200 RUB}')
chelabinsk.row('Шишки OG Kush {5г/4200 RUB}')
chelabinsk.row('РОСС {5г/3000 RUB}')
chelabinsk.row('Героин HQ {0.5г/1700 RUB}')
omsk_rayons = telebot.types.ReplyKeyboardMarkup(True)
omsk_rayons.row('🔹 Кировский')
omsk_rayons.row('🔹 Ленинский')
omsk_rayons.row('🔹 Октябрьский')
omsk_rayons.row('🔹 Советский')
omsk_rayons.row('🔹 Центральный')
omsk_rayons.row('🔹 Главная')
omsk = telebot.types.ReplyKeyboardMarkup(True)
omsk.row('СК (син.крис) {0.3г/900 RUB}')
omsk.row('СК (син.крис) {0.5г/1300 RUB}')
omsk.row('СК (син.крис) {1г/2200 RUB}')
omsk.row('Гашиш EURO {1г/1100 RUB}')
omsk.row('Гашиш EURO {2г/2000 RUB}')
omsk.row('Гашиш EURO {5г/4000 RUB}')
omsk.row('MEPHEDRONE крис {1г/2100 RUB}')
omsk.row('Амфетамин HQ {2г/2400 RUB}')
omsk.row('Шишки OG Kush {1г/1200 RUB}')
omsk.row('Шишки OG Kush {2г/2200 RUB}')
omsk.row('Шишки OG Kush {5г/4200 RUB}')
omsk.row('РОСС {5г/3000 RUB}')
omsk.row('Героин HQ {0.5г/1700 RUB}')
samara_rayons = telebot.types.ReplyKeyboardMarkup(True)
samara_rayons.row('🔹 Железнодорожный')
samara_rayons.row('🔹 Кировский')
samara_rayons.row('🔹 Красноглинский')
samara_rayons.row('🔹 Куйбышевский')
samara_rayons.row('🔹 Ленинский')
samara_rayons.row('🔹 Октябрьский')
samara_rayons.row('🔹 Главная')
samara = telebot.types.ReplyKeyboardMarkup(True)
samara.row('СК (син.крис) {0.3г/800 RUB}')
samara.row('СК (син.крис) {0.5г/1200 RUB}')
samara.row('СК (син.крис) {1г/2000 RUB}')
samara.row('Гашиш EURO {1г/1100 RUB}')
samara.row('Гашиш EURO {2г/2000 RUB}')
samara.row('Гашиш EURO {5г/4000 RUB}')
samara.row('MEPHEDRONE крис {1г/2100 RUB}')
samara.row('Амфетамин HQ {1г/1500 RUB}')
samara.row('Шишки OG Kush {1г/1200 RUB}')
samara.row('Шишки OG Kush {2г/2200 RUB}')
samara.row('Шишки OG Kush {5г/4200 RUB}')
samara.row('РОСС {5г/3000 RUB}')
samara.row('Героин HQ {0.5г/1700 RUB}')
krasnodar_rayons = telebot.types.ReplyKeyboardMarkup(True)
krasnodar_rayons.row('🔹 Западный')
krasnodar_rayons.row('🔹 Карасунский')
krasnodar_rayons.row('🔹 Прикубанский')
krasnodar_rayons.row('🔹 Центральный')
krasnodar= telebot.types.ReplyKeyboardMarkup(True)
krasnodar.row('СК (син.крис) {0.5г/1300 RUB}')
krasnodar.row('СК (син.крис) {1г/2200 RUB}')
krasnodar.row('Гашиш EURO {1г/1100 RUB}')
krasnodar.row('MEPHEDRONE крис {1г/2100 RUB}')
krasnodar.row('Амфетамин HQ {2г/2400 RUB}')
krasnodar.row('Шишки OG Kush {1г/1200 RUB}')
krasnodar.row('Героин HQ {0.5г/1700 RUB}')
saratov_rayons = telebot.types.ReplyKeyboardMarkup(True)
saratov_rayons.row('🔹 Волжский')
saratov_rayons.row('🔹 Заводской')
saratov_rayons.row('🔹 Кировский')
saratov_rayons.row('🔹 Ленинский')
saratov_rayons.row('🔹 Октябрьский')
saratov_rayons.row('🔹 Фрунзенский')
saratov = telebot.types.ReplyKeyboardMarkup(True)
saratov.row('СК (син.крис) {0.3г/900 RUB}')
saratov.row('СК (син.крис) {0.5г/1300 RUB}')
saratov.row('СК (син.крис) {1г/2200 RUB}')
saratov.row('Гашиш EURO {1г/1100 RUB}')
saratov.row('Гашиш EURO {2г/2000 RUB}')
saratov.row('Гашиш EURO {5г/4000 RUB}')
saratov.row('MEPHEDRONE крис {1г/2100 RUB}')
saratov.row('Амфетамин HQ {2г/2400 RUB}')
saratov.row('Шишки OG Kush {1г/1200 RUB}')
saratov.row('Шишки OG Kush {2г/2200 RUB}')
saratov.row('Шишки OG Kush {5г/4200 RUB}')
saratov.row('РОСС {5г/3000 RUB}')
saratov.row('Героин HQ {0.5г/1700 RUB}')
tymen_rayons = telebot.types.ReplyKeyboardMarkup(True)
tymen_rayons.row('🔹 Восточный')
tymen_rayons.row('🔹 Калининский')
tymen_rayons.row('🔹 Ленинский')
tymen_rayons.row('🔹 Центральный')
tymen = telebot.types.ReplyKeyboardMarkup(True)
tymen.row('СК (син.крис) {0.5г/1300 RUB}')
tymen.row('СК (син.крис) {1г/2200 RUB}')
tymen.row('Гашиш EURO {1г/1100 RUB}')
tymen.row('MEPHEDRONE крис {1г/2100 RUB}')
tymen.row('Амфетамин HQ {2г/2400 RUB}')
tymen.row('Шишки OG Kush {1г/1200 RUB}')
tymen.row('Героин HQ {0.5г/1700 RUB}')
barnaul_rayons = telebot.types.ReplyKeyboardMarkup(True)
barnaul_rayons.row('🔹 Железнодорожный')
barnaul_rayons.row('🔹 Ленинский')
barnaul_rayons.row('🔹 Индустриальный')
barnaul_rayons.row('🔹 Октябрьский')
barnaul_rayons.row('🔹 Центральный')
barnaul = telebot.types.ReplyKeyboardMarkup(True)
barnaul.row('СК (син.крис) {0.3г/900 RUB}')
barnaul.row('СК (син.крис) {0.5г/1300 RUB}')
barnaul.row('СК (син.крис) {1г/2200 RUB}')
barnaul.row('Гашиш EURO {1г/1100 RUB}')
barnaul.row('Гашиш EURO {2г/2000 RUB}')
barnaul.row('Гашиш EURO {5г/4000 RUB}')
barnaul.row('MEPHEDRONE крис {1г/2100 RUB}')
barnaul.row('Амфетамин HQ {2г/2400 RUB}')
barnaul.row('Шишки OG Kush {1г/1200 RUB}')
barnaul.row('Шишки OG Kush {2г/2200 RUB}')
barnaul.row('Шишки OG Kush {5г/4200 RUB}')
barnaul.row('РОСС {5г/3000 RUB}')
barnaul.row('Героин HQ {0.5г/1700 RUB}')
irkytsk_rayons = telebot.types.ReplyKeyboardMarkup(True)
irkytsk_rayons.row('🔹 Ленинский округ')
irkytsk_rayons.row('🔹 Октябрьский округ')
irkytsk_rayons.row('🔹 Правобережный округ')
irkytsk_rayons.row('🔹 Свердловский округ')
irkytsk = telebot.types.ReplyKeyboardMarkup(True)
irkytsk.row('СК (син.крис) {0.5г/1300 RUB}')
irkytsk.row('СК (син.крис) {1г/2200 RUB}')
irkytsk.row('Гашиш EURO {1г/1100 RUB}')
irkytsk.row('MEPHEDRONE крис {1г/2100 RUB}')
irkytsk.row('Амфетамин HQ {2г/2400 RUB}')
irkytsk.row('Шишки OG Kush {1г/1200 RUB}')
irkytsk.row('Героин HQ {0.5г/1700 RUB}')
yaroslavl_rayons = telebot.types.ReplyKeyboardMarkup(True)
yaroslavl_rayons.row('🔹 Дзержинский')
yaroslavl_rayons.row('🔹 Заволжский')
yaroslavl_rayons.row('🔹 Кировский')
yaroslavl_rayons.row('🔹 Красноперекопский')
yaroslavl_rayons.row('🔹 Ленинский')
yaroslavl_rayons.row('🔹 Фрунзенский')
yaroslavl = telebot.types.ReplyKeyboardMarkup(True)
yaroslavl.row('СК (син.крис) {0.3г/900 RUB}')
yaroslavl.row('СК (син.крис) {0.5г/1300 RUB}')
yaroslavl.row('СК (син.крис) {1г/2200 RUB}')
yaroslavl.row('Гашиш EURO {1г/1100 RUB}')
yaroslavl.row('Гашиш EURO {2г/2000 RUB}')
yaroslavl.row('Гашиш EURO {5г/4000 RUB}')
yaroslavl.row('MEPHEDRONE крис {1г/2100 RUB}')
yaroslavl.row('Амфетамин HQ {2г/2400 RUB}')
yaroslavl.row('Шишки OG Kush {1г/1200 RUB}')
yaroslavl.row('Шишки OG Kush {2г/2200 RUB}')
yaroslavl.row('Шишки OG Kush {5г/4200 RUB}')
yaroslavl.row('РОСС {5г/3000 RUB}')
yaroslavl.row('Героин HQ {0.5г/1700 RUB}')
vladivostok_rayons = telebot.types.ReplyKeyboardMarkup(True)
vladivostok_rayons.row('🔹 Ленинский')
vladivostok_rayons.row('🔹 Первомайский')
vladivostok_rayons.row('🔹 Первореченский')
vladivostok_rayons.row('🔹 Советский')
vladivostok_rayons.row('🔹 Фрунзенский')
vladivostok = telebot.types.ReplyKeyboardMarkup(True)
vladivostok.row('СК (син.крис) {0.3г/900 RUB}')
vladivostok.row('СК (син.крис) {0.5г/1300 RUB}')
vladivostok.row('СК (син.крис) {1г/2200 RUB}')
vladivostok.row('Гашиш EURO {1г/1100 RUB}')
vladivostok.row('Гашиш EURO {2г/2000 RUB}')
vladivostok.row('Гашиш EURO {5г/4000 RUB}')
vladivostok.row('MEPHEDRONE крис {1г/2100 RUB}')
orenburb_rayons = telebot.types.ReplyKeyboardMarkup(True)
orenburb_rayons.row('🔹 Дзержинский')
orenburb_rayons.row('🔹 Ленинский')
orenburb_rayons.row('🔹 Промышленный')
orenburb_rayons.row('🔹 Центральный')
orenburb = telebot.types.ReplyKeyboardMarkup(True)
orenburb.row('СК (син.крис) {0.3г/900 RUB}')
orenburb.row('СК (син.крис) {0.5г/1300 RUB}')
orenburb.row('СК (син.крис) {1г/2200 RUB}')
orenburb.row('Гашиш EURO {1г/1100 RUB}')
orenburb.row('Гашиш EURO {2г/2000 RUB}')
orenburb.row('Гашиш EURO {5г/4000 RUB}')
orenburb.row('MEPHEDRONE крис {1г/2100 RUB}')
tomsk_rayons = telebot.types.ReplyKeyboardMarkup(True)
tomsk_rayons.row('🔹 Кировский')
tomsk_rayons.row('🔹 Ленинский')
tomsk_rayons.row('🔹 Октябрьский')
tomsk_rayons.row('🔹 Советский')
tomsk = telebot.types.ReplyKeyboardMarkup(True)
tomsk.row('СК (син.крис) {0.3г/900 RUB}')
tomsk.row('СК (син.крис) {0.5г/1300 RUB}')
tomsk.row('СК (син.крис) {1г/2200 RUB}')
tomsk.row('Гашиш EURO {1г/1100 RUB}')
tomsk.row('Гашиш EURO {2г/2000 RUB}')
tomsk.row('Гашиш EURO {5г/4000 RUB}')
tomsk.row('MEPHEDRONE крис {1г/2100 RUB}')
tomsk.row('Амфетамин HQ {2г/2400 RUB}')
tomsk.row('Шишки OG Kush {1г/1200 RUB}')
tomsk.row('Шишки OG Kush {2г/2200 RUB}')
tomsk.row('Шишки OG Kush {5г/4200 RUB}')
tomsk.row('РОСС {5г/3000 RUB}')
tomsk.row('Героин HQ {0.5г/1700 RUB}')
volgograd_rayons = telebot.types.ReplyKeyboardMarkup(True)
volgograd_rayons.row('🔹 Ворошиловский')
volgograd_rayons.row('🔹 Дзержинский')
volgograd_rayons.row('🔹 Кировский')
volgograd_rayons.row('🔹 Красноармейский')
volgograd_rayons.row('🔹 Краснооктябрьский')
volgograd_rayons.row('🔹 Советский')
volgograd_rayons.row('🔹 Тракторозаводский')
volgograd_rayons.row('🔹 Центральный')
volgograd = telebot.types.ReplyKeyboardMarkup(True)
volgograd.row('СК (син.крис) {0.3г/800 RUB}')
volgograd.row('СК (син.крис) {0.5г/1200 RUB}')
volgograd.row('СК (син.крис) {1г/2000 RUB}')
volgograd.row('Гашиш EURO {1г/1100 RUB}')
volgograd.row('Гашиш EURO {2г/2000 RUB}')
volgograd.row('Гашиш EURO {5г/4000 RUB}')
volgograd.row('MEPHEDRONE крис {1г/2100 RUB}')
volgograd.row('Амфетамин HQ {1г/1500 RUB}')
volgograd.row('Шишки OG Kush {1г/1200 RUB}')
volgograd.row('Шишки OG Kush {2г/2200 RUB}')
volgograd.row('Шишки OG Kush {5г/4200 RUB}')
volgograd.row('РОСС {5г/3000 RUB}')
volgograd.row('Героин HQ {0.5г/1700 RUB}')
voroneg_rayons = telebot.types.ReplyKeyboardMarkup(True)
voroneg_rayons.row('🔹 Железнодорожный')
voroneg_rayons.row('🔹 Коминтерновский')
voroneg_rayons.row('🔹 Левобережный')
voroneg_rayons.row('🔹 Ленинский')
voroneg_rayons.row('🔹 Советский')
voroneg_rayons.row('🔹 Центральный')
voroneg = telebot.types.ReplyKeyboardMarkup(True)
voroneg.row('СК (син.крис) {0.3г/900 RUB}')
voroneg.row('СК (син.крис) {0.5г/1300 RUB}')
voroneg.row('СК (син.крис) {1г/2200 RUB}')
voroneg.row('Гашиш EURO {1г/1100 RUB}')
voroneg.row('Гашиш EURO {2г/2000 RUB}')
voroneg.row('Гашиш EURO {5г/4000 RUB}')
voroneg.row('MEPHEDRONE крис {1г/2100 RUB}')
voroneg.row('Амфетамин HQ {2г/2400 RUB}')
voroneg.row('Шишки OG Kush {1г/1200 RUB}')
voroneg.row('Шишки OG Kush {2г/2200 RUB}')
voroneg.row('Шишки OG Kush {5г/4200 RUB}')
voroneg.row('РОСС {5г/3000 RUB}')
voroneg.row('Героин HQ {0.5г/1700 RUB}')
perm_rayons = telebot.types.ReplyKeyboardMarkup(True)
perm_rayons.row('🔹 Свердловский')
perm_rayons.row('🔹 Орджоникидзевский')
perm_rayons.row('🔹 Кировский')
perm_rayons.row('🔹 Мотовилихинский')
perm_rayons.row('🔹 Индустриальный')
perm_rayons.row('🔹 Дзержинский')
perm_rayons.row('🔹 Ленинский')
perm = telebot.types.ReplyKeyboardMarkup(True)
perm.row('СК (син.крис) {0.3г/900 RUB}')
perm.row('СК (син.крис) {0.5г/1300 RUB}')
perm.row('СК (син.крис) {1г/2200 RUB}')
perm.row('Гашиш EURO {1г/1100 RUB}')
perm.row('Гашиш EURO {2г/2000 RUB}')
perm.row('Гашиш EURO {5г/4000 RUB}')
perm.row('MEPHEDRONE крис {1г/2100 RUB}')
perm.row('Амфетамин HQ {2г/2400 RUB}')
perm.row('Шишки OG Kush {1г/1200 RUB}')
perm.row('Шишки OG Kush {2г/2200 RUB}')
perm.row('Шишки OG Kush {5г/4200 RUB}')
perm.row('РОСС {5г/3000 RUB}')
perm.row('Героин HQ {0.5г/1700 RUB}')
ufa_rayons = telebot.types.ReplyKeyboardMarkup(True)
ufa_rayons.row('🔹 Кировский')
ufa_rayons.row('🔹 Советский')
ufa_rayons.row('🔹 Ленинский')
ufa_rayons.row('🔹 Демский')
ufa_rayons.row('🔹 Орджоникидзевский')
ufa_rayons.row('🔹 Октябрьский')
ufa_rayons.row('🔹 Калининский')
ufa = telebot.types.ReplyKeyboardMarkup(True)
ufa.row('СК (син.крис) {0.3г/900 RUB}')
ufa.row('СК (син.крис) {0.5г/1300 RUB}')
ufa.row('СК (син.крис) {1г/2200 RUB}')
ufa.row('Гашиш EURO {1г/1100 RUB}')
ufa.row('Гашиш EURO {2г/2000 RUB}')
ufa.row('Гашиш EURO {5г/4000 RUB}')
ufa.row('MEPHEDRONE крис {1г/2100 RUB}')
krasnoyarsk_rayons = telebot.types.ReplyKeyboardMarkup(True)
krasnoyarsk_rayons.row('🔹 Железнодорожный')
krasnoyarsk_rayons.row('🔹 Кировский')
krasnoyarsk_rayons.row('🔹 Ленинский')
krasnoyarsk_rayons.row('🔹 Октябрьский')
krasnoyarsk_rayons.row('🔹 Свердловский')
krasnoyarsk_rayons.row('🔹 Советский')
krasnoyarsk_rayons.row('🔹 Центральный')
krasnoyarsk = telebot.types.ReplyKeyboardMarkup(True)
krasnoyarsk.row('СК (син.крис) {0.3г/900 RUB}')
krasnoyarsk.row('СК (син.крис) {0.5г/1300 RUB}')
krasnoyarsk.row('СК (син.крис) {1г/2200 RUB}')
krasnoyarsk.row('Гашиш EURO {1г/1100 RUB}')
krasnoyarsk.row('Гашиш EURO {2г/2000 RUB}')
krasnoyarsk.row('MEPHEDRONE крис {1г/2100 RUB}')
toliyati_rayons = telebot.types.ReplyKeyboardMarkup(True)
toliyati_rayons.row('🔹 Автозаводский')
toliyati_rayons.row('🔹 Комсомольский')
toliyati_rayons.row('🔹 Центральный')
toliyati = telebot.types.ReplyKeyboardMarkup(True)
toliyati.row('СК (син.крис) {0.3г/900 RUB}')
toliyati.row('СК (син.крис) {0.5г/1300 RUB}')
toliyati.row('СК (син.крис) {1г/2200 RUB}')
toliyati.row('Гашиш EURO {1г/1100 RUB}')
toliyati.row('Гашиш EURO {2г/2000 RUB}')
toliyati.row('MEPHEDRONE крис {1г/2100 RUB}')
toliyati.row('РОСС {5г/3000 RUB}')
toliyati.row('Героин HQ {0.5г/1700 RUB}')
feodisiya_rayons = telebot.types.ReplyKeyboardMarkup(True)
feodisiya_rayons.row('🔹 Береговое')
feodisiya_rayons.row('🔹 Щебетовка')
feodisiya_rayons.row('🔹 Орджоникидзе')
feodisiya = telebot.types.ReplyKeyboardMarkup(True)
feodisiya.row('СК (син.крис) {0.3г/900 RUB}')
feodisiya.row('СК (син.крис) {0.5г/1300 RUB}')
feodisiya.row('СК (син.крис) {1г/2200 RUB}')
feodisiya.row('Гашиш EURO {1г/1100 RUB}')
feodisiya.row('Гашиш EURO {2г/2000 RUB}')
feodisiya.row('MEPHEDRONE крис {1г/2100 RUB}')
feodisiya.row('РОСС {5г/3000 RUB}')
feodisiya.row('Героин HQ {0.5г/1700 RUB}')
timashevsk = telebot.types.ReplyKeyboardMarkup(True)
timashevsk.row('СК (син.крис) {0.3г/900 RUB}')
timashevsk.row('СК (син.крис) {0.5г/1300 RUB}')
timashevsk.row('СК (син.крис) {1г/2200 RUB}')
timashevsk.row('Гашиш EURO {2г/2000 RUB}')
timashevsk.row('MEPHEDRONE крис {1г/2100 RUB}')
timashevsk.row('РОСС {5г/3000 RUB}')
kategor1 = telebot.types.ReplyKeyboardMarkup(True)
kategor1.row('СК (син.крис)','Гашиш')
kategor1.row('MEPHEDRONE крис','РОСС')
oplata = telebot.types.ReplyKeyboardMarkup(True)
oplata.row('Qiwi','Bitcoin')
oplata_oplatil = telebot.types.ReplyKeyboardMarkup(True)
oplata_oplatil.row('Оплатил','Отменить')
admin = telebot.types.ReplyKeyboardMarkup(True)
admin.row('Изменить Bitcoin','Изменить Qiwi')
admin.row('Количество пользователей')
admin.row('🔹 Главная')
yes_no = telebot.types.ReplyKeyboardMarkup(True)
yes_no.row('Да','Нет') | [
"noreply@github.com"
] | noreply@github.com |
d1ca2a52b83d8def8c1aa10f303e6cad817df346 | 41a20700b5bb351d20562ac23ec4db06bc96f0d7 | /src/fg/tv_metrics.py | f38e38ae3fce02c994c9be7c9605523073f0d3f0 | [] | no_license | kedz/noiseylg | ee0c54634767e8d3789b4ffb93727988c29c6979 | 17266e1a41e33aecb95dc1c3aca68f6bccee86d5 | refs/heads/master | 2020-07-30T11:22:08.351759 | 2019-10-30T21:33:11 | 2019-10-30T21:33:11 | 210,212,253 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py | from plum.types import register, PlumModule, HP, props
from subprocess import check_output
from queue import Queue
from threading import Thread
from pathlib import Path
from tempfile import NamedTemporaryFile
import json
import d2t.preprocessing.tvs as preproc
@register("metrics.tv_metrics")
class TVMetrics(PlumModule):
path = HP(type=props.EXISTING_PATH)
search_fields = HP()
references_fields = HP()
def __pluminit__(self):
self._cache = None
self._queue = Queue(maxsize=0)
self._thread = None
self._thread = Thread(target=self._process_result)
self._thread.setDaemon(True)
self._thread.start()
self._hyp_fp = NamedTemporaryFile("w")
self._ref_fp = NamedTemporaryFile("w")
def postprocess(self, tokens, mr):
# TODO right now this is specific to the e2e dataset. Need to
# generalize how to do post processing.
tokens = [t for t in tokens if t[0] != "<" and t[-1] != ">"]
text = " ".join(tokens)
return preproc.lexicalize(text, mr)
def _process_result(self):
while True:
hyp, refs, mr = self._queue.get()
print(self.postprocess(hyp, mr), file=self._hyp_fp)
#print(" ".join(hyp), file=self._hyp_fp)
if isinstance(refs, (list, tuple)):
refs = "\n".join(refs)
print(refs, file=self._ref_fp, end="\n\n")
self._queue.task_done()
def reset(self):
self._cache = None
while not self._queue.empty():
self._queue.get()
self._queue.task_done()
self._hyp_fp = NamedTemporaryFile("w")
self._ref_fp = NamedTemporaryFile("w")
def apply_fields(self, fields, obj):
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
if hasattr(field, "__call__"):
obj = field(obj)
else:
obj = obj[field]
return obj
def forward(self, forward_state, batch):
search = self.apply_fields(self.search_fields, forward_state)
hypotheses = search.output()
reference_sets = self.apply_fields(self.references_fields, batch)
for i, (hyp, refs) in enumerate(zip(hypotheses, reference_sets)):
self._queue.put([hyp, refs, batch["mr"][i]])
def run_script(self):
self._queue.join()
self._ref_fp.flush()
self._hyp_fp.flush()
script_path = Path(self.path).resolve()
result_bytes = check_output(
[str(script_path), self._hyp_fp.name, self._ref_fp.name])
result = json.loads(result_bytes.decode("utf8"))
self._cache = result
self._ref_fp = None
self._hyp_fp = None
def compute(self):
if self._cache is None:
self.run_script()
return self._cache
def pretty_result(self):
return str(self.compute())
| [
"kedzie@cs.columbia.edu"
] | kedzie@cs.columbia.edu |
0a101df3b11fa31f2f9270e4eb622a88f96554f3 | 41c605bf3a002a757cb2344cff526d7a7ae56ea9 | /plotly/validators/scattercarpet/selected/marker/__init__.py | 67542f2ea7f75af48003f76f0d057af6429e1e4c | [
"MIT"
] | permissive | Jonathan-MW/plotly.py | 9674b90b5de11fd9089e6afefd04b57bc4587829 | 7528c00772f44dee24c0df7e15d70a4852f171a8 | refs/heads/master | 2020-05-30T06:04:13.621478 | 2019-05-31T10:34:15 | 2019-05-31T10:34:15 | 189,571,988 | 2 | 0 | MIT | 2019-05-31T09:59:53 | 2019-05-31T09:59:53 | null | UTF-8 | Python | false | false | 1,616 | py |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='size',
parent_name='scattercarpet.selected.marker',
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'style'),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='opacity',
parent_name='scattercarpet.selected.marker',
**kwargs
):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'style'),
max=kwargs.pop('max', 1),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='scattercarpet.selected.marker',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| [
"noreply@github.com"
] | noreply@github.com |
25747ed2dbcbe8df7ee0fa834287c49b2acb669d | f93acaa904fdd780f4d28aea92b8794b0014b762 | /imitate/train_rough.py | 9808894b37f94ccf8ee1f2f0f09f1efe424a0553 | [] | no_license | KChikai/seq2seq-examples | c898a80cc3c011bc51fbde35acf93b7c27a539b8 | 6e69c061182c9e00fce31a2b4b720e338fab0ad0 | refs/heads/master | 2021-09-01T10:32:05.829308 | 2017-12-26T13:46:42 | 2017-12-26T13:46:42 | 109,656,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,120 | py | # -*- coding:utf-8 -*-
"""
Sample script of Sequence to Sequence model for ChatBot.
This is a train script for seq2seq.py
You can also use Batch and GPU.
args: --gpu (flg of GPU, if you want to use GPU, please write "--gpu 1")
単語次元:1024,隠れ層:2048
単語語彙数:25000
目的関数:Adam, 関数の初期化をエポック毎に行う
"""
import os
os.environ["CHAINER_TYPE_CHECK"] = "0"
import glob
import pickle
import argparse
import numpy as np
import chainer
from chainer import cuda, optimizers, serializers
from tuning_util import JaConvCorpus
from seq2seq import Seq2Seq
# parse command line args
parser = argparse.ArgumentParser()
parser.add_argument('--data', '-d', default='./data/rough_pair_corpus.txt', type=str, help='Data file directory')
parser.add_argument('--gpu', '-g', default='-1', type=int, help='GPU ID (negative value indicates CPU)')
parser.add_argument('--epoch', '-e', default=200, type=int, help='number of epochs to learn')
parser.add_argument('--feature_num', '-f', default=1024, type=int, help='dimension of feature layer')
parser.add_argument('--hidden_num', '-hi', default=2048, type=int, help='dimension of hidden layer')
parser.add_argument('--batchsize', '-b', default=100, type=int, help='learning minibatch size')
parser.add_argument('--testsize', '-t', default=1000, type=int, help='number of text for testing a model')
parser.add_argument('--lang', '-l', default='ja', type=str, help='the choice of a language (Japanese "ja" or English "en" )')
args = parser.parse_args()
# GPU settings
gpu_device = args.gpu
if args.gpu >= 0:
cuda.check_cuda_available()
cuda.get_device(gpu_device).use()
xp = cuda.cupy if args.gpu >= 0 else np
data_file = args.data
n_epoch = args.epoch
feature_num = args.feature_num
hidden_num = args.hidden_num
batchsize = args.batchsize
testsize = args.testsize
def main():
###########################
#### create dictionary ####
###########################
if os.path.exists('./data/corpus/dictionary.dict'):
corpus = JaConvCorpus(file_path=None, batch_size=batchsize, size_filter=True)
corpus.load(load_dir='./data/corpus/')
else:
corpus = JaConvCorpus(file_path=data_file, batch_size=batchsize, size_filter=True)
corpus.save(save_dir='./data/corpus/')
print('Vocabulary Size (number of words) :', len(corpus.dic.token2id))
######################
#### create model ####
######################
model = Seq2Seq(len(corpus.dic.token2id), feature_num=feature_num,
hidden_num=hidden_num, batch_size=batchsize, gpu_flg=args.gpu)
if args.gpu >= 0:
model.to_gpu()
##########################
#### create ID corpus ####
##########################
test_input_mat = []
test_output_mat = []
train_input_mats = []
train_output_mats = []
if not os.path.exists('./data/corpus/input_mat0.npy'):
print("You don't have any input matrix. You should run 'preprocess.py' before you run this script.")
raise ValueError
else:
for index, text_name in enumerate(glob.glob('data/corpus/input_mat*')):
batch_input_mat = np.load(text_name)
if index == 0:
# separate corpus into Train and Test
perm = np.random.permutation(batch_input_mat.shape[1])
test_input_mat = batch_input_mat[:, perm[0:0 + testsize]]
train_input_mats.append(batch_input_mat[:, perm[testsize:]])
else:
train_input_mats.append(batch_input_mat)
for index, text_name in enumerate(glob.glob('data/corpus/output_mat*')):
batch_output_mat = np.load(text_name)
if index == 0:
# separate corpus into Train and Test
test_output_mat = batch_output_mat[:, perm[0:0 + testsize]]
train_output_mats.append(batch_output_mat[:, perm[testsize:]])
else:
train_output_mats.append(batch_output_mat)
list_of_references = []
for text_ndarray in test_output_mat.T:
reference = text_ndarray.tolist()
references = [[w_id for w_id in reference if w_id is not -1]]
list_of_references.append(references)
#############################
#### train seq2seq model ####
#############################
matrix_row_size = train_input_mats[0].shape[1] - testsize
accum_loss = 0
train_loss_data = []
for num, epoch in enumerate(range(n_epoch)):
total_loss = test_loss = batch_num = 0
# initialize optimizer
optimizer = optimizers.Adam(alpha=0.001)
optimizer.setup(model)
# optimizer.add_hook(chainer.optimizer.GradientClipping(5))
optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))
# for training by each corpus matrix
for mat_index in range(len(train_input_mats)):
perm = np.random.permutation(matrix_row_size)
# by each batch size
for i in range(0, matrix_row_size, batchsize):
# select batch data
input_batch = train_input_mats[mat_index][:, perm[i:i + batchsize]]
output_batch = train_output_mats[mat_index][:, perm[i:i + batchsize]]
# Encode a sentence
model.initialize() # initialize cell
model.encode(input_batch, train=True) # encode (output: hidden Variable)
# Decode from encoded context
end_batch = xp.array([corpus.dic.token2id["<start>"] for _ in range(batchsize)])
first_words = output_batch[0]
loss, predict_mat = model.decode(end_batch, first_words, train=True)
next_ids = first_words
accum_loss += loss
for w_ids in output_batch[1:]:
loss, predict_mat = model.decode(next_ids, w_ids, train=True)
next_ids = w_ids
accum_loss += loss
# learn model
model.cleargrads()
accum_loss.backward()
optimizer.update()
total_loss += float(accum_loss.data)
print('Epoch: ', num, 'Matrix_num: ', mat_index, 'Batch_num', batch_num,
'batch loss: {:.2f}'.format(float(accum_loss.data)))
batch_num += 1
accum_loss = 0
# save model and optimizer
if (epoch + 1) % 5 == 0:
print('-----', epoch + 1, ' times -----')
print('save the model and optimizer')
serializers.save_hdf5('data/' + str(epoch) + '_rough.model', model)
serializers.save_hdf5('data/' + str(epoch) + '_rough.state', optimizer)
# display the on-going status
print('Epoch: ', num,
'Train loss: {:.2f}'.format(total_loss))
train_loss_data.append(float(total_loss / batch_num))
# save loss data
with open('./data/rough_loss_train_data.pkl', 'wb') as f:
pickle.dump(train_loss_data, f)
if __name__ == "__main__":
main() | [
"tokoroten0401@gmail.com"
] | tokoroten0401@gmail.com |
ca3084e1c7ae03fd9680cca4f6fa0bbcafd079d7 | 89e8383393fd69ee8c60c392131f1707e36e476b | /variable.py | 351ee054a921f9b6f16ed993a561b69c23c5ea17 | [] | no_license | vaarigupta/python_practice | ce011196576171bfb0b43cf8e37a21784d5361a4 | d642649a0835b3a991961faf3c8bd34434d06080 | refs/heads/master | 2020-03-12T23:21:33.512791 | 2018-04-24T14:12:23 | 2018-04-24T14:12:23 | 130,864,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | Python 3.6.4rc1 (v3.6.4rc1:3398dcb, Dec 5 2017, 20:41:32) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> no1 = 1
>>> no2 = 2
>>> print("number1:",no1, ", number2 :", no2)
number1: 1 , number2 : 2
>>>
| [
"cutievaarigupta@gmail.com"
] | cutievaarigupta@gmail.com |
67537101f00bc797bd0d8de976e3d5c87cdc51dc | 5c0afded80cd9d773ae13a9558867716f8878e98 | /evernote_oauth_sample/wsgi_aws.py | b9b6e96a0ec79d02b9d20f547d1a6e6151ea14b2 | [] | no_license | teraijun/EN_Markdown | 3230c3043e963f7bf0a17c684897bd53f9e22573 | 016b251ee5bbd9fc64c4bcebb8efbd4268cdd5b8 | refs/heads/master | 2021-01-13T01:28:20.254636 | 2015-09-01T05:50:15 | 2015-09-01T05:50:15 | 33,945,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
import os
import site
import sys
# virtualenv site-packages path (translated from Japanese)
site.addsitedir("/home/ec2-user/ENV/lib/python2.7/site-packages")
# Make the Django project package importable.
sys.path.append('/var/www/cgi-bin/EN_Markdown')
# Default settings module for Django; does not override a pre-set value.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evernote_oauth_sample.settings")
# virtualenv activation script path (translated from Japanese)
activate_env = os.path.expanduser("/home/ec2-user/ENV/bin/activate_this.py")
# NOTE(review): execfile exists only in Python 2 -- this WSGI entry point
# cannot run under Python 3 without porting.
execfile(activate_env, dict(__file__=activate_env))
from django.core.wsgi import get_wsgi_application
# WSGI callable the web server looks up.
application = get_wsgi_application()
"teraijun48@gmail.com"
] | teraijun48@gmail.com |
9d7d16c4a73674e00426099c87f36ac5e20d778f | 60a4f0fa5c8239edbb4cd4390e3b4a7d70c919de | /user/migrations/0006_auto_20190805_2145.py | 54fac07db74e7b1545406f8ec51ded054071913c | [] | no_license | DuncanMoyo/Developer-Portfolio | cca6cbe29e13bddbf56584e400cbd169a515c047 | 9aa8dcef123b3144d9bf2c34a19f4c65c193ac98 | refs/heads/master | 2022-12-09T17:14:42.865413 | 2019-08-09T03:55:21 | 2019-08-09T03:55:21 | 200,691,837 | 0 | 0 | null | 2022-12-08T05:59:41 | 2019-08-05T16:31:39 | CSS | UTF-8 | Python | false | false | 497 | py | # Generated by Django 2.2.4 on 2019-08-05 19:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: move ``skill_level`` to the ``Skill`` model.

    Removes ``skill_level`` from ``UserProfile`` and adds an integer
    ``skill_level`` (default 0) to ``Skill``.
    """

    # Must be applied after migration 0005 of the ``user`` app.
    dependencies = [
        ('user', '0005_auto_20190805_2144'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='skill_level',
        ),
        migrations.AddField(
            model_name='skill',
            name='skill_level',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"duncanfmoyo@gmail.com"
] | duncanfmoyo@gmail.com |
daeb1f7fa521beacd31aac5043452165dbd73f54 | 6a0df64813594b7d1bd9f526eb828c2625516f17 | /MNISTTask.py | a1eb3da83a7dbc9f4630b2fb922494e5dd600790 | [] | no_license | qiulingxu/Automatic-Machine-Learning | 30f8bfaf05cd77a61879b763c4144cf5ed7b1dd8 | a5a7b1364c014e36a3100da437d84f58308e5d7e | refs/heads/master | 2021-09-16T00:05:50.105553 | 2018-06-13T11:51:42 | 2018-06-13T11:51:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,176 | py | import tensorflow as tf
import logging
from mnist import MNIST
import datetime
from Graph import Graph
from QLearning import QLearning
from ImageOperators import *#ConcatOperator,ImageInput,Conv2DFactory,PoolingFactory,TransConv2DFactory,ActivationFactory,BinaryOpFactory,ReuseFactory,DenseFactory,ConcatOperatorDense
import numpy as np
import os
mndata = MNIST('./')
mndata.gz = True
images, labels = mndata.load_training()
# Reshape the flat MNIST rows into NHWC image tensors for the conv operators.
images = np.array(images,dtype=np.float32)
images=images.reshape([-1,28,28,1])
labels= np.array(labels,dtype=np.int32)
print ("Input Shape",images.shape,labels.shape)
###### Experiment Attributes
OperatorLimit=25
BatchSize=64
OperatorSupport=[]
MNIST_IMAGE_WIDTH=28
MNIST_IMAGE_HEIGHT=28
TrainEpochs=10000
######
# Candidate operator pool the architecture search may choose from;
# commented entries are variants disabled for this experiment.
Op_List=[]
#Convolution
Op_List.append(Conv2DFactory(Size=4,ChannelCoef=2,Stride=1))
Op_List.append(Conv2DFactory(Size=3,ChannelCoef=1,Stride=2))
#Op_List.append(Conv2DFactory(Size=2,ChannelCoef=0.5,Stride=1))
Op_List.append(Conv2DFactory(Size=2,ChannelCoef=1,Stride=1))
#Op_List.append(Conv2DFactory(Size=2,ChannelCoef=0.5,Stride=2))
#Trans Convolution
#Op_List.append(TransConv2DFactory(Size=3,ChannelCoef=2,Stride=1))
#Op_List.append(TransConv2DFactory(Size=3,ChannelCoef=0.5,Stride=1))
#Op_List.append(TransConv2DFactory(Size=2,ChannelCoef=2,Stride=2,ImageCoef=2))
#Op_List.append(TransConv2DFactory(Size=2,ChannelCoef=0.5,Stride=2,ImageCoef=2))
#Dense
Op_List.append(DenseFactory(HiddenNumCoef=2))
Op_List.append(DenseFactory(HiddenNumCoef=1))
Op_List.append(DenseFactory(HiddenNumCoef=0.5))
#Reuse
Op_List.append(ReuseFactory(OutputNum=2))
#Binary_Op
Op_List.append(BinaryOpFactory(Type='Concat'))
Op_List.append(BinaryOpFactory(Type='Add'))
#Pooling
Op_List.append(PoolingFactory(Size=2,Stride=2,Type='Max'))
Op_List.append(PoolingFactory(Size=2,Stride=2,Type='Avg'))
#Activation
Op_List.append(ActivationFactory(Type='Relu'))
Op_List.append(ActivationFactory(Type='Tanh'))
def NetworkDecor(Input, Labels):
    """Attach a 10-way softmax classification head to the searched network.

    Flattens ``Input`` to (BatchSize, features) when it is not already in
    that shape, then adds a dense logits layer, a sparse softmax
    cross-entropy loss and an accuracy metric.

    Returns:
        (Output, Loss, Acc): logits tensor, scalar loss, scalar accuracy.
    """
    flat_size = get_size_except_dim(Input)
    if Input.shape.as_list()[1:] != flat_size:
        Reshape = tf.reshape(Input, shape=[BatchSize, flat_size])
    else:
        # Bug fix: the original left ``Reshape`` unbound on this branch,
        # raising NameError whenever the input needed no flattening.
        Reshape = Input
    Output = tf.layers.dense(inputs=Reshape, units=10, activation=None)
    Labels = tf.cast(tf.reshape(Labels, shape=[BatchSize]), tf.int64)
    Loss = tf.losses.sparse_softmax_cross_entropy(labels=Labels, logits=Output)
    Acc = tf.reduce_mean(tf.cast(tf.equal(Labels, tf.argmax(Output, 1)), tf.float32))
    return Output, Loss, Acc
Mode = "Train"
RL_Exp = QLearning()

# Full experiment specification handed to the Q-learning driver.
TaskSpec = {
    "LogHistory": True,
    "OperatorList": Op_List,
    "OperatorNum": OperatorLimit,
    "InputNum": 1,
    "OutputNum": 1,
    "TaskInput": images,
    "TaskLabel": labels,
    "Epochs": TrainEpochs,
    "NetworkDecor": NetworkDecor,
    "BatchSize": BatchSize,
    "ConcatOperator": ConcatOperatorDense,
    "InputOperator": ImageInput,
    "TrajectoryLength": OperatorLimit - 4,
    "RewardGamma": 0.9,
}

if Mode == "Train":
    # DEBUG on the root logger; INFO+ into a timestamped log file.
    logging.getLogger().setLevel(logging.DEBUG)
    now = datetime.datetime.now()
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename='%s_%s.log' % ("MNISTTask", now.strftime("%Y-%m-%d %H-%M")),
        filemode='w',
    )
    RL_Exp.StartTrial(TaskSpec)
elif Mode == "TestBuildGraph":
    # Build and train one fixed, hand-written network description.
    OptionList = [[1, 1, 1, 0], [11, 1, 1, 1], [13, 1, 1, 2], [2, 1, 1, 3],
                  [12, 1, 1, 4], [13, 1, 1, 5], [6, 1, 1, 6]]
    RL_Exp.DebugTrainNet(TaskSpec, OptionList)
elif Mode == "TestUnifiedTrans":
    # Apply the same fixed options to a Graph and print its unified encoding.
    g = Graph(10, Op_List, 1, 1, ConcatOperatorDense, InputOperator=ImageInput)
    OptionList = [[1, 1, 1, 0], [11, 1, 1, 1], [13, 1, 1, 2], [2, 1, 1, 3],
                  [12, 1, 1, 4], [13, 1, 1, 5], [6, 1, 1, 6]]
    np.set_printoptions(threshold=np.inf)
    for Option in OptionList:
        g.ApplyOption(Option)
    print(np.array(g.UnifiedTransform("3D_NoNull").astype(np.int8)))
"v-qiux@microsoft.com"
] | v-qiux@microsoft.com |
b48cbc34229e604e32f551d252f74916fe277a3e | b789bf78ffe684782da7eed9df9d88a62d13ad82 | /pyannote/database/protocol/__init__.py | d9f270782593bbe36e7e6fabe7d6039e4a1d5979 | [
"MIT"
] | permissive | yinruiqing/pyannote-database | 8d77678efec06ffb797716e28b4673f1d5ec6453 | 731593b57082e675e0f661f6211f2dd261807561 | refs/heads/master | 2020-12-02T06:45:29.029202 | 2017-06-28T13:12:26 | 2017-06-28T13:12:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from .speaker_diarization import SpeakerDiarizationProtocol
from .speaker_recognition import SpeakerRecognitionProtocol
| [
"bredin@limsi.fr"
] | bredin@limsi.fr |
c561c267d998bdcd11ae388b64d3370452b1914d | b1737e32922d045a7339ab672a04b82978da7608 | /Class1-Numbers.py | bae681a428451a0461e8128de9bda59f9156de1a | [] | no_license | vnaditya/python | fe7505c7432a9234e3dfaa2b9a23a20e48e62120 | a7c5698fe350f3ae60d78481b8736d4517268981 | refs/heads/master | 2022-11-26T05:18:10.849970 | 2020-08-01T06:31:21 | 2020-08-01T06:31:21 | 282,145,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# NOTE: exported from a Jupyter notebook (the "In[n]" markers below); the
# bare arithmetic expressions displayed values in the notebook but are
# no-ops when this file is run as a plain script.
print('new')
# In[2]:
2+1
# In[3]:
2-1
# In[4]:
2*3
# In[5]:
0.1+0.2-0.3
# In[6]:
10*2+10*3
# In[7]:
10+2*10+3
# In[8]:
(10+2)*(10+3)
# In[ ]:
| [
"68727269+vnaditya@users.noreply.github.com"
] | 68727269+vnaditya@users.noreply.github.com |
271b017259091010131209daa0e08770462aec59 | 5fe8b2e09a48b473cc28a4ba56e96075af945d07 | /app_authentication_proxy.py | ed8dc4855dd683a9660d59302670464f5a1b70dc | [
"Apache-2.0"
] | permissive | stozk/msb-client-websocket-python | bf2821593386aa02b9f53069d72ec5dae98f3335 | 2c5dacaa27b2a5b543ba8693ca888ddd5dc46e38 | refs/heads/master | 2023-04-06T18:10:30.968152 | 2021-04-07T19:14:32 | 2021-04-07T19:14:32 | 275,241,849 | 0 | 0 | Apache-2.0 | 2020-06-26T20:29:16 | 2020-06-26T20:29:15 | null | UTF-8 | Python | false | false | 20,749 | py | # -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
Authors: Daniel Stock, Matthias Stoehr
Licensed under the Apache License, Version 2.0
See the file "LICENSE" for the full license governing this code.
"""
import datetime
import threading
import uuid
import pymongo
import json
import flask
import requests
import threading
from flask import request
from flask import jsonify
# from msb_client.ComplexDataFormat import ComplexDataFormat
# from msb_client.DataType import DataType
# from msb_client.Event import Event
# from msb_client.CustomMetaData import CustomMetaData
# from msb_client.TypeDescription import TypeDescription
# from msb_client.TypeDescriptor import TypeDescriptor
# from msb_client.Function import Function
# from msb_client.MsbClient import MsbClient
if __name__ == "__main__":
    # somgmt_url = "http://192.168.0.67:8081"
    # Base URL of the service/object management REST API.
    somgmt_url = "http://192.168.1.9:8081"
    # myclient = pymongo.MongoClient("mongodb://192.168.0.67:27017/")
    myclient = pymongo.MongoClient("mongodb://192.168.1.9:27017/")
    # NOTE(review): "authentcation_proxy" looks misspelled, but it is the
    # live database name -- do not "fix" it without migrating the data.
    mydb = myclient["authentcation_proxy"]
    mycol = mydb["auth_services"]
    # Sample documents used during development; apparently unused below.
    authdata = {
        "uuid": "67f6dcf1-f558-4642-ab8c-4b5b918c2ec4",
        "operationId": "OPERATION_4b5b918c2ec4",
        "property": "PROP_4b5b918c2ec4",
        "value": "VALUE_4b5b918c2ec4",
    }
    entity = {
        "uuid": "67f6dcf1-f558-4642-ab8c-4b5b918c2ec4",
        "trustlevel": "0",
    }
    def registerAuthService(authServicedata):
        """Stub: only logs; auth-service registration is not implemented."""
        print("register auth service")
    def deleteAuthService(authServicedata):
        """Stub: only logs; auth-service deletion is not implemented."""
        print("delete auth service")
    def connectAuthService(authServicedata):
        """Stub: only logs; auth-service connection is not implemented."""
        print("connect auth service")
    def findAuthService(authServicedata):
        """Stub: only logs; auth-service lookup is not implemented."""
        print("find auth service")
# task = {"summary": "Take out trash", "description": "9876543"}
# resp = requests.post("http://127.0.0.1:1337/register", json=task)
# print(resp.status_code)
# # if resp.status_code != 201:
# # raise ApiError("POST /tasks/ {}".format(resp.status_code))
# print("Response: " + json.dumps((resp.json())))
def getAuthServices():
params = {"lifecycleState": "VERIFIED"}
resp = requests.get(somgmt_url + "/service", params=params)
# print(resp.status_code)
# if resp.status_code != 201:
# raise ApiError("POST /tasks/ {}".format(resp.status_code))
# print("Response: " + json.dumps((resp.json())))
auth_list = []
for serv in resp.json():
meta_resp = requests.get(somgmt_url + "/meta/{0}".format(serv["uuid"]))
for md in meta_resp.json():
# if "name" in md and md["name"] == "verification_service":
# # print("FOUND!")
# # print(json.dumps(md))
# # print("##########")
# print(md)
if (
"typeDescription" in md
and md["typeDescription"]["identifier"] == "verification_service"
):
auth_list.append(md)
# print(md)
return auth_list
# print("############################")
# print(json.dumps((meta_resp.json())))
# print(getAuthServices())
    # Metadata entries of the verified verification services found above.
    resp_uuid = getAuthServices()
    def getService(uuid):
        """Fetch the full service description for ``uuid`` from the mgmt API."""
        resp = requests.get(somgmt_url + "/service/{0}".format(uuid))
        return resp.json()
    # NOTE(review): raises IndexError when getAuthServices() found nothing.
    print(getService(resp_uuid[0]["serviceUuid"]))
# app = flask.Flask(__name__)
# app.config["DEBUG"] = True
# @app.route("/", methods=["GET"])
# def home():
# return "<h1>SDP Authentication Service Proxy</h1><p>v.0.1</p>"
# @app.route("/drop", methods=["GET"])
# def dropDb():
# myclient.drop_database("authentcation_service")
# # myclient.drop_database("sdp_authentication")
# return "<h1>DB drop</h1><p>authentcation_service dropped.</p>"
# @app.route("/find", methods=["GET"])
# def getByUuidQuery():
# # here we want to get the value of user (i.e. ?user=some-value)
# uuid = request.args.get("uuid")
# # print(uuid)
# # if results.count() != 0:
# if mycol.count_documents({"uuid": str(uuid)}) != 0:
# myquery = {"uuid": str(uuid)}
# results = mycol.find(myquery, {"_id": False})
# return jsonify(results[0])
# else:
# return jsonify({})
# @app.route("/<uuid>", methods=["GET"])
# def getByUuid(uuid):
# if mycol.count_documents({"uuid": uuid}) != 0:
# myquery = {"uuid": uuid}
# results = mycol.find(myquery, {"_id": False})
# # if results.count() != 0:
# return jsonify(results[0])
# else:
# return jsonify({})
# @app.route("/all", methods=["GET"])
# def getAll():
# if mycol.count_documents({}) != 0:
# results = mycol.find({}, {"_id": False})
# resArray = []
# for res in results:
# resArray.append(res)
# return jsonify(resArray)
# else:
# return jsonify([])
# @app.route("/generate", methods=["GET"])
# def generate():
# insertList = []
# insertListPrint = []
# for i in range(1, 10):
# UUID = str(uuid.uuid4())
# print(UUID)
# authdata = {
# "uuid": str(UUID),
# "operationId": "OPERATION_" + str(UUID[-12:]),
# "property": "PROP_" + str(UUID[-12:]),
# "value": "VALUE_" + str(UUID[-12:]),
# }
# # mycol.insert_one(authdata)
# # insertedArray.append(authdata)
# insertList.append(authdata)
# insertListPrint.append(authdata.copy())
# mycol.insert_many(insertList)
# return jsonify(insertListPrint)
# @app.route("/register", methods=["POST"])
# def register():
# print(str(request.json))
# return json.dumps({"success": True}), 200, {"ContentType": "application/json"}
# """This is a sample client for the MSB python client library."""
# # define service properties as constructor parameters
# SERVICE_TYPE = "Application"
# SO_UUID = "d16c5634-c860-4e53-9163-fb884cea92fc"
# SO_NAME = "Influx DB Database"
# SO_DESCRIPTION = "Raspberry PI 3 + Enviro+ sensor board"
# SO_TOKEN = "fb884cea92fc"
# myMsbClient = MsbClient(
# SERVICE_TYPE,
# SO_UUID,
# SO_NAME,
# SO_DESCRIPTION,
# SO_TOKEN,
# )
# msb_url = 'wss://localhost:8084'
# myMsbClient.enableDebug(True)
# myMsbClient.enableTrace(False)
# myMsbClient.enableDataFormatValidation(True)
# myMsbClient.disableAutoReconnect(False)
# myMsbClient.setReconnectInterval(10000)
# myMsbClient.disableEventCache(False)
# myMsbClient.setEventCacheSize(1000)
# myMsbClient.disableHostnameVerification(True)
# myMsbClient.addMetaData(CustomMetaData("SEN",
# "Sensor - device which, when excited by a physical phenomenon, produces an electric signal characterizing the physical phenomenon",
# TypeDescription(TypeDescriptor.CDD,
# "0112/2///61360_4#AAA103#001",
# "https://cdd.iec.ch/cdd/iec61360/iec61360.nsf/2a050a792eee78e1c12575560054b803/219d27329351ec25c1257dd300515f69")))
# myMsbClient.addMetaData(CustomMetaData("Fine Particle Sensor",
# "Sensor which measures fine particles",
# TypeDescription(TypeDescriptor.CUSTOM,
# "0112/2///61360_4#AAA103#001-FEIN",
# "")))
# myMsbClient.addMetaData(CustomMetaData("MUP",
# "CPU - processor whose elements have been miniaturized into an integrated circuit",
# TypeDescription(TypeDescriptor.CDD,
# "0112/2///61360_4#AAA062#001",
# "https://cdd.iec.ch/cdd/iec61360/iec61360.nsf/2a050a792eee78e1c12575560054b803/670dc436b7e157cac1257dd300515f41"),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.STRING))
# myMsbClient.addMetaData(CustomMetaData("CPU_Architecture",
# "CPU_Architecture",
# TypeDescription(TypeDescriptor.CUSTOM,
# "0112/2///61360_4#AAA062#001",
# ""),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.STRING))
# myMsbClient.addMetaData(CustomMetaData("RAM",
# "memory that permits access to any of its address locations in any desired sequence",
# TypeDescription(TypeDescriptor.CDD,
# "0112/2///61360_4#AAA062#001",
# "https://cdd.iec.ch/cdd/iec61360/iec61360.nsf/2a050a792eee78e1c12575560054b803/670dc436b7e157cac1257dd300515f41"),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.DOUBLE))
# myMsbClient.addMetaData(CustomMetaData("OS_platform",
# "Operating system platform",
# TypeDescription(TypeDescriptor.CUSTOM,
# "OS_platform",
# ""),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.STRING))
# myMsbClient.addMetaData(CustomMetaData("OS_hostname",
# "OS_hostname",
# TypeDescription(TypeDescriptor.CUSTOM,
# "OS_hostname",
# ""),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.STRING))
# myMsbClient.addMetaData(CustomMetaData("OS_platform_release",
# "OS_platform_release",
# TypeDescription(TypeDescriptor.CUSTOM,
# "OS_platform_release",
# ""),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.STRING))
# myMsbClient.addMetaData(CustomMetaData("OS_platform_version",
# "OS_platform_version",
# TypeDescription(TypeDescriptor.CUSTOM,
# "OS_platform_version",
# ""),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.STRING))
# myMsbClient.addMetaData(CustomMetaData("OS_system_serial",
# "OS_system_serial",
# TypeDescription(TypeDescriptor.CUSTOM,
# "OS_system_serial",
# ""),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.STRING))
# myMsbClient.addMetaData(CustomMetaData("CPU_CORES",
# "CPU core count",
# TypeDescription(TypeDescriptor.CUSTOM,
# "CPU_CORES",
# ""),
# "/",
# "METHOD_STUB_TO_GET_DATA",
# DataType.INT32))
# e_particle_concentration = Event("PARTICLE_CONCENTRATION", "Aktuelle Partikelkonzentration", "Aktuelle Konzentration der Feinstaubpartikel in PPM", DataType.INT32, 1, False)
# e_particle_concentration.addMetaData(CustomMetaData("Particle Concentration",
# "Particle Concentration",
# TypeDescription(TypeDescriptor.CDD,
# "0112/2///61987#ABT514#001",
# "https://cdd.iec.ch/cdd/iec61987/iec61987.nsf/ListsOfUnitsAllVersions/0112-2---61987%23ABT514"),
# "/PARTICLE_CONCENTRATION"))
# e_particle_concentration.addMetaData(TypeDescription(TypeDescriptor.CDD,
# "0112/2///61987#ABT514#001",
# "https://cdd.iec.ch/cdd/iec61987/iec61987.nsf/ListsOfUnitsAllVersions/0112-2---61987%23ABT514",
# "/PARTICLE_CONCENTRATION"))
# myMsbClient.addEvent(e_particle_concentration)
# e_temperature = Event("AMBIENT_TEMPERATURE", "Current ambient temperature", "Current temperature reading in °C", DataType.DOUBLE, 1, False)
# e_temperature.addMetaData(CustomMetaData("Temperature",
# "Ambient temperature",
# TypeDescription(TypeDescriptor.CDD,
# "0112/2///61987#ABT514#001",
# "https://cdd.iec.ch/cdd/iec61987/iec61987.nsf/ListsOfUnitsAllVersions/0112-2---61987%23ABT514"),
# "/AMBIENT_TEMPERATURE",
# DataType.DOUBLE))
# e_temperature.addMetaData(TypeDescription(TypeDescriptor.CDD,
# "0112/2///62720#UAA033#001",
# "https://cdd.iec.ch/cdd/iec61360/iec61360.nsf/Units/0112-2---62720%23UAA033",
# "/AMBIENT_TEMPERATURE"))
# myMsbClient.addEvent(e_temperature)
# def sendParticleData():
# print("Method stub for data sending")
# def startReadFineParticle():
# print("Method stub for particle reading")
# f_start_fp_detection = Function("START_FP_DETECTION", "Start fine particle measurement", "Starts the Process of fine particle measurements", DataType.BOOLEAN, startReadFineParticle, False, ["PARTICLE_CONCENTRATION"])
# f_start_fp_detection.addMetaData(CustomMetaData("Funktion_Temperatur",
# "Funktion_Umgebungstemperatur",
# TypeDescription(TypeDescriptor.CDD,
# "0112/2///61987#ABT514#001",
# "https://cdd.iec.ch/cdd/iec61987/iec61987.nsf/ListsOfUnitsAllVersions/0112-2---61987%23ABT514"),
# "/START_FP_DETECTION"))
# myMsbClient.addFunction(f_start_fp_detection)
# e_cpu_speed_reading = Event("CPU_SPEED_READINGS", "CPU speed readings", "CPU speed readings for fingerprinting", DataType.DOUBLE, 1, True)
# e_cpu_speed_reading.addMetaData(CustomMetaData("CPU speed readings",
# "CPU speed readings",
# TypeDescription(TypeDescriptor.FINGERPRINT,
# "FP_CPU_SPEED_READINGS",
# ""),
# "/CPU_SPEED_READINGS",
# DataType.DOUBLE))
# myMsbClient.addEvent(e_cpu_speed_reading)
# f_cpu_speed = Function("CPU_SPEED", "Start CPU speed measurement", "Starts CPU speed measurement for fingerprinting", DataType.BOOLEAN, startReadFineParticle, False, ["CPU_SPEED_READINGS"])
# f_cpu_speed.addMetaData(CustomMetaData("CPU_SPEED",
# "Measure CPU speed for fingerprinting",
# TypeDescription(TypeDescriptor.FINGERPRINT,
# "FP_CPU_SPEED",
# ""),
# "/CPU_SPEED"))
# myMsbClient.addFunction(f_cpu_speed)
# e_cpu_temp_reading = Event("CPU_TEMPERATURE_READINGS", "CPU temperature readings", "CPU temperature readings for fingerprinting", DataType.DOUBLE, 1, False)
# e_cpu_temp_reading.addMetaData(CustomMetaData("CPU temperature",
# "CPU temperature readings for fingerprinting",
# TypeDescription(TypeDescriptor.FINGERPRINT,
# "FP_CPU_TEMPERATURE_READINGS",
# ""),
# "/CPU_TEMPERATURE_READINGS",
# DataType.DOUBLE))
# myMsbClient.addEvent(e_cpu_temp_reading)
# f_cpu_temp = Function("CPU_TEMPERATURE", "Get CPU temperature measurement", "Get the CPU tempreature for fingerprinting", DataType.DOUBLE, startReadFineParticle, False, ["CPU_TEMPERATURE_READINGS"])
# f_cpu_temp.addMetaData(CustomMetaData("CPU_TEMPERATURE",
# "Measure CPU temperature for fingerprinting",
# TypeDescription(TypeDescriptor.FINGERPRINT,
# "FP_CPU_TEMPERATURE",
# ""),
# "/CPU_TEMPERATURE"))
# myMsbClient.addFunction(f_cpu_temp)
# f_storage_speeds = Function("STORAGE_W_SPEED", "Measure storage speed", "Measure the CPU Speed for fingerprinting", DataType.DOUBLE, startReadFineParticle, False, [])
# f_storage_speeds.addMetaData(CustomMetaData("STORAGE_W_SPEED",
# "Measure the CPU Speed for fingerprinting",
# TypeDescription(TypeDescriptor.FINGERPRINT,
# "FP_STORAGE_W_SPEED",
# ""),
# "/STORAGE_W_SPEED"))
# myMsbClient.addFunction(f_storage_speeds)
# def storeData(data):
# print("Storing data")
# f_store_data = Function("STORE_DATA", "Store Data", "Stores Data to the Database", DataType.STRING, storeData, False, [])
# print(myMsbClient.objectToJson(myMsbClient.getSelfDescription()))
# myMsbClient.connect(msb_url)
# myMsbClient.register()
# app.run(host="0.0.0.0", port=1338) | [
"daniel.stock@ipa.fraunhofer.de"
] | daniel.stock@ipa.fraunhofer.de |
78aab04cb0157036a7546311cdb45660958c867b | 4ea06addb40da22573bbfb4a0253406b564ae2cd | /test3in1.py | f068ead397378cf25b16d18e8b1c4e99b4504a6d | [] | no_license | AldyColares/Projetos_MNii | 5eff276daf7f7139b8875fb20bfa405af44639a9 | 43dc45cb2a7890837257f36934d0d32b5e40fc67 | refs/heads/master | 2016-09-11T05:48:43.756753 | 2014-03-21T14:50:45 | 2014-03-21T14:50:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import re
# NOTE(review): Python 2 syntax (print statements, list-returning map/range);
# this script will not run under Python 3 without porting.
arquivo = open("arquivo1.txt")
# First line of the file holds m (the polynomial degree / interval count);
# the remainder is a tab/newline separated table of x and f(x) values.
m = int(arquivo.readline().rstrip('\n'))
txt = arquivo.read()
print "grau =",m
print "\nxi\tf(xi)"
print txt
# Flatten the table into [x0, f(x0), x1, f(x1), ...].
dados = map(float, re.split('\t|\n',txt))
arquivo.close()
a = dados[0]
b = dados[m*2]
fx0 = dados[1]
fxm = dados[m*2+1]
h = (b - a)/m
L = range(m+1)
# Composite trapezoid rule: sum all f values, endpoints weighted 1/2.
S=0
i=1
while ( i <= m*2+1 ):
    S = S + dados[i]
    i = i+2
I1 = (h/2)*(fx0 + fxm) + h*(S-(fx0 + fxm))
# Copy the f(xi) values into L, then composite Simpson 1/3: odd interior
# indices get coefficient 4, even interior indices get coefficient 2.
i=1
j=0
S1=0
S2=0
k=1
while ( i <= m*2+1 ):
    L[j] = dados[i]
    i = i+2
    j = j+1
while(k<m):
    if int(k) % 2 == 0:
        S1 = S1 + L[k]
    else:
        S2 = S2 + L[k]
    k = k+1
I2 = (h/3)*(fx0 + fxm + 4*S2 + 2*S1)
# Simpson 3/8: interior indices divisible by 3 get coefficient 2, others 3.
S1=0
S2=0
k=1
while(k<m):
    if int(k) % 3 == 0:
        S1 = S1 + L[k]
    else:
        S2 = S2 + L[k]
    k = k+1
I3 = (3*h/8)*(fx0 + fxm + 3*S2 + 2*S1)
print "\nI Trap =",I1
print "\nI 1/3 Simp =",I2
print "\nI 3/8 Simp =",I3
| [
"dyego@alu.ufc.br"
] | dyego@alu.ufc.br |
cab40ed6e22c6fcff267f7add19123109e54eaf2 | fd714513f0923a4e533ecb5e5ece1e78105544cd | /views.py | abb858051ed528f85a520d3ffa8860149c2c0b4d | [] | no_license | zhaojason98/Bungalow_challenge | 102f1eb45c6aca3256db304fe3b60baccfcbef18 | 003e4cfd780f6ac4424bc61791802cd5f04b94ae | refs/heads/master | 2020-08-11T06:18:47.839710 | 2019-10-11T18:58:58 | 2019-10-11T18:58:58 | 214,508,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | from django.http import HttpResponse
from django.template import loader
from .models import House
def index(request):
    """Collect the ids of all stored houses."""
    houses_list = House.objects.values_list('id')
    # NOTE(review): this returns a QuerySet, not an HttpResponse -- Django
    # views are expected to return a response object; confirm the intent.
    return houses_list
# NOTE(review): like index(), these return raw model values rather than
# HttpResponse objects -- confirm against the URL configuration.
def description(request, zillow_id):
    """Return the description of the house with primary key ``zillow_id``."""
    house = House.objects.get(id=zillow_id)
    return house.getDescription()
def price(request, zillow_id):
    """Return the price of the house with primary key ``zillow_id``."""
    house = House.objects.get(id=zillow_id)
    return house.getPrice()
def estimate(request, zillow_id):
    """Return the estimate of the house with primary key ``zillow_id``."""
    house = House.objects.get(id=zillow_id)
    return house.getEstimate()
def tax(request, zillow_id):
    """Return the tax value of the house with primary key ``zillow_id``."""
    house = House.objects.get(id=zillow_id)
    return house.getTax()
def location(request, zillow_id):
    """Return the location of the house with primary key ``zillow_id``."""
    house = House.objects.get(id=zillow_id)
    return house.getLocation()
"noreply@github.com"
] | noreply@github.com |
4bd73df7d3611be06c8a1c3f894b8bba2b9a5a2a | b1ba049be6d2afe25be7511c5f87ef7436c25869 | /DOS_DOG/main.py | 31b166d321f1c716fabdce06137f5f260bd993a2 | [
"MIT"
] | permissive | Network-Hub/Dos_Dog | 44ada0bab6159ed37c05efafaa774c16f8978bdc | 569ce825561153818d9260f65daa827e1f27eb42 | refs/heads/master | 2022-11-16T23:20:08.446240 | 2020-07-18T03:03:16 | 2020-07-18T03:03:16 | 280,569,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | from flow_dump import *
import random
if __name__ == '__main__':
    print("---start sniff the packet---")
    # Random label suffix avoids overwriting earlier capture result files.
    save_path = folder_root + os.sep + "results" + os.sep + "label_" + str(random.randint(1, 1000)) + ".csv"
    sniff_main(save_path) # start sniffing packets (translated from Chinese)
| [
"noreply@github.com"
] | noreply@github.com |
a0e9159f3d48fcd72ad68192a845e9493a91adca | 9ec00883eae7a3742dc62e821dabd9515342edcc | /db.py | fc5506987fb479bb400eeb6883c2a94209b7fe55 | [] | no_license | abinayasv/python-web-crawler | 8386c703e3cf097845726b26fffcbab6005fff13 | 51c0a0526826b705f71e741ab4d1376b454421e8 | refs/heads/main | 2023-05-16T06:42:47.691514 | 2021-06-09T09:41:35 | 2021-06-09T09:41:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,018 | py | from pymongo import MongoClient
from datetime import datetime, timedelta
from time import sleep
from config import maxcount,root
from crawler import root,validate
def connectdb():
    """Open the MongoDB connection and bind the ``webscrap`` collection.

    Stores the collection in the module-level ``linkcollection`` so the
    other functions in this module can reuse it, and also returns it.
    """
    global linkcollection # for using this variable to another functions
    # NOTE(review): the string below is a placeholder, not a valid Mongo
    # URI -- the real connection URL must be filled in before this runs.
    cluster=MongoClient(" #your mongodb connection url ")
    db=cluster["WebScrap"] #establishng the db connection
    linkcollection=db["webscrap"] # establishing a connection to the collection
    return linkcollection
def getPendingLinks():
    """Return a cursor over every stored link document."""
    connectdb() # for using the linkcollection
    return linkcollection.find({})
def time24HoursAgo():
    """Return the local datetime exactly 24 hours before now."""
    return datetime.today() - timedelta(days=1)
def insertedalready(link):
    """Return True if ``link`` was stored within the last 24 hours."""
    # $and joins both conditions: same URL and a recent-enough timestamp.
    criteria = {
        '$and': [
            {"Linkcrawled": link},
            {"createdAt": {"$gte": time24HoursAgo()}},
        ]
    }
    return linkcollection.count_documents(criteria) > 0
def save(links):
    """Store each link that is non-blank, valid and not recently inserted.

    Relies on connectdb() to (re)bind the module-level ``linkcollection``.
    Prints a status line per link and sleeps once the collection holds at
    least ``maxcount`` documents.
    """
    connectdb()
    for i in links:
        count = 0
        if (i != ' '):
            # Skip URLs that fail validation or were stored in the last 24h.
            if (validate(i) and not insertedalready(i)):
                linkcollection.insert_one({"Linkcrawled": i, "createdAt": datetime.today().replace(microsecond=0)})
                count += 1
                print("Inserting link")
        if (count == 0):
            print("already inserted")  # nothing was inserted for this link
    if linkcollection.count_documents({}) >= maxcount:
        print("Maximum limit has reached")
        # Bug fix: this module does ``from time import sleep``, so the
        # original ``time.sleep(100)`` raised NameError (no ``time`` name).
        sleep(100)
| [
"noreply@github.com"
] | noreply@github.com |
6b542e6a7eab78022064b7f12bc849610f028555 | 540506f511b41101aa0a7cb3156582d343b6b036 | /multidigrec.py | 70b3e651dd0365d719bf3710aee029754dafcdd2 | [
"MIT"
] | permissive | RajdeepMondal/NumRec | fed34be63ea98a0521f75b5721dd451d67d87a50 | 39c9bc2c1abd204b49baa3593905cf3ccede2354 | refs/heads/master | 2022-04-26T02:14:52.189318 | 2020-05-04T07:18:27 | 2020-05-04T07:18:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | import cv2
import imutils
from dnn_softmax import *
import numpy as np
import pickle
# Load the trained network weights pickled by the training script.
with open("parameters.pkl", "rb") as f:
    parameters = pickle.load(f)
# NOTE(review): ``file`` shadows the Python 2 builtin of the same name.
file = "digits.jpg"
img = cv2.imread(file)
img = cv2.resize(img, (700, 200))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Otsu threshold with inversion: digits become white on black background.
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
# Morphological opening removes small noise specks before contour search.
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
    (x, y, w, h) = cv2.boundingRect(c)
    # Crop each digit, pad a black border, and renormalize to a 28x28
    # column vector before the forward pass.
    dig = thresh[y:y + h, x:x + w]
    dig = cv2.resize(dig, (28, 28))
    dig = np.pad(dig, ((12, 12),), 'constant', constant_values=(0,))
    dig = cv2.resize(dig, (28, 28))
    dig = np.array(dig)
    dig = dig.flatten()
    dig = dig.reshape(dig.shape[0], 1)
    # Predicted digit is the argmax over the network's 10 output units.
    AL, _ = L_layer_forward(dig, parameters)
    ans3 = np.argmax(AL)
    # Draw the bounding box and the prediction onto the display image.
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    img = cv2.putText(img, str(ans3), (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
cv2.imshow("frame", img)
cv2.waitKey(0)
cv2.imwrite("digrec.jpg", img)
| [
"noreply@github.com"
] | noreply@github.com |
9894d013e97b0292ca24dc5d96416fe5a8c42619 | cf8bce7244ecc0785ada7764b654a35da182014c | /djtrump/tests/test_sample.py | 085c7af67a5b65a239626de80cd3ea5efdd2659e | [] | no_license | sujonict07/djtrump | 7ed6d9f93b5dde678ace95668be40951b7fd346a | ff8b8e7efe8cadadf9b443c2ad846d2f63c4d622 | refs/heads/master | 2021-06-18T19:59:25.746873 | 2019-10-05T03:21:24 | 2019-10-05T03:21:24 | 212,815,331 | 0 | 1 | null | 2019-10-05T01:44:43 | 2019-10-04T12:56:55 | null | UTF-8 | Python | false | false | 204 | py | from django.test import TestCase
class SampleTestCase(TestCase):
    """Trivial sanity checks proving the Django test runner is wired up."""
    def test_two_plus_two(self):
        # Trivial arithmetic check.
        self.assertEqual(2+2, 4)
    def test_nine_minus_three(self):
        # Trivial arithmetic check.
        self.assertEqual(9-3, 6)
| [
"abc@abc.br10"
] | abc@abc.br10 |
a7d71e0de823d24ad5f32fd5bfdfa0f91e0a50dd | a17a15211ac8b47d01c616ae87fccb0e95d8298d | /linkedlist_bin_dec.py | 61096e84653f908fcadeaed85699773a06fd7979 | [] | no_license | prosis369/Data-Structures | 68a12a95db530adc153ca6b38b4310f5ab321f5a | cbfdb37f3145066285bf463360f9486bb64fe580 | refs/heads/master | 2021-06-25T12:10:52.894381 | 2020-11-11T08:54:17 | 2020-11-11T08:54:17 | 149,286,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | '''
1290. Convert Binary Number in a Linked List to Integer
Given head which is a reference node to a singly-linked list. The value of each node in the linked list is either 0 or 1. The linked list holds the binary representation of a number.
Return the decimal value of the number in the linked list.
Example1:
Input: head = [1,0,1]
Output: 5
Explanation: (101) in base 2 = (5) in base 10
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def getDecimalValue(self, head: "ListNode") -> int:
        """Interpret a singly linked list of 0/1 node values as a binary
        number (most-significant bit first) and return its decimal value.

        Improvements over the original: O(n) arithmetic accumulation
        instead of building an intermediate string, an empty list now
        yields 0 instead of ``int("", 2)`` raising ValueError, and the
        ``ListNode`` annotation is a string so the module imports even
        when the LeetCode-provided class is absent.
        """
        value = 0
        node = head
        while node is not None:
            # Shift previous bits left and append the current bit.
            value = value * 2 + node.val
            node = node.next
        return value
| [
"noreply@github.com"
] | noreply@github.com |
81e961bd45690f0ec3aaf2d0471c80120c519e90 | eaa13f3f1053d4579ef5b77e475fb04c865fe9cf | /build/frl_msgs/frl_vehicle_msgs/catkin_generated/pkg.develspace.context.pc.py | a989b83f5dd8df824b9da17f3dbf6c8d1631bfd8 | [] | no_license | Ivarsak/UUV_Simulator_OASYS | 1d9fb25855a258ee97d6277dcce1c132cd45198a | a7e4ca3a7c6989334d9fc22d91e0c2fea7bd99b4 | refs/heads/master | 2023-03-04T10:24:21.483313 | 2021-02-08T18:11:30 | 2021-02-08T18:11:30 | 315,768,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by catkin from the pkg.context.pc.in template --
# regenerate via the build rather than editing these values by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ivar/uuv_ws/devel/include".split(';') if "/home/ivar/uuv_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "frl_vehicle_msgs"
PROJECT_SPACE_DIR = "/home/ivar/uuv_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"saksvik.ivar@hotmail.com"
] | saksvik.ivar@hotmail.com |
7b1a94c46e89649da2de5c7eac0d1dfafcb723bf | b6924dbfc19907cc62fa503b3ba80b2085d0e82e | /core/log.py | 1d17a6ab498a5cc0a8a2da043f7c49e147b15159 | [] | no_license | PaulWasTaken/service_api | 9bd25b46c8559b3e0670248550838077872c3a5b | 64227fa92efd820bf7a63d2c883874c1abf7996d | refs/heads/master | 2021-06-17T19:20:12.380859 | 2019-07-16T11:23:17 | 2019-07-16T11:28:22 | 196,447,394 | 0 | 0 | null | 2021-04-20T18:22:57 | 2019-07-11T18:35:38 | Python | UTF-8 | Python | false | false | 421 | py | import logging
from config.logger_config import LOGS_PATH, LOG_LEVEL
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(LOGS_PATH)
file_handler.setLevel(LOG_LEVEL)
file_handler.setFormatter(formatter)
def get_logger(name):
logger = logging.getLogger(name)
logger.setLevel(LOG_LEVEL)
logger.addHandler(file_handler)
return logger
| [
"ip98@list.ru"
] | ip98@list.ru |
487d12eaffc34e10ef3412fb7e4c2055d48ba1ed | b5dd802b7165237174153298688df29796c431b4 | /Intermediate_python/Comparison_Operators/And_Or_not.py | 8228159fff3e80c59237b8606fc0cedf7b196e50 | [] | no_license | Jannatul-Ferdousi/Machine_Learning_Using_Python | 444a2ceb95e6097730963ec7d0ea55fa0dbf954f | ec9610d797296a82a74ee561b68c62055e7179f5 | refs/heads/main | 2023-01-12T01:30:46.168681 | 2020-11-20T18:22:56 | 2020-11-20T18:22:56 | 310,195,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # Define variables
my_kitchen = 18.0
your_kitchen = 14.0
# my_kitchen bigger than 10 and smaller than 18?
print(my_kitchen>10 and my_kitchen<18)
# my_kitchen smaller than 14 or bigger than 17?
print(my_kitchen>17 or my_kitchen<14)
# Double my_kitchen smaller than triple your_kitchen?
print(my_kitchen*2< your_kitchen*3) | [
"noreply@github.com"
] | noreply@github.com |
4863d856a373aa732ddb38fa86db940b81682f64 | dbbff75e5afbea5dbdc0bfc46df0d146f8849ebb | /task_23.5.py | 5c7f999180e6f0c61c742ec6cf4388a379eb3a9b | [] | no_license | mishutka200101/Python-Practice-3-4 | 6460b63feaf25d91932eb6f9c1e0037524df62c3 | 3378bbba820c87d4e186fd4742d3d5af3f8d04bb | refs/heads/main | 2023-04-29T02:39:18.442964 | 2021-05-14T20:37:50 | 2021-05-14T20:37:50 | 362,210,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | def same_by(characteristic, objects):
if not objects:
return True
etalon = characteristic(objects[0])
for obj in objects:
if characteristic(obj) != etalon:
return False
return True
| [
"noreply@github.com"
] | noreply@github.com |
fcfb4efec303377e2ff3d56687c6cff450090c44 | 8eed29374fd95c5686b33e7a93d9991abb8e7dd1 | /CoolPlot/Util/Units.py | 82b38dd5e2c47373c285e5f2fee9c591d5443948 | [
"MIT"
] | permissive | roguextech/CoolPlot | c5de253895f8783f515b1126d2d0e7c4a59625ce | 0967e243934c0970ef830b43befeabacc688cdfd | refs/heads/master | 2022-11-24T08:30:19.379696 | 2020-08-05T12:51:25 | 2020-08-05T12:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
from .Quantities import PropertyDict, BaseDimension
class SIunits(PropertyDict):
def __init__(self):
self._D = BaseDimension(add_SI=0.0, mul_SI=1.0, off_SI=0.0, label='Density', symbol=u'd', unit=u'kg/m3')
self._H = BaseDimension(add_SI=0.0, mul_SI=1.0, off_SI=0.0, label='Specific Enthalpy', symbol=u'h', unit=u'J/kg')
self._P = BaseDimension(add_SI=0.0, mul_SI=1.0, off_SI=0.0, label='Pressure', symbol=u'p', unit=u'Pa')
self._S = BaseDimension(add_SI=0.0, mul_SI=1.0, off_SI=0.0, label='Specific Entropy', symbol=u's', unit=u'J/kg/K')
self._T = BaseDimension(add_SI=0.0, mul_SI=1.0, off_SI=0.0, label='Temperature', symbol=u'T', unit=u'K')
self._U = BaseDimension(add_SI=0.0, mul_SI=1.0, off_SI=0.0, label='Specific Internal Energy', symbol=u'u', unit=u'J/kg')
self._Q = BaseDimension(add_SI=0.0, mul_SI=1.0, off_SI=0.0, label='Vapour Quality', symbol=u'x', unit=u'')
class KSIunits(SIunits):
def __init__(self):
super(KSIunits, self).__init__()
self.H.mul_SI = 1e-3
self.H.unit = u'kJ/kg'
self.P.mul_SI = 1e-3
self.P.unit = u'kPa'
self.S.mul_SI = 1e-3
self.S.unit = u'kJ/kg/K'
self.U.mul_SI = 1e-3
self.U.unit = u'kJ/kg'
class EURunits(KSIunits):
def __init__(self):
super(EURunits, self).__init__()
self.P.mul_SI = 1e-5
self.P.unit = u'bar'
self.T.add_SI = -273.15
self.T.unit = u'deg C'
def get_unit_system_cls():
return [SIunits, KSIunits, EURunits]
| [
"jowr@ipu.dk"
] | jowr@ipu.dk |
4ad15d0918ddbf23e1ab05b9d9d98f2d9228f5e6 | 37faf72814c0a49cfde079c8831ce7247a5e6d03 | /build/ar_track_alvar/ar_track_alvar_msgs/cmake/ar_track_alvar_msgs-genmsg-context.py | e920213a0d6a5bcfdb6f980c921d6fd2639a0e8e | [] | no_license | albtang/106A-Project | 93a9736476ae5b4f0ba071760a71742fa293b40a | 46639be993107a8169091f86b5f5190740a5bba1 | refs/heads/master | 2020-09-11T09:51:21.635101 | 2019-12-21T12:34:24 | 2019-12-21T12:34:24 | 222,027,312 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/cc/ee106a/fa19/class/ee106a-abs/ros_workspaces/tetrisBot/106A-Project/src/ar_track_alvar/ar_track_alvar_msgs/msg/AlvarMarker.msg;/home/cc/ee106a/fa19/class/ee106a-abs/ros_workspaces/tetrisBot/106A-Project/src/ar_track_alvar/ar_track_alvar_msgs/msg/AlvarMarkers.msg"
services_str = ""
pkg_name = "ar_track_alvar_msgs"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "ar_track_alvar_msgs;/home/cc/ee106a/fa19/class/ee106a-abs/ros_workspaces/tetrisBot/106A-Project/src/ar_track_alvar/ar_track_alvar_msgs/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"ee106a-abs@c111-8.local"
] | ee106a-abs@c111-8.local |
bbf8dd661fbdb60587e9c5e81591357bcd3bb949 | fdb6ce818c34d3f93a67e3c4b9e183220e37db78 | /session-hidden-examples-master/hidden/views.py | f871e81e2cc79b0bae62624d69bd618227d3988f | [] | no_license | alexmercertomoki/CodingTraining | 44d8f03c77b85d77d66ae0a0c60e676c7d22e81a | 691633eb0e2c6babfb39d8428848be58ca544140 | refs/heads/master | 2020-04-16T00:27:06.114656 | 2016-09-13T20:03:52 | 2016-09-13T20:03:52 | 68,142,122 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.shortcuts import render
def hidden_demo(request):
context = {}
if not 'first' in request.POST:
return render(request, 'hidden/get-first.html', context)
context['first'] = request.POST['first']
if not 'last' in request.POST:
return render(request, 'hidden/get-last.html', context)
context['last'] = request.POST['last']
if not 'what' in request.POST:
return render(request, 'hidden/get-what.html', context)
context['what'] = request.POST['what']
return render(request, 'hidden/finish.html', context)
| [
"isaacbhuang@gmail.com"
] | isaacbhuang@gmail.com |
35ff7c7b0b2608a161283aad1158714f840e4261 | bf21cd0ef7a94fa106ccd9f91a4bbfdcda7f94ed | /python-basic/chapter06/ex02_1.py | b89200405d271e475c79d5066eb693b18a584a1a | [] | no_license | juneglee/Deep_Learning | fdf8cae1b962aaa0ce557cb53f78a22b6d5ae1e8 | 17a448cf6a7c5b61b967dd78af3d328d63378205 | refs/heads/master | 2023-07-15T03:02:55.739619 | 2021-08-19T14:04:55 | 2021-08-19T14:04:55 | 273,253,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,324 | py | # 예외 고급
# 예외 객체
# try:
# 예외가 발생할 가능성이 있는 구문
# except 예외의 종류 as 예외 객체를 활용할 변수 이름:
# 예외가 발생했을 때 실행할 구문
# 예외 객체
try:
number_input_a = int(input("정수 입력> "))
print("원의 반지름:", number_input_a)
print("원의 둘레:", 2 * 3.14 * number_input_a)
print("원의 넓이:", 3.14 * number_input_a * number_input_a)
except Exception as exception:
print("type(exception):", type(exception))
print("exception:", exception) # Exception은 부모클래스
# 예외 구분하기
# 여러가지 예외가 발생할 수 있는 코드
# 에러 1 : 정수로 변환될수 없는 값을 입력 ex) "yes!!"
# 에러 2 : 리스트의 길이를 넘는 인덱스를 입력한 경우 ex) 100
list_number = [52, 273, 32, 72, 100]
try:
number_input = int(input("정수 입력> "))
print("{}번째 요소: {}".format(number_input, list_number[number_input]))
except Exception as exception:
print("type(exception):", type(exception))
print("exception:", exception)
# 예외 구분하기
# try:
# 예외가 발생할 가능성이 있는 구문
# except 예외의 종류 A:
# 예외A가 발생했을 때 실행할 구문
# except 예외의 종류 B:
# 예외B가 발생했을 때 실행할 구문
# except 예외의 종류 C:
# 예외C가 발생했을 때 실행할 구문
list_number = [52, 273, 32, 72, 100]
try:
number_input = int(input("정수 입력> "))
print("{}번째 요소: {}".format(number_input, list_number[number_input]))
except ValueError:
# ValueError가 발생하는 경우
print("정수를 입력해 주세요!")
except IndexError:
# IndexError가 발생하는 경우
print("리스트의 인덱스를 벗어났어요!")
# 예외 구분 구문과 예외 객체
# as 키워드를 사용하여 추가
list_number = [52, 273, 32, 72, 100]
try:
number_input = int(input("정수 입력> "))
print("{}번째 요소: {}".format(number_input, list_number[number_input]))
except ValueError as exception:
print("정수를 입력해 주세요!")
print("exception:", exception)
except IndexError as exception:
print("리스트의 인덱스를 벗어났어요!")
print("exception:", exception) | [
"klcpop1@gmail.com"
] | klcpop1@gmail.com |
1718f36c060caf2b67a509d51fa38963500c468c | 6e31a7867d658dfa514ded450eb4e69ec57e88df | /Glue/Glue/__init__.py | 97b0db1c061cb9b30d81215636cc390a29202053 | [] | no_license | jasonyu1996/glue | 625c77624a85b4fb26a57dffff0dd6a80887fa24 | d8815fba1764743f5d1cf3d1bbc6f126ea97e25b | refs/heads/master | 2021-07-10T06:14:38.063991 | 2017-09-21T13:35:14 | 2017-09-21T13:35:14 | 104,352,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | """
The flask application package.
"""
from flask import Flask
app = Flask(__name__)
import Glue.views
| [
"yuzhijingcheng1996@hotmail.com"
] | yuzhijingcheng1996@hotmail.com |
1e75d06bbd6ffc672e34760a40a21d2d640e0b85 | 8b5c7bf0e9cc47df3e6a4688e761ea01707cce72 | /0_or_positive_or_negative.py | 4ebd5f053517179f0df889bbf5e566a928d4f799 | [] | no_license | NandakrishnanR/basic-python | b42047d6b77924324f4558b9d18ed950f128c439 | 1c5d6d60d7a75ca0365a00e44a21fe3a24082afe | refs/heads/master | 2020-12-12T12:05:42.137690 | 2020-01-20T16:04:51 | 2020-01-20T16:04:51 | 234,124,100 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | r=float(input("enter a number to be checked"))
if r>0:
print("the number is positive")
elif r<0:
print("the number is negative")
else:
print("the number is zero") | [
"rnandakrishnan2001@gmail.com"
] | rnandakrishnan2001@gmail.com |
680e684399175bbf0c37efb8fc4e80d64e319af0 | 8f8908f8247a0401b6521582ad29d9718732f4f3 | /tes codet.py | 4fbcde1660a12584a51c943d4713f9b812d6064e | [] | no_license | kcb0126/script.tvguide | 1d11a444158d415c42dfb9bd99ad1b26ffb50ee9 | a42b4332f6161ff0a20bef583abade3b64e834ac | refs/heads/master | 2021-01-02T08:48:18.749685 | 2017-08-02T02:56:45 | 2017-08-02T02:56:45 | 98,981,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136,243 | py | #DO NEED IT
if program_start_time < epg_time_2
and program_stop_time > epg_time_3:
print
"passed 1"
if program_stop_time <
epg_time_2:
print "passed 4"
program_finished = prog_stop_clock.split(':')
[1].replace('PM', '').replace('AM', '')
if program_finished == '50':
print
"you are working on this now chrissssssssssss 30"
#OUTPUT THE PROGRAM REMAINING TIME HERE
current_time = int(time.strftime("%M"))
prog_width = self.getControl(int(program_id)).getWidth()
prog_length = int(prog_width) / 11.4 - current_time
prog_length = str(prog_length)
prog_length = prog_length.replace('.0', '')
prog_length = int(prog_length)
print "prog_length"
print prog_length
for program_time in program_remaining:
if int(program_time) <= 105:
if int(prog_length) > 60 and int(prog_length) < 105:
if int(current_time) >= 30 and int(current_time)
< 59:
if prog_width
== 1083:
print
"let change the size in 90 mins program 1"
prog_width = 741
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int
(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str,
progId)
posX =
map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int
(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif prog_width == 1368:
print "let change the size in 90
mins program 1"
prog_width = 1026
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int
(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY))
elif program_stop_time > epg_time_2:
program_finished = prog_stop_clock.split(':')[1].replace('PM', '').replace('AM', '')
if program_finished == '15':
if
program_length == 691:
print
"bbc two"
prog_width = 517
self.getControl(int
(program_id)).setWidth(prog_width)
next_program = int(program_id) + 1
next_program_width = self.getControl(int(next_program)).getWidth()
next_programs = int(next_program) + 1
next_programs_width = self.getControl(int
(next_programs)).getWidth()
previous_program = int(program_id) - 1
next_programs_width = self.getControl(int(previous_program)).getWidth()
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int
(program_id)).getY()
progId =
list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for
pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 174
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
#DO NEED IT
elif program_start_time > epg_time_1 and program_stop_time > epg_time_2:
print "passed 2"
#DO NEED IT
elif program_start_time ==
epg_time_1 and program_stop_time == epg_time_3:
print "passed 3"
#DO NEED IT
if epg_time_1 < program_start_time and epg_time_2 ==
program_stop_time:
print "hello
chrisssssssss 1"
if epg_time_1 <
program_start_time:
program_finished =
prog_stop_clock.split(':')[1].replace('PM', '').replace('AM', '')
if program_finished == '00':
if program_length == 1197:
prog_width = 691
self.getControl(int(program_id)).setWidth(prog_width)
elif program_length == 691:
print "heloooooooooooooooooooo 1"
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
next_program = int(program_id) + 1
next_program_width = self.getControl(int(next_program)).getWidth()
next_programs = int(next_program) + 1
next_programs_width =
self.getControl(int(next_programs)).getWidth()
previous_program = int(program_id) - 1
next_programs_width = self.getControl(int(previous_program)).getWidth()
getX = self.getControl(int
(program_id)).getX()
getY =
self.getControl(int(program_id)).getY()
progId = list()
posX =
list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int
(posX), int(posY))
if
next_programs_width == 342:
if next_program_width == 285 and next_programs_width == 57:
next_program_width = 278
self.getControl(int(next_program)).setWidth
(next_program_width)
getY = self.getControl(int(next_program)).getY()
progId = list()
posX = list()
posY = list()
for
elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, next_program in zip(posX, posY, progId):
if int(pos_X) > 952 and int(pos_Y) == getY:
posX = int(pos_X) - 8
posY = int(getY)
self.getControl
(int(next_program)).setPosition(int(posX), int(posY))
elif program_length == 517:
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for
elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId
= map(str, progId)
posX = map
(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 181
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_length == 798:
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
elif program_finished >= '15' and program_finished <= '17':
if program_length == 691:
print "heloooooooooooooooooooo 2"
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
next_program = int(program_id) + 1
next_program_width = self.getControl(int
(next_program)).getWidth()
next_programs = int(next_program) + 1
next_programs_width = self.getControl(int(next_programs)).getWidth()
previous_program = int(program_id) - 1
next_programs_width = self.getControl(int
(previous_program)).getWidth()
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY =
list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId
= map(str, progId)
posX = map
(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
if next_programs_width == 342:
if next_program_width == 285 and next_programs_width
== 57:
next_program_width = 278
self.getControl(int(next_program)).setWidth(next_program_width)
getY = self.getControl(int(next_program)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, next_program in zip(posX, posY,
progId):
if int
(pos_X) > 952 and int(pos_Y) == getY:
posX = int(pos_X) - 8
posY = int(getY)
self.getControl(int(next_program)).setPosition(int(posX), int(posY))
elif program_finished == '30':
if program_length == 517:
print "change size 2b"
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX =
map(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 181
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_length == 691:
print "change size 2c"
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
next_program = int(program_id) + 1
next_program_width = self.getControl(int(next_program)).getWidth()
next_programs = int
(next_program) + 1
next_programs_width = self.getControl(int(next_programs)).getWidth()
previous_program = int(program_id) - 1
next_programs_width = self.getControl(int
(previous_program)).getWidth()
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY =
list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId
= map(str, progId)
posX = map
(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
if next_programs_width == 342:
if next_program_width == 285 and next_programs_width
== 57:
next_program_width = 278
self.getControl(int(next_program)).setWidth(next_program_width)
getY = self.getControl(int(next_program)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, next_program in zip(posX, posY,
progId):
if int
(pos_X) > 952 and int(pos_Y) == getY:
posX = int(pos_X) - 8
posY = int(getY)
self.getControl(int(next_program)).setPosition(int(posX), int(posY))
elif epg_time_3:
program_finished = prog_stop_clock.split(':')[1].replace('PM',
'').replace('AM', '')
print
"helllllllllllllllllllllloooooooooooooooooooooooooooooooooooo"
self.getControl(int(program_id)).setVisible(False)
if program_finished == '00':
if program_length == 798:
print "fuck you"
prog_width = 691
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for
elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId
= map(str, progId)
posX = map
(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 113
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_finished == '30':
if program_length == 691:
print "hello chris 24"
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
next_program = int(program_id) + 1
next_program_width = self.getControl(int(next_program)).getWidth()
next_programs = int(next_program) + 1
next_programs_width =
self.getControl(int(next_programs)).getWidth()
previous_program = int(program_id) - 1
next_programs_width = self.getControl(int(previous_program)).getWidth()
getX = self.getControl(int
(program_id)).getX()
getY =
self.getControl(int(program_id)).getY()
progId = list()
posX =
list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int
(posX), int(posY))
if
next_programs_width == 342:
if next_program_width == 285 and next_programs_width == 57:
next_program_width = 278
self.getControl(int
(next_program)).setWidth(next_program_width)
getY = self.getControl(int(next_program)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append
(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, next_program in zip(posX, posY, progId):
if int(pos_X) > 952
and int(pos_Y) == getY:
posX = int(pos_X) - 8
posY = int(getY)
self.getControl(int(next_program)).setPosition(int(posX),
int(posY))
#I DO NEED THIS
elif program_start_time != epg_time_1 and program_stop_time <
epg_time_3:
print "hello chris 2"
if program_start_time < epg_time_1:
program_finished = prog_stop_clock.split(':')[1].replace
('PM', '').replace('AM', '')
if
program_stop_time < epg_time_3:
if
program_finished == '15':
if
program_length == 691:
prog_width = 517
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 178
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY))
elif program_finished == '00':
if program_length == 517:
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 174
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY))
elif program_length == 691:
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
next_program = int(program_id) + 1
next_program_width = self.getControl(int(next_program)).getWidth
()
next_programs = int
(next_program) + 1
next_programs_width = self.getControl(int(next_programs)).getWidth()
previous_program = int(program_id) - 1
next_programs_width = self.getControl(int
(previous_program)).getWidth()
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for
elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY =
map(str, posY)
for pos_X,
pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX),
int(posY))
if
next_programs_width == 342:
if next_program_width == 285 and next_programs_width == 57:
next_program_width = 278
self.getControl(int(next_program)).setWidth
(next_program_width)
getY = self.getControl(int(next_program)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, next_program in zip(posX,
posY, progId):
if int(pos_X) > 952 and int(pos_Y) == getY:
posX = int(pos_X) - 8
posY = int(getY)
self.getControl(int(next_program)).setPosition(int
(posX), int(posY))
elif
program_stop_time < epg_time_2:
if program_finished == '50':
if program_length == 627:
prog_width = 228
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY
()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX,
posY, progId):
if
int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 400
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_stop_time == epg_time_2:
program_finished =
prog_stop_clock.split(':')[1].replace('PM', '').replace('AM', '')
print "program_id"
print (program_id)
#error
ValueError: invalid literal for int() with base 10: on line 2274
if program_finished == '00':
current_time = int(time.strftime("%M"))
prog_widths = self.getControl(int(program_id)).getWidth()
print prog_widths
prog_length = int(prog_widths) / 11.4 - current_time
prog_length = str(prog_length)
prog_length = prog_length.replace
('.0', '')
prog_length = int
(prog_length)
for program_time
in program_remaining:
if
int(program_time) <= 30:
if int(current_time) >= 30 and int(current_time) < 59:
if prog_widths == 1026:
prog_widths = 342
self.getControl(int(program_id)).setWidth(prog_widths)
getX =
self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append
(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and
int(pos_Y) == getY:
posX = int(pos_X) - 684
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int
(posY))
elif
prog_width == 1368:
print "let change the size in 90 mins program 1"
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl
(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY
())
progId =
map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 1024 and int(pos_Y) == getY:
posX =
int(pos_X) - 1026
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif int(current_time) >= 30 and int
(current_time) < 59:
if prog_width == 1026:
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl
(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 684
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY))
elif prog_width == 1368:
print "let change the size in 90 mins program 1"
prog_width = 342
self.getControl(int
(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX,
posY, progId):
if int(pos_X) >= 1024 and int(pos_Y) == getY:
posX = int(pos_X) - 1026
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY))
elif program_stop_time < epg_time_2:
print "epg pass 1"
if
program_start_time < epg_time_1:
program_finished = prog_stop_clock.split(':')[1].replace('PM', '').replace('AM', '')
print "epg pass 2"
if program_stop_time < epg_time_2:
print "epg pass 3"
if program_finished == '50':
print "epg pass 4"
if program_length == 691:
print "epg pass 5"
prog_width = 228
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int
(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY,
progId):
if int
(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 463
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX),
int(posY))
elif
program_length == 342:
print "epg pass 6"
prog_width = 228
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int
(program_id)).getY()
progId = list()
posX
= list()
posY =
list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int
(pos_Y) == getY:
posX = int(pos_X) - 114
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_start_time >= epg_time_2:
program_finished = prog_stop_clock.split(':')[1].replace
('PM', '').replace('AM', '')
if
program_finished == '00':
if
program_length == 1197:
print
"you are working on this now chrissssssssssss 1"
#prog_width = 691
#self.getControl(int(program_id)).setWidth(prog_width)
elif program_length == 570:
print "you are working on this now chrissssssssssss 2"
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
elif program_length == 691:
print "you are working on this now chrissssssssssss 3"
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
self.getControl(int(program_id)).setVisible(False)
elif program_finished == '10':
if program_length == 691:
print "you are working on this now chrissssssssssss 4"
prog_width = 456
self.getControl(int(program_id)).setWidth(prog_width)
elif program_finished == '15':
print "you are working on this now chrissssssssssss 5"
if program_length == 691:
print "you are working on this now
chrissssssssssss 6"
prog_width =
517
self.getControl(int
(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY
= list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId
= map(str, progId)
posX = map
(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 174
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_length == 517:
prog_width = 171
print "you are working on this now chrissssssssssss 7"
#self.getControl(int(program_id)).setWidth(prog_width)
self.getControl(int
(program_id)).setVisible(False)
elif
program_finished == '20':
if
program_length == 691:
prog_width = 570
print "you are
working on this now chrissssssssssss 8"
self.getControl(int(program_id)).setWidth(prog_width)
elif program_finished == '30':
if program_length == 517:
prog_width = 342
print
"you are working on this now chrissssssssssss 9"
self.getControl(int(program_id)).setWidth(prog_width)
elif program_finished == '50':
if program_length == 627:
prog_width = 285
print "you are working on this now chrissssssssssss 10"
self.getControl(int(program_id)).setWidth(prog_width)
elif program_length == 342:
prog_width = 228
print "you are working on this now chrissssssssssss 11"
self.getControl(int(program_id)).setWidth(prog_width)
elif program_finished == '55':
if program_length == 691:
prog_width = 634
print "you are working on this now chrissssssssssss 12"
self.getControl(int(program_id)).setWidth
(prog_width)
#I DO NEED THIS
elif program_start_time > epg_time_2 and program_stop_time <
epg_time_3:
print "hello chris 3"
if program_length == 517:
print "you are working on this now chrissssssssssss 14"
elif program_length == 691:
print "you are working on this now chrissssssssssss 15"
#prog_width = 342
#self.getControl(int(program_id)).setWidth(prog_width)
#next_program_id = int(program_id) + 1
#getX = self.getControl(int(next_program_id)).getX()
#getY = self.getControl(int(next_program_id)).getY()
#print "getX"
#print getX
#print "getY"
#print getY
elif program_length == 1197:
prog_width = 513
print "you are working on this now chrissssssssssss 16"
#self.getControl(int(program_id)).setWidth(prog_width)
elif program_start_time < epg_time_2 and program_stop_time < epg_time_3:
if program_start_time < epg_time_3:
program_finished = prog_stop_clock.split(':')
[1].replace('PM', '').replace('AM', '')
print "you are working on this now chrissssssssssss 18as"
if program_start_time < epg_time_1:
program_finished = prog_stop_clock.split(':')[1].replace('PM', '').replace('AM',
'')
print "disable the button 2"
self.getControl(int
(program_id)).setVisible(False)
if
program_stop_time == epg_time_2:
if program_finished == '30':
if program_length == 517:
prog_width = 342
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY
()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX,
posY, progId):
if
int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 175
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_length == 691:
prog_width = 342
self.getControl(int
(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and
int(pos_Y) == getY:
posX = int(pos_X) - 342
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_stop_time > epg_time_1:
print "test it"
#DO NEED IT
elif epg_time_1 > program_start_time and program_stop_time > epg_time_2:
print "you are working on this now chrissssssssssss 26"
if program_start_time < epg_time_1:
if program_stop_time > epg_time_3:
program_finished = prog_stop_clock.split(':')[1].replace
('PM', '').replace('AM', '')
if
program_finished == '00':
print
"you are working on this now chrissssssssssss 30"
#OUTPUT THE PROGRAM REMAINING TIME HERE
current_time = int(time.strftime("%M"))
prog_width = self.getControl(int(program_id)).getWidth()
prog_length = int(prog_width) / 11.4 -
current_time
prog_length = str
(prog_length)
prog_length =
prog_length.replace('.0', '')
prog_length = int(prog_length)
print "prog_length"
print type
(prog_length)
if int
(prog_length) > 60 and int(prog_length) < 120:
print "the program has passed on if statement 1"
if int(current_time) >= 30 and int(current_time) < 59:
if prog_width == 1026:
print "let change the size
in 90 mins program 1"
prog_width = 691
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int
(program_id)).getY()
progId = list()
posX
= list()
posY =
list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int
(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif prog_width == 1368:
print "let change the size in 90
mins program 1"
prog_width = 1026
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int
(program_id)).getY()
progId = list()
posX
= list()
posY =
list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int
(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
#CALCULATE THE CURRENT TIME FROM 12AM TO 6AM
if program_length == 570:
prog_width = 228
self.getControl(int(program_id)).setWidth
(prog_width)
elif program_length
== 912:
prog_width = 570
self.getControl(int
(program_id)).setWidth(prog_width)
elif program_length == 1254:
prog_width = 912
self.getControl(int(program_id)).setWidth(prog_width)
elif program_length == 1596:
prog_width = 1254
self.getControl(int(program_id)).setWidth(prog_width)
elif program_length == 1938:
prog_width = 1596
self.getControl(int(program_id)).setWidth(prog_width)
elif program_finished == '05':
if program_length == 741:
prog_width = 399
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int
(program_id)).getY()
progId
= list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int
(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_stop_time > epg_time_2:
print "you are working on here now 1"
program_finished = prog_stop_clock.split(':')[1].replace('PM',
'').replace('AM', '')
if
program_finished == '15':
if
program_length == 691:
prog_width = 517
self.getControl(int(program_id)).setWidth(prog_width)
next_program = int(program_id) + 1
next_program_width = self.getControl(int(next_program)).getWidth
()
next_programs = int
(next_program) + 1
next_programs_width = self.getControl(int(next_programs)).getWidth()
previous_program = int(program_id) - 1
next_programs_width = self.getControl(int
(previous_program)).getWidth()
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for
elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY =
map(str, posY)
for pos_X,
pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 178
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX),
int(posY))
elif program_stop_time ==
epg_time_3:
program_finished =
prog_stop_clock.split(':')[1].replace('PM', '').replace('AM', '')
print "you are working on here now 2"
if program_finished == '00':
#OUTPUT THE PROGRAM REMAINING TIME HERE
current_time = int(time.strftime("%M"))
prog_width = self.getControl(int(program_id)).getWidth()
prog_length = int(prog_width) /
11.4 - current_time
prog_length
= str(prog_length)
prog_length =
prog_length.replace('.0', '')
prog_length = int(prog_length)
print "current_time"
print
current_time
for program_time in
program_remaining:
if int
(program_time) <= 60:
if
int(current_time) >= 0 and int(current_time) < 30:
if prog_width == 1026:
prog_width = 691
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl
(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY
())
progId =
map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX =
int(pos_X) - 335
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif prog_width == 1368:
print "let change the size in
90 mins program 1"
prog_width = 691
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int
(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 1024 and int(pos_Y) == getY:
posX = int(pos_X) - 677
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY))
elif int(current_time) >= 30 and int(current_time) < 59:
if prog_width == 1026:
prog_width = 691
self.getControl(int
(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX,
posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 335
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY))
elif prog_width == 1368:
print "let change the size in 90 mins program 1"
prog_width = 691
self.getControl(int
(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in
programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX,
posY, progId):
if int(pos_X) >= 1024 and int(pos_Y) == getY:
posX = int(pos_X) - 677
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY))
if program_length
== 798:
prog_width = 691
self.getControl(int
(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY =
map(str, posY)
for pos_X,
pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 107
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX),
int(posY))
elif program_stop_time >
epg_time_1:
print "hey arsehole"
elif program_stop_time > epg_time_3:
program_finished = prog_stop_clock.split(':')[1].replace('PM', '').replace('AM',
'')
if program_finished == '00':
print "you are working on this now
chrissssssssssss 30"
#OUTPUT THE
PROGRAM REMAINING TIME HERE
current_time = int(time.strftime("%M"))
print current_time
prog_width = self.getControl(int(program_id)).getWidth()
prog_length = int(prog_width) / 11.4 - current_time
prog_length = str(prog_length)
prog_length = prog_length.replace('.0', '')
prog_length = int(prog_length)
print "prog_length"
print prog_length
if
int(prog_length) == '90':
if
prog_width == 1368:
print
"let change the size in 90 mins program 1"
elif
program_start_time < epg_time_1 and program_stop_time == epg_time_3:
if program_stop_time == epg_time_3:
program_finished = prog_stop_clock.split(':')[1].replace('PM', '').replace
('AM', '')
print "you are working on
here now 2"
if program_finished == '00':
if program_length == 1197:
prog_width = 691
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int
(program_id)).getX()
getY =
self.getControl(int(program_id)).getY()
progId = list()
posX =
list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 506
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int
(posX), int(posY))
if program_start_time >
epg_time_1 and program_stop_time < epg_time_2:
print "hello chris 4"
if program_length ==
342:
print "you are working on this now
chrissssssssssss 17"
#self.getControl
(int(program_id)).setVisible(False)
#DO NOT
ADD 45 MINS (PROGRAM_LENGTH = 517) AS IT IS NO NEEDED
elif program_length == 691:
print "you are working on this now chrissssssssssss 19"
#prog_width = 342
#self.getControl(int(program_id)).setWidth(prog_width)
#next_program_id = int(program_id) + 1
#getX = self.getControl(int(next_program_id)).getX()
#getY = self.getControl(int(next_program_id)).getY()
#print "getX"
#print getX
#print "getY"
#print getY
elif program_length == 517:
print "you are working on this now chrissssssssssss 20"
#prog_width = 171
#self.getControl(int(program_id)).setWidth(prog_width)
self.getControl(int(program_id)).setVisible(False)
elif program_start_time < epg_time_1 and program_stop_time < epg_time_2:
print "epg time 7"
if program_stop_time < epg_time_2:
program_finished = prog_stop_clock.split(':')[1].replace('PM', '').replace('AM', '')
if program_finished == '20':
print "epg pass 8"
if program_length == 691:
print "epg pass 9"
prog_width = 228
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for
elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId
= map(str, progId)
posX = map
(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 463
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_length == 342:
print "epg pass 10"
prog_width = 228
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId
= map(str, progId)
posX = map
(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 114
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif program_start_time < epg_time_1 and program_stop_time >
epg_time_2:
if program_stop_time >
epg_time_2:
program_finished =
prog_stop_clock.split(':')[1].replace('PM', '').replace('AM', '')
if program_finished == '50':
if program_length == 691:
print "helooooooooooooooooooooooooooooo chris"
prog_width = 577
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId
= map(str, progId)
posX = map
(str, posX)
posY = map(str,
posY)
for pos_X, pos_Y, prog_id
in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 114
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
#DO NEED IT
if
program_start_time > epg_time_1 and program_stop_time > epg_time_3:
if program_start_time < epg_time_2:
program_finished = prog_stop_clock.split(':')[1].replace('PM', '').replace
('AM', '')
#NEED TO FIND OUT WHAT TIME
if program_finished == '??':
if
program_length == 1368:
prog_width =
1026
self.getControl(int
(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X,
pos_Y, prog_id in zip(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
self.getControl(int(prog_id)).setPosition(int(posX),
int(posY))
#DO NEED IT
if epg_time_1 == program_start_time and program_stop_time > epg_time_3:
print "you are working on this now a!"
if program_stop_time > epg_time_3:
program_finished = prog_stop_clock.split(':')[1].replace('PM', '').replace('AM', '')
print "you are working on this now b!"
if program_finished == '00':
print "you are working on this now chrissssssssssss 30"
#OUTPUT THE PROGRAM REMAINING TIME HERE
current_time = int(time.strftime("%M"))
prog_width = self.getControl(int
(program_id)).getWidth()
prog_length
= int(prog_width) / 11.4 - current_time
prog_length = str(prog_length)
prog_length = prog_length.replace('.0', '')
prog_length = int(prog_length)
print "prog_length"
print
prog_length
for program_time in
program_remaining:
if int
(program_time) <= 60:
if
int(prog_length) > 60 and int(prog_length) < 120:
if int(current_time) >= 30 and int(current_time) < 59:
if prog_width == 1026:
print "let change
the size in 90 mins program 1"
prog_width = 691
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int
(program_id)).getX()
getY = self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip
(posX, posY, progId):
if int(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int(prog_id)).setPosition(int(posX), int(posY))
elif prog_width == 1368:
print "let change the size in 90 mins
program 1"
prog_width = 1026
self.getControl(int(program_id)).setWidth(prog_width)
getX = self.getControl(int(program_id)).getX()
getY =
self.getControl(int(program_id)).getY()
progId = list()
posX = list()
posY = list()
for elem in programs_button:
progId.append(elem.getId())
posX.append(elem.getX())
posY.append(elem.getY())
progId = map(str, progId)
posX = map(str, posX)
posY = map(str, posY)
for pos_X, pos_Y, prog_id in zip(posX, posY, progId):
if int
(pos_X) >= 724 and int(pos_Y) == getY:
posX = int(pos_X) - 348
posY = int(getY)
self.getControl(int
(prog_id)).setPosition(int(posX), int(posY)) | [
"kcb0126@outlook.com"
] | kcb0126@outlook.com |
1181b1fcaa5e87e38ec926ff5331b78ffa583ba7 | ffed44c171deea0d5ef2af7d0ddcdf7e6733f32a | /sale_product_smart_buttons/models/__init__.py | daba963f5fbf01121d9e6ab8c4911c1345081c49 | [] | no_license | Chandresh-SerpentCS/SerpentCS_Contributions | bec39730be5a886a7aa59282b3cf1cc894a67b65 | 9a03deb2b43fdb3313bb96f435082a8bb81e310d | refs/heads/10.0 | 2022-03-02T02:03:05.117022 | 2020-07-28T05:47:11 | 2020-07-28T05:47:11 | 85,973,203 | 3 | 1 | null | 2017-03-23T16:38:20 | 2017-03-23T16:38:20 | null | UTF-8 | Python | false | false | 108 | py | # -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
from . import product
| [
"jay.vora@serpentcs.com"
] | jay.vora@serpentcs.com |
b3371a9161743e81a68b2a0fb27d8d0fa7865ed3 | 569b47e199981f35477f6de53fb64413653d5997 | /show devices.py | ae9752e6b0f7d50a3dd40ab7e55750ca62a86ea6 | [] | no_license | joosthoi1/spotify-thing | 078bdb894e79afe6a5c00cb7c8bdb69c2bd9a8de | 12dacaa307ec7b92bfef71c047f84d2ff40ed7f1 | refs/heads/master | 2020-06-26T04:16:44.956477 | 2019-08-01T12:48:04 | 2019-08-01T12:48:04 | 199,526,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | import tkinter as tk
import spotify_wrapper
import json
import requests
import time
class main:
def __init__(self):
self.root = tk.Tk()
with open("config.json", 'r') as file:
config = json.loads(file.read())
username = config['username']
client_id = config['client_id']
client_secret = config['client_secret']
scope = 'user-library-read user-modify-playback-state user-read-playback-state'
s =spotify_wrapper.Spotify()
self.token = s.get_token(
username = username,
client_id=client_id,
client_secret=client_secret,
scope = scope,
redirect_uri='http://google.com/'
)
self.headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.token}',
}
self.ids = []
while True:
response = requests.get(
"https://api.spotify.com/v1/me/player/devices",
headers=self.headers
)
if response.status_code == 429:
time.sleep(1)
else:
devices = response.json()['devices']
for i in devices:
if 'id' in i:
if i['id'] not in self.ids:
self.ids.append(i['id'])
tk.Button(
self.root,
text=f"{i['name']} - {i['type']}",
width=50
).pack(side='top',anchor='nw')
self.root.update()
if __name__ == "__main__":
main()
| [
"joosthoi1@openviza.com"
] | joosthoi1@openviza.com |
9e94751b6f70c73ed790cef4cef4bfb8083f9ffd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_traipsed.py | f59c7ae5d2434f5d2f1133296a72f7b2307b4aa4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _TRAIPSED():
def __init__(self,):
self.name = "TRAIPSED"
self.definitions = traipse
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['traipse']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a8de915bd630c78b7ed9df7605161ba20b2c4146 | c46eaf84859f830e63ac09d87870ad1aefc4303a | /Python_Rust_Module/fibrs/setup.py | 72e94b2f54b65c1ed6e732dcab7911459c245054 | [
"MIT"
] | permissive | glella/fib | 7a2b21d6607e50f1916ee4a34baf463c130554ea | 440bdb4da5c4ecb06cabeb4b09770a69e204114a | refs/heads/main | 2023-04-01T15:51:25.049402 | 2021-04-13T23:58:58 | 2021-04-13T23:58:58 | 349,891,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import sys
from setuptools import setup
try:
from setuptools_rust import Binding, RustExtension
except ImportError:
import subprocess
errno = subprocess.call(
[sys.executable, '-m', 'pip', 'install', 'setuptools-rust'])
if errno:
print("Please install setuptools-rust package")
raise SystemExit(errno)
else:
from setuptools_rust import Binding, RustExtension
setup_requires = ['setuptools-rust>=0.9.2']
install_requires = []
setup(
name='fibrs',
version='0.1',
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Rust',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
],
rust_extensions=[
RustExtension('fibrs.fibrs', binding=Binding.PyO3)],
packages=['fibrs'],
zip_safe=False,
)
| [
"arkorott@gmail.com"
] | arkorott@gmail.com |
a8d1c3855133be357e3ac72d35616e8b7fc0d18b | ce07ccf78739a768971f393222fdca4a56315241 | /employee_management/employee_management/doctype/ord/ord.py | 5cfedfcf1c3f5f11200b1042214ecfbf25a91a73 | [
"MIT"
] | permissive | Gdinesh03/Frappe | 563e0ddbe925be536f65f925787ed321a6098c0d | efd2d1568b6f5b8a4e0ff31e06a415c717a3d32a | refs/heads/master | 2023-08-27T19:24:12.024442 | 2021-09-14T07:04:27 | 2021-09-14T07:04:27 | 406,260,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Gopi and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Ord(Document):
def validate(self):
total = 0
for d in self.get('product_details'):
total += int(d.product_price)
self.total_amount = total
# self.total = mow
# @frappe.whitelist()
# def get_pro(orderb):
# source = frappe.db.sql(''' select * from `tabOrderb` where name = %s''',orderb,as_dict=1)
# for i in source:
# # frappe.log_error(i,"kk")
# sam = frappe.db.sql(''' select product_total from `tabProductdetb` where parent = %s''',i.name,as_dict=1)
# for d in sam:
# mow = sum(float(d.product_total) for d in sam)
# return mow
| [
"vivekananthan112599@gmail.com"
] | vivekananthan112599@gmail.com |
2f9d9d0a5f33a0f8e9805fc11884091fcaef038d | 9015783bae7e68571fd349d59a0e7b2c54c5a4a8 | /Factory Method/Bin.py | e143263d0c4e44d4540eb27010b7bda28b4ffdf3 | [] | no_license | Bujno/Design-patterns | 0079fbee05983d099963ccf33996892910f1e1e8 | af9b7ae0ed2a2152b6deed5b59d59369108f7f51 | refs/heads/main | 2023-07-04T13:23:00.178447 | 2021-08-10T17:11:28 | 2021-08-10T17:11:28 | 393,768,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | from Sweet import Sweet
class GlassBin:
def __init__(self, sweet_type: Sweet):
self.sweet_type = sweet_type
self.limit = 100
self.minimum = 10
self.set_of_sweets = {self.sweet_type.create_sweet() for _ in range(10)}
def restock(self):
if len(self.set_of_sweets) >= self.minimum:
return
self.set_of_sweets.add(self.sweet_type.create_sweet())
def get_sweet(self):
return self.set_of_sweets.pop() | [
"0k.bujnowicz@gmail.com"
] | 0k.bujnowicz@gmail.com |
165fe338a7a24e1cc0a569fbbd334ca25723fb18 | f6679cc558ed0f1610745d8883e37cd5fd010810 | /optmlstat/ml/modeling/bayesian_least_squares_base.py | a4cf61cd1e0e2b04a587eb2fa9a5bf1b9ab47591 | [
"MIT"
] | permissive | sungheeyun/optmlstat | af984c5e1e270430b1b8fac88ccf341ec9313489 | ba89796d26eefe08b497eaf401b780d68094d128 | refs/heads/master | 2023-07-28T03:16:08.956797 | 2023-07-18T00:13:11 | 2023-07-18T00:13:11 | 234,828,129 | 4 | 1 | MIT | 2022-07-20T04:04:46 | 2020-01-19T02:28:46 | Python | UTF-8 | Python | false | false | 2,158 | py | from typing import Tuple
import abc
import numpy as np
from stats.dists.gaussian import Gaussian
from functions.function_base import FunctionBase
from ml.modeling.bayesian_modeler_base import BayesianModelerBase
class BayesianLeastSquaresBase(BayesianModelerBase):
def get_predictor(self) -> FunctionBase:
assert False
@abc.abstractmethod
def get_predictive_dist(
self, x_array_1d: np.ndarray
) -> Tuple[float, float]:
pass
@abc.abstractmethod
def get_prior(self) -> Gaussian:
pass
@classmethod
def solve_linear_sys_using_lower_tri_from_chol_fac(
cls, lower_tri: np.ndarray, y_array_1d: np.ndarray
) -> np.ndarray:
z_array_1d: np.ndarray = cls.forward_substitution(
lower_tri, y_array_1d
)
x_array_1d: np.ndarray = cls.backward_substitution(
lower_tri.T, z_array_1d
)
return x_array_1d
@classmethod
def forward_substitution(
cls, lower_tri: np.ndarray, y_array_1d: np.ndarray
) -> np.ndarray:
vec_size: int = y_array_1d.size
assert lower_tri.shape == (vec_size, vec_size), (
lower_tri.shape,
y_array_1d.shape,
)
x_array_1d: np.ndarray = np.ndarray(shape=(vec_size,), dtype=float)
for idx in range(vec_size):
x_array_1d[idx] = (
y_array_1d[idx]
- np.dot(lower_tri[idx, :idx], x_array_1d[:idx])
) / lower_tri[idx, idx]
return x_array_1d
@classmethod
def backward_substitution(
cls, upper_tri: np.ndarray, y_array_1d: np.ndarray
) -> np.ndarray:
vec_size: int = y_array_1d.size
assert upper_tri.shape == (vec_size, vec_size), (
upper_tri.shape,
y_array_1d.shape,
)
x_array_1d: np.ndarray = np.ndarray(shape=(vec_size,), dtype=float)
for idx in range(vec_size - 1, -1, -1):
x_array_1d[idx] = (
y_array_1d[idx]
- np.dot(upper_tri[idx, idx + 1 :], x_array_1d[idx + 1 :])
) / upper_tri[idx, idx]
return x_array_1d
| [
"sunghee.yun@gmail.com"
] | sunghee.yun@gmail.com |
6d2bcb830f1e1bd1d6ad5e6a33c2a240bceaeb70 | 7606590d781a134cb1134fcf222f3eee6ce19219 | /contours.py | 7e06f119536b0f48d706000474ee09cedeb6ca49 | [] | no_license | lera000/use_openCV | ccca755938a4ce2feeb30c041d326130dad7afbe | b504f807de5380b91d89d1b9235ad48c559a838e | refs/heads/main | 2023-03-27T09:13:34.643495 | 2021-03-26T08:28:03 | 2021-03-26T08:28:03 | 351,703,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | import cv2
import numpy as np
img = cv2.imread('D:/rec.jpg', cv2.IMREAD_UNCHANGED)
# изменение цвета на серый
img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = 100
ret, thresh_img = cv2.threshold(img_grey, thresh, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# создание пустого изображения для контуров
img_contours = np.zeros(img.shape)
# рисовка контуров на пустом изображении
cv2.drawContours(img_contours, contours, 0, (0, 255, 0), 3)
cv2.imwrite('D:/rec1.png', img_contours)
| [
"noreply@github.com"
] | noreply@github.com |
b502ac7418a2e4602dfb5cab8106552da7e0256a | 43667bd0af1086bc4a03fa82043dce0c448ab331 | /Tests.py | 8de830c641171728751e69c0245cad059b804d9e | [] | no_license | Dineshsgit/ea_transaction_data_analysis | 7a74f0dd476ecc42330686d8acb5b1074ce51bc8 | 08000e68c9717334c3214ad37c3e4e7f8694fb26 | refs/heads/main | 2023-05-10T15:26:38.707483 | 2021-06-16T02:35:08 | 2021-06-16T02:35:08 | 377,102,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | import unittest
import logging
import pandas as pd
import os, os.path
from Main import read_input_file, generate_hash, split_dataframe, find_top_suburbs, find_top_agents_by_suburb, get_config
class ETLTest(unittest.TestCase):
def test_split_dataframe(self):
data = {'Product': ['Desktop Computer', 'Tablet', 'Printer', 'Laptop', 'super Computer',
'mobile phone', 'tablet', 'smart watch', 'ear phones'],
'Price': [850, 200, 150, 1300, 10000, 1000, 1200, 400, 50]
}
df = pd.DataFrame(data, columns=['Product', 'Price'])
split_dataframe(df, 'test_output/', 2)
path = os.path.dirname(os.path.realpath(__file__)) + '/' + 'test_output/'
self.assertEqual(len(os.listdir(path)), 5)
if __name__ == '__main__':
unittest.main()
| [
"dineshveluri@qantasloyalty.com"
] | dineshveluri@qantasloyalty.com |
edde427c7169f9711b7de62ead15d113ad9ef1cc | ab52c09c428f73ad0a43a112a68dc7fe71fdacaf | /CSVfileUpload/CSVapp/forms.py | 5a9cf36f79baf2a2f177d593dc008c4ff8ac0b5e | [] | no_license | Raarav/web-engineering | cebb42ee2b25eb46eda0079402049a476cfbd1e9 | 330aacb51ca492318092824701fabdaf5a450546 | refs/heads/master | 2020-04-28T09:25:41.430471 | 2019-07-28T07:32:54 | 2019-07-28T07:32:54 | 175,166,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from django import forms
from CSVapp.models import form
class EventsForm(forms.ModelForm):
class Meta:
model = form
fields = "__all__" | [
"33652351+Raarav@users.noreply.github.com"
] | 33652351+Raarav@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.