blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2d41ab5c8dcaa1b0c5a061f0fe767f4d00b5703 | 4b7d5c8824df4462a338993efcdfa3b17199ff5b | /基础/day1/guessage_while.py | f90a4e7c568144a273ae7dc8e8d360cfb0196b82 | [] | no_license | kobe24shou/python | 9c287babfb357e7f650fab453f3e60614b7a71fc | f78f147101f182207a69f0dc8e1595b54280164a | refs/heads/master | 2021-06-02T12:40:59.424542 | 2020-06-28T06:13:51 | 2020-06-28T06:13:51 | 101,620,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python
# -*-coding:utf-8-*-
# Author:ls
# aishou24@gmail.com
age = 50
flag = True
while flag:
user_input_age = int(input("Age is :"))
if user_input_age == age:
print("Yes")
flag = False
elif user_input_age > age:
print("Is bigger")
else:
print("Is smaller")
print("End")
print("------------------breake 版本------------------------")
#break # 终止
age1 = 50
# flag = True
# break
while True:
user_input_age = int(input("Age is :"))
if user_input_age == age1:
print("Yes")
break
elif user_input_age > age1:
print("Is bigger")
else:
print("Is smaller")
| [
"aishou24@gmail.com"
] | aishou24@gmail.com |
f05d91a779158e39e065a91876b3ee0594373239 | 6e9c9128054da7eea28a4627381df28f95416ee5 | /finance_ml/labeling/betsides.py | b3df71764d3b3a2d8c9c13dde03c34083689205f | [
"MIT"
] | permissive | BTCTON/finance_ml | c5a4ad2486608ad19c92c04c70fe513be135c236 | a585be2d04db5a749eb6b39b7336e5aeb30d6327 | refs/heads/master | 2021-12-23T07:53:13.791609 | 2021-10-15T01:47:41 | 2021-10-15T01:47:41 | 158,898,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | import numbers
import pandas as pd
import numpy as np
import multiprocessing as mp
from ..multiprocessing import mp_pandas_obj
def _cusum_side(diff, h, k=0, molecule=None):
side = []
s_pos, s_neg = 0, 0
timestamps = []
th = None
for t in molecule:
if th is None:
th = h.loc[t]
s_pos = max(0, s_pos + diff.loc[t] - k)
s_neg = min(0, s_neg + diff.loc[t] + k)
if s_pos > th:
s_pos = 0
timestamps.append(t)
th = h.loc[t]
side.append(1)
elif s_neg < -th:
s_neg = 0
timestamps.append(t)
th = h.loc[t]
side.append(-1)
side = pd.Series(side, index=pd.DatetimeIndex(timestamps))
return side
def cusum_side(close, h, k=0, use_log=True, num_threads=None):
"""Sample points with CUSUM Filter and use its direction as betting side
Args:
close (pd.Series): Price series
h (float or pd.Series): Threasholds to sampmle points.\
If specified with float, translate to pd.Series(h, index=close.index)
k (float, optional): Minimum speed parameter to hit threashold.\
Defaults to 0, which means inactive
Returns:
pd.Series: Betting sides at sampled points
"""
if num_threads is None:
num_threads = mp.cpu_count()
# asssum that E y_t = y_{t-1}
side = []
s_pos, s_neg = 0, 0
if use_log:
diff = np.log(close).diff().dropna()
else:
diff = close.diff().dropna()
# time variant threshold
if isinstance(h, numbers.Number):
h = pd.Series(h, index=diff.index)
h = h.reindex(diff.index, method='bfill')
h = h.dropna()
side = mp_pandas_obj(func=_cusum_side,
pd_obj=('molecule', h.index),
num_threads=num_threads,
diff=diff, h=h, k=k)
return side | [
"f.j.akimoto@gmail.com"
] | f.j.akimoto@gmail.com |
e4a5581eacba722b9bd59eaf6b2c79e06c407dd6 | 955f9d3fb34af54de2f046d17bbac11c1474819e | /abc111/b.py | 806ef54082f0f620dd15f1c7e64280e5d3c590c3 | [] | no_license | shimewtr/AtCoderPracticePython | 5bb4c28119fced2d111bd1810e0e290f25b6a191 | f3c22ec1f7a36a27848070c5c6ca4e1717b04ac6 | refs/heads/master | 2023-01-12T17:28:44.770138 | 2020-11-19T22:50:22 | 2020-11-19T22:50:22 | 204,830,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
n = int(input())
for i in range(n, 1000):
s = str(i)
if s[0] == s[1] == s[2]:
print(i)
break
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_input_1(self):
print("test_input_1")
input = """111"""
output = """111"""
self.assertIO(input, output)
def test_input_2(self):
print("test_input_2")
input = """112"""
output = """222"""
self.assertIO(input, output)
def test_input_3(self):
print("test_input_3")
input = """750"""
output = """777"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
| [
"wawawatataru@gmail.com"
] | wawawatataru@gmail.com |
167b95dc17b9c8dee4e07a8205bc2fafd07bd0d8 | 707054dbae74908940b72a462553dda70b97d7d2 | /home/models.py | 6f0c7933685422f0cfa0ec268884ce9f6b0d648d | [] | no_license | nghiatd16/spoj_tournament | ea6b59d6efd0f10fd1993c2252f8afe3b3ffb685 | 21f79224059fbeb84907db7ddc9c050c8da307a8 | refs/heads/master | 2020-04-19T22:32:36.998098 | 2019-02-03T13:25:59 | 2019-02-03T13:25:59 | 168,471,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | from django.db import models
import numpy as np
import math
import time
# Create your models here.
class Member(models.Model):
full_name = models.CharField(max_length=100)
username = models.CharField(max_length=20)
grade = models.CharField(max_length=5)
num_solved = models.IntegerField()
score = models.FloatField(default=0)
target = models.IntegerField(default=1)
lst_solved = models.CharField(max_length=15000)
lastrank = models.IntegerField(default=1)
def parse_lst_solved(self, col_per_row=4):
lst_str_solved = self.lst_solved.__str__().split(' ')
return Member.reshape_list(lst_str_solved, col_per_row=col_per_row)
def get_list_solved(self):
return self.lst_solved.__str__().strip().split(' ')
def get_set_solved(self):
return set(self.lst_solved.__str__().strip().split(' '))
def get_list_exclude(self, other):
self_set_str_solved = set(self.lst_solved.__str__().split(' '))
other_set_str_solved = set(other.lst_solved.__str__().split(' '))
self_res = []
other_res = []
for ele in self_set_str_solved:
if ele not in other_set_str_solved:
self_res.append(ele)
for ele in other_set_str_solved:
if ele not in self_set_str_solved:
other_res.append(ele)
return (self_res, other_res)
@staticmethod
def reshape_list(lst_str_solved, col_per_row=3):
num_row = math.ceil(len(lst_str_solved)/col_per_row)
res = []
c_id = 0
for i in range(num_row):
tmp = []
for j in range(col_per_row):
tmp.append((lst_str_solved[c_id], "https://www.spoj.com/PTIT/problems/{}/".format(lst_str_solved[c_id])))
if c_id == len(lst_str_solved)-1 :
break
c_id += 1
res.append(tmp)
return res
def __str__(self):
return "[{} - {} - {}]".format(self.full_name, self.num_solved, self.target)
def __eq__(self, other):
if self.score == other.score and self.num_solved == other.num_solved and self.username == other.username:
return True
return False
def __gt__(self, other):
if self.score != other.score:
return self.score > other.score
if self.num_solved != num_solved:
return self.num_solved > other.num_solved
return self.username > other.username
def __lt__(self, other):
if self.score != other.score:
return self.score < other.score
if self.num_solved != num_solved:
return self.num_solved < other.num_solved
return self.username < other.username
class Topic(models.Model):
url = models.CharField(max_length=100)
name = models.CharField(max_length=20)
def __str__(self):
return "Topic[name:{} - url:{}]".format(self.name, self.url)
def get_arr_name_columns(self):
return ["name", "url"]
def get_name_columns(self):
arr = self.get_arr_name_columns()
rs = ""
for i in range(len(arr)):
rs += arr[i]
if i != len(arr)-1:
rs += ", "
return rs
def get_refer(self):
return "%s, %s"
def get_value(self):
return (self.name, self.url)
class Problem(models.Model):
code = models.CharField(max_length=15, primary_key=True)
score = models.FloatField() | [
"nghiatd.proptit@gmail.com"
] | nghiatd.proptit@gmail.com |
bffb107f57f0d36dc20178f29c6dc99e51e19baf | 2ea49bfaa6bc1b9301b025c5b2ca6fde7e5bb9df | /contributions/IlyaGusev/Python/Data Structures/2016-10-22.py | 75d1b2c5f1aa536abbd5ffc5220a703ae5c4ac1f | [] | no_license | 0x8801/commit | 18f25a9449f162ee92945b42b93700e12fd4fd77 | e7692808585bc7e9726f61f7f6baf43dc83e28ac | refs/heads/master | 2021-10-13T08:04:48.200662 | 2016-12-20T01:59:47 | 2016-12-20T01:59:47 | 76,935,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | Following PEP 8 styling guideline.
`bytes` type
Get the most of `int`s
There is more to copying
Implementing **weak** references | [
"phoenixilya@gmail.com"
] | phoenixilya@gmail.com |
a53f15b3da988af03b2566c0cf91fc45e52d5bf2 | 27b86f422246a78704e0e84983b2630533a47db6 | /src/ezdxf/acis/const.py | 7be54fa05ffa8b157ce4698f620934753fe88c3e | [
"MIT"
] | permissive | mozman/ezdxf | 7512decd600896960660f0f580cab815bf0d7a51 | ba6ab0264dcb6833173042a37b1b5ae878d75113 | refs/heads/master | 2023-09-01T11:55:13.462105 | 2023-08-15T11:50:05 | 2023-08-15T12:00:04 | 79,697,117 | 750 | 194 | MIT | 2023-09-14T09:40:41 | 2017-01-22T05:55:55 | Python | UTF-8 | Python | false | false | 5,301 | py | # Copyright (c) 2022, Manfred Moitzi
# License: MIT License
import enum
from ezdxf.version import __version__
# SAT Export Requirements for Autodesk Products
# ---------------------------------------------
# Script to create test files:
# examples/acistools/create_3dsolid_cube.py
# DXF R2000, R2004, R2007, R2010: OK, tested with TrueView 2022
# ACIS version 700
# ACIS version string: "ACIS 32.0 NT"
# record count: 0, not required
# body count: 1, required
# ASM header: no
# end-marker: "End-of-ACIS-data"
# DXF R2004, R2007, R2010: OK, tested with TrueView 2022
# ACIS version 20800
# ACIS version string: "ACIS 208.00 NT"
# record count: 0, not required
# body count: n + 1 (asm-header), required
# ASM header: "208.0.4.7009"
# end-marker: "End-of-ACIS-data"
# SAB Export Requirements for Autodesk Products
# ---------------------------------------------
# DXF R2013, R2018: OK, tested with TrueView 2022
# ACIS version 21800
# ACIS version string: "ACIS 208.00 NT"
# record count: 0, not required
# body count: n + 1 (asm-header), required
# ASM header: "208.0.4.7009"
# end-marker: "End-of-ASM-data"
ACIS_VERSION = {
400: "ACIS 4.00 NT", # DXF R2000, no asm header - only R2000
700: "ACIS 32.0 NT", # DXF R2000-R2010, no asm header
20800: "ACIS 208.00 NT", # DXF R2013 with asm-header, asm-end-marker
21800: "ACIS 218.00 NT", # DXF R2013 with asm-header, asm-end-marker
22300: "ACIS 223.00 NT", # DXF R2018 with asm-header, asm-end-marker
}
ASM_VERSION = {
20800: "208.0.4.7009", # DXF R2004, R2007, R2010
21800: "208.0.4.7009", # DXF R2013, default version for R2013 and R2018
22300: "222.0.0.1700", # DXF R2018
}
EZDXF_BUILDER_ID = f"ezdxf v{__version__} ACIS Builder"
MIN_EXPORT_VERSION = 700
# ACIS version 700 is the default version for DXF R2000, R2004, R2007 and R2010 (SAT)
# ACIS version 21800 is the default version for DXF R2013 and R2018 (SAB)
DEFAULT_SAT_VERSION = 700
DEFAULT_SAB_VERSION = 21800
DATE_FMT = "%a %b %d %H:%M:%S %Y"
END_OF_ACIS_DATA_SAT = "End-of-ACIS-data"
END_OF_ACIS_DATA_SAB = b"\x0e\x03End\x0e\x02of\x0e\x04ACIS\x0d\x04data"
END_OF_ASM_DATA_SAT = "End-of-ASM-data"
END_OF_ASM_DATA_SAB = b"\x0e\x03End\x0e\x02of\x0e\x03ASM\x0d\x04data"
BEGIN_OF_ACIS_HISTORY_DATA = "Begin-of-ACIS-History-data"
END_OF_ACIS_HISTORY_DATA = "End-of-ACIS-History-data"
DATA_END_MARKERS = (
END_OF_ACIS_DATA_SAT,
BEGIN_OF_ACIS_HISTORY_DATA,
END_OF_ASM_DATA_SAT,
)
NULL_PTR_NAME = "null-ptr"
NONE_ENTITY_NAME = "none-entity"
NOR_TOL = 1e-10
RES_TOL = 9.9999999999999995e-7
BOOL_SPECIFIER = {
"forward": True,
"forward_v": True,
"reversed": False,
"reversed_v": False,
"single": True,
"double": False,
}
ACIS_SIGNATURE = b"ACIS BinaryFile" # DXF R2013/R2018
ASM_SIGNATURE = b"ASM BinaryFile4" # DXF R2018
SIGNATURES = [ACIS_SIGNATURE, ASM_SIGNATURE]
def is_valid_export_version(version: int):
return version >= MIN_EXPORT_VERSION and version in ACIS_VERSION
class Tags(enum.IntEnum):
NO_TYPE = 0x00
BYTE = 0x01 # not used in files!
CHAR = 0x02 # not used in files!
SHORT = 0x03 # not used in files!
INT = 0x04 # 32-bit signed integer
FLOAT = 0x05 # not used in files!
DOUBLE = 0x06 # 64-bit double precision floating point value
STR = 0x07 # count is the following 8-bit uchar
STR2 = 0x08 # not used in files!
STR3 = 0x09 # not used in files!
# bool value for reversed, double, I - depends on context
BOOL_TRUE = 0x0A
# bool value forward, single, forward_v - depends on context
BOOL_FALSE = 0x0B
POINTER = 0x0C
ENTITY_TYPE = 0x0D
ENTITY_TYPE_EX = 0x0E
SUBTYPE_START = 0x0F
SUBTYPE_END = 0x10
RECORD_END = 0x11
LITERAL_STR = 0x12 # count ia a 32-bit uint, see transform entity
LOCATION_VEC = 0x13 # vector (3 doubles)
DIRECTION_VEC = 0x14 # vector (3 doubles)
# Enumeration are stored as strings in SAT and ints in SAB.
# It's not possible to translate SAT enums (strings) to SAB enums (int) and
# vice versa without knowing the implementation details. Each enumeration
# is specific to the class where it is used.
ENUM = 0x15
# 0x16: ???
UNKNOWN_0x17 = 0x17 # double
# entity type structure:
# 0x0D 0x04 (char count of) "body" = SAT "body"
# 0x0E 0x05 "plane" 0x0D 0x07 "surface" = SAT "plane-surface"
# 0x0E 0x06 "ref_vt" 0x0E 0x03 "eye" 0x0D 0x06 "attrib" = SAT "ref_vt-eye-attrib"
class Flags(enum.IntFlag):
HAS_HISTORY = 1
class AcisException(Exception):
pass
class InvalidLinkStructure(AcisException):
pass
class ParsingError(AcisException):
pass
class ExportError(AcisException):
pass
class EndOfAcisData(AcisException):
pass
class Features:
LAW_SPL = 400
CONE_SCALING = 400
LOFT_LAW = 400
REF_MIN_UV_GRID = 400
VBLEND_AUTO = 400
BL_ENV_SF = 400
ELLIPSE_OFFSET = 500
TOL_MODELING = 500
APPROX_SUMMARY = 500
TAPER_SCALING = 500
LAZY_B_SPLINE = 500
DM_MULTI_SURF = 500
GA_COPY_ACTION = 600
DM_MULTI_SURF_COLOR = 600
RECAL_SKIN_ERROR = 520
TAPER_U_RULED = 600
DM_60 = 600
LOFT_PCURVE = 600
EELIST_OWNER = 600
ANNO_HOOKED = 700
PATTERN = 700
ENTITY_TAGS = 700
AT = 700
NET_LAW = 700
STRINGLESS_HISTORY = 700
| [
"me@mozman.at"
] | me@mozman.at |
ccb13e3581e5f4fcdbefec32265612838a553659 | b119f7e1f21510928e59cd5b6f16d284d6f868a3 | /djangodocker/djangodocker/urls.py | fbdfd995b634c92a1b518cdfb6644b77ba0590d2 | [] | no_license | a-bautista/Django_Tutorials | 6433d0ee2e9f2cff80ac4f84af150bfa6011de25 | 7fdffc32ac1dcf0e2a2f88d265d8d0265a267b53 | refs/heads/master | 2022-12-13T12:07:30.113489 | 2019-12-13T05:31:31 | 2019-12-13T05:31:31 | 132,667,576 | 0 | 0 | null | 2022-12-08T06:35:51 | 2018-05-08T21:32:11 | Python | UTF-8 | Python | false | false | 1,041 | py | """djangodocker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .views import (home, about)
# connect this application with the dashboard application by including the task.urls
# you get access to the other apps by typing 127.0.0.1:8000/task/create
urlpatterns = [
path('', home),
path('admin/', admin.site.urls),
path('about/', about),
path('task/', include('task.urls'))
]
| [
"alex.bautista.ramos.90@gmail.com"
] | alex.bautista.ramos.90@gmail.com |
ed240718c67426f61c98d597d6846f52ef4543b3 | 82f1c3338ee636ee08ec0009c413b40c495f5c95 | /core/settings/base.py | a8ae00dab226edfffa9ed5a7c2045abf9dd08bf7 | [] | no_license | DevHerles/rest | 6be3714ff43d398aedb9dcf1194b3659a38598aa | a723095d77a454c7259871b3ee980f6c3c40ecc6 | refs/heads/main | 2023-05-30T03:02:48.522541 | 2021-06-04T21:55:08 | 2021-06-04T21:55:08 | 362,266,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,661 | py | from pathlib import Path
from datetime import timedelta
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e+3qek7(i5evq)87ff5d8e@bjsd&q_h)w5qejoojqhhx%$4j+h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
BASE_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
LOCAL_APPS = [
'apps.base',
'apps.common',
'apps.organs',
'apps.work_types',
'apps.organic_units',
'apps.settings',
'apps.users',
'apps.partners',
'apps.healths',
'apps.symptoms',
]
THIRD_APPS = [
'rest_framework.authtoken',
'rest_framework',
'simple_history',
'drf_yasg',
]
INSTALLED_APPS = BASE_APPS + LOCAL_APPS + THIRD_APPS
SWAGGER_SETTINGS = {'DOC_EXPANSION': 'none'}
TOKEN_EXPIRED_AFTER_SECONDS = 900
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
DATE_INPUT_FORMATS = ('%d-%m-%Y', '%Y-%m-%d')
LANGUAGE_CODE = 'es-GB'
TIME_ZONE = 'America/Lima'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'users.User'
CORS_ALLOWED_ORIGINS = ["http://localhost:3000"]
CORS_ORIGIN_WHITELIST = ["http://localhost:3000"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
"DATE_INPUT_FORMATS": ["%Y-%m-%d"],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication',
]
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': True,
'UPDATE_LAST_LOGIN': False,
'ALGORITHM': 'HS256',
'SIGNING_KEY': SECRET_KEY,
'VERIFYING_KEY': None,
'AUDIENCE': None,
'ISSUER': None,
'AUTH_HEADER_TYPES': ('Bearer', 'JWT'),
'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user',
'USER_AUTHENTICATION_RULE':
'rest_framework_simplejwt.authentication.default_user_authentication_rule',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken', ),
'TOKEN_TYPE_CLAIM': 'token_type',
'JTI_CLAIM': 'jti',
'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
| [
"herles.incalla@gmail.com"
] | herles.incalla@gmail.com |
eddc122e28483b47ab38679b8af707a0a5342b2b | dbc216c71fa2cd447d9203bff21f85c48481847b | /python/METConfig_Truth.py | 3635a16b9804cf6f4542a7bcd8a2e563e32c649c | [] | no_license | rjwang/Reconstruction-MET-METReconstruction | be9082dc5a64744948a2cbc5f1a6ac35b3376944 | 2286131d6984cfc5e875ae32c9a4691f61de6ff1 | refs/heads/master | 2021-01-21T16:04:59.181902 | 2016-09-16T21:26:47 | 2016-09-16T21:26:47 | 68,415,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | from METReconstruction.METRecoFlags import metFlags
from METReconstruction.METRecoConfig import BuildConfig, METConfig
from METReconstruction.METAssocConfig import METAssocConfig, AssocConfig
## Simple truth terms
cfg_truth = METConfig('Truth',
[BuildConfig('NonInt'),
BuildConfig('Int'),
BuildConfig('IntOut'),
BuildConfig('IntMuons')],
doRegions=True
)
metFlags.METConfigs()[cfg_truth.suffix] = cfg_truth
metFlags.METOutputList().append(cfg_truth.suffix)
metFlags.METOutputList().append(cfg_truth.suffix+"Regions")
### Truth association maps
#
#############################################################################
## AntiKt4LCTopo
#cfg_truthassoc_akt4lc = METAssocConfig('Truth_AntiKt4LCTopo',
# [AssocConfig('Truth','AntiKt4LCTopoJets')],
# doTruth=True
# )
#
#metFlags.METAssocConfigs()[cfg_truthassoc_akt4lc.suffix] = cfg_truthassoc_akt4lc
#metFlags.METAssocOutputList().append(cfg_truthassoc_akt4lc.suffix)
#
#############################################################################
## AntiKt4EMTopo
#cfg_truthassoc_akt4em = METAssocConfig('Truth_AntiKt4EMTopo',
# [AssocConfig('Truth','AntiKt4EMTopoJets')],
# doTruth=True
# )
#
#metFlags.METAssocConfigs()[cfg_truthassoc_akt4em.suffix] = cfg_truthassoc_akt4em
#metFlags.METAssocOutputList().append(cfg_truthassoc_akt4em.suffix)
#
#############################################################################
## AntiKt4EMPFlow
#
#from RecExConfig.RecFlags import rec
#if rec.doInDet() and metFlags.DoPFlow():
# cfg_truthassoc_akt4pf = METAssocConfig('Truth_AntiKt4EMPFlow',
# [AssocConfig('Truth','AntiKt4EMPFlowJets')],
# doTruth=True
# )
#
# metFlags.METAssocConfigs()[cfg_truthassoc_akt4pf.suffix] = cfg_truthassoc_akt4pf
# metFlags.METAssocOutputList().append(cfg_truthassoc_akt4pf.suffix)
| [
"r.jiewang@gmail.com"
] | r.jiewang@gmail.com |
2eac54655f5b985851187bcd96c6e111a90da1e0 | 8c87341eff7aa9b0face6281ed8644f87b531975 | /models.py | d4610f83b9a2b5b128a708d5ee58be9f8e86667b | [] | no_license | nprapps/breaking-news-facts | 455d27aa5c818ee8d292d81781b17d2cff3ef5e1 | b6aa8d2b4f31c12e8899ce099e2827304cb4500e | refs/heads/master | 2021-01-13T01:30:09.548232 | 2013-10-11T17:23:29 | 2013-10-11T17:23:29 | 10,252,811 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | import datetime
import time
from peewee import *
from app_config import get_secrets
secrets = get_secrets()
psql_db = PostgresqlDatabase('breaking',
user=secrets['APPS_USER'],
password=secrets['APPS_PASS']
)
def delete_tables():
try:
Event.drop_table()
except:
pass
try:
Fact.drop_table()
except:
pass
def create_tables():
Event.create_table()
Fact.create_table()
class Event(Model):
"""
An event with a series of facts.
"""
name = CharField()
start_date = DateField()
class Meta:
database = psql_db
db_table = 'events'
def get_detail_uri(self):
return '/event-%s.json' % self.id
def get_admin_url(self):
return '/admin/events/%s/' % self.id
def __unicode__(self):
return self.name
def primary_facts(self):
return Fact\
.select()\
.join(Event)\
.where(Fact.event == self)\
.where(Fact.related_facts >> None)\
.order_by(Fact.timestamp.desc())
def as_dict(self):
output = {}
output['name'] = self.name
output['start_time'] = time.mktime(self.start_date.timetuple())
output['detail_uri'] = self.get_detail_uri()
return output
class Fact(Model):
"""
An instance of a fact. Related to a master fact.
"""
STATUS_LIST = ['Confirmed: False', 'Confirmed: True', 'Unconfirmed: Not Verifying', 'Unconfirmed: Verifying']
event = ForeignKeyField(Event, null=True)
statement = TextField()
attribution = TextField()
timestamp = DateTimeField()
# Status choices (enforced at the app level, sadly):
# 0 - Has been confirmed as false.
# 1 - Has been confirmed as true.
# 2 - Neither confirmed nor denied nor checking.
# 3 - Checking.
status = IntegerField(default=2)
related_facts = ForeignKeyField('self', null=True)
public = BooleanField(default=False)
approved = BooleanField(default=False)
reporter = CharField()
class Meta:
database = psql_db
db_table = 'facts'
def __unicode__(self):
return self.statement
def status_widget(self):
template = "<select class='form-control'>"
for status in [0,1,2,3]:
template += "<option"
if self.status == status:
template += " selected"
template += ">%s</option>" % self.STATUS_LIST[status]
template += "</select>"
return template
def get_pretty_time(self):
minute = str(self.timestamp.minute).zfill(2)
hour = self.timestamp.strftime('%-I')
ampm = self.timestamp.strftime('%p')
return '%s:%s %s' % (hour, minute, ampm)
def get_status(self):
return self.STATUS_LIST[self.status]
def get_related_facts(self):
if Fact.select().where(Fact.related_facts == self).count() == 0:
return None
return Fact.select().where(Fact.related_facts == self).order_by(Fact.timestamp.desc())
def as_dict(self):
output = dict(self.__dict__['_data'])
output['timestamp'] = time.mktime(output['timestamp'].timetuple())
output['time_string'] = self.timestamp.isoformat()
output.pop('event')
output.pop('related_facts')
return output
| [
"jeremyjbowers@gmail.com"
] | jeremyjbowers@gmail.com |
39e4c0efd14beeb857c28a288b11086173e2d379 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4057/codes/1685_2471.py | 763bcec1f0339cd7ddf8b9131953dd922ec8bebd | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | i = int(input("Idade: "))
m = float(input("Massa corporal: "))
print ("Entradas: ", i, "anos e IMC ", m)
if (i > 0) and (i <= 130)and (m > 0):
if (i < 45) and (m < 22):
print("Risco: Baixo")
elif (i < 45) and (m >= 22):
print("Risco: Medio")
elif (i >= 45) and (m < 22):
print("Risco: Medio")
elif (i >= 45) and (m >= 22):
print("Risco: Alto")
else:
print("Dados invalidos") | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
c2be04decc5965600ac2292cab586ac24015fd4a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_084/ch149_2020_04_13_20_05_51_616666.py | d4effc79069f55ab7427c8bcca855bb2eef68d48 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | s=float(input('qual é o seu salario? '))
d=int(input('qual o numero de dependentes voce tem? '))
bc=0
if s >= 1045.00:
bc=s-s*0.075-d*189.59
elif 1045.01<= s <= 2089.60:
bc=s-s*0.09-d*189.59
elif 2089.61<= s <= 3134.40:
bc=s-s*0.12-d*189.59
elif 3134.41 <= s <= 6101.06:
bc=s-s*0.14-d*189.59
elif 6101.07 <= s:
bc=s-671.12-d*189.59
print(bc)
if s> 1903.98:
IRRF=bc*0-0
elif 1903.99 <= s <= 2826.65:
IRRF=bc*0-0
print(IRRF)
elif 2826.66 <= s <= 3751.05:
IRRF=bc*0.075-142.8
print(IRRF)
elif 3751.06 <= s <= 3751.05:
IRRF=bc*0.15-354.8
print(IRRF)
elif 3751.06 <= s <=4664.68 :
IRRF=bc*0.225-636.13
print(IRRF)
else:
IRRF=bc*0.275-869.36
print(IRRF)
| [
"you@example.com"
] | you@example.com |
77cd6e9cbe4628bb18c47cbeaef453ed29eaa4fa | 2c635d6b558a65e62a9d37c12abf9e4ecbe8938c | /Word Pattern/Word Pattern.py | ebab0efb357c3284674b254e795a3f19b8cfea06 | [] | no_license | GreatStephen/MyLeetcodeSolutions | c698e13b7088fc9236250b6ec10331b88fe99ed1 | 73a8f79f2cd5c769b195c503f0346893b102acdc | refs/heads/master | 2023-03-01T04:53:19.698040 | 2021-02-05T22:28:18 | 2021-02-05T22:28:18 | 284,350,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | class Solution:
def wordPattern(self, pattern: str, str: str) -> bool:
ss = str.split(' ')
l_s, s_l = {}, {}
if len(pattern)!=len(ss):
return False
for letter, s in zip(pattern, ss):
if letter in l_s and s in s_l:
if l_s[letter]!=s or s_l[s]!=letter:
return False
elif letter not in l_s and s not in s_l:
l_s[letter] = s
s_l[s] = letter
else:
return False
return True | [
"litianyou97@gmail.com"
] | litianyou97@gmail.com |
4fc90913119a9447897f1ab6e324c787fdd0a931 | b0365a11976fc19e350ba3c448b2bc3720c3eb73 | /project/qt3/slider1.py | 5d3c85dd67c55740f2c2d7d19a3da8406bc8bcc2 | [] | no_license | excellencemichel/progrk | bd4e6797c21ed921ce4a3d75378ca752cece459d | e3144f78d9313ca9e2c836dcf53cf1bc4b3f10b8 | refs/heads/master | 2021-04-15T07:54:06.240231 | 2018-12-29T03:42:10 | 2018-12-29T03:42:10 | 116,013,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | #! /usr/bin/python
#-*-coding:utf-8-*-
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QApplication,
QMainWindow, QDialog,
QWidget, QVBoxLayout,
QLineEdit, QSlider,
)
class Window(QWidget):
"""
Cette classe nous montre qu'on peut soi-même faire les
dimensions de la fenêtre à la main
"""
def __init__(self):
super().__init__() # Appel du constructeur de la classe QMainWindow
self.title = "PyQt5 Window QSlider Part one"
self.top = 100
self.left = 100
self.width = 400
self.height = 200
self.setWindowIcon(QtGui.QIcon("icons/line.png")) #ça na pas marché
self.init_window()
def init_window(self):
vboxLayout = QVBoxLayout()
self.lineEdit = QLineEdit(self)
vboxLayout.addWidget(self.lineEdit)
self.lineEdit.move(100, 50)
self.slider = QSlider(Qt.Horizontal, self) #Par defaut les slider sorte en verticale
self.slider.move(100, 20)
self.slider.setMinimum(1)
self.slider.setMaximum(99)
self.slider.setValue(20) #Si on donne une veleur minimum >= à setValue cela ne marche pas setValue donne pas alors la valeur par défaut au slider
self.slider.setTickPosition(QSlider.TicksBelow) #Les pointiers en bas
vboxLayout.addWidget(self.slider)
self.setWindowTitle(self.title)
self.setGeometry(self.top, self.left, self.width, self.height)
self.show()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec_())
| [
"bnvnmmnl@gmail.com"
] | bnvnmmnl@gmail.com |
ebdf4950594c969568cdc67d72e3d9eaf158ea10 | 0c6b4e9c5ecc5a7595717f9699953b227486ef3e | /tests/unit/modules/network/slxos/test_slxos_linkagg.py | 12600b9228050e9f902327f04bf5342b5639978e | [] | no_license | ansible-collection-migration/ansible.misc | d9c92e8bb0c17b3e2a92976215f523c2afaa5a46 | 3c02be2a8c03b2e375a1e1f37b0c119145ea358c | refs/heads/master | 2020-12-26T23:11:36.544511 | 2020-02-03T22:18:53 | 2020-02-03T22:18:53 | 237,681,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,064 | py | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible_collections.ansible.misc.tests.unit.compat.mock import patch
from ansible_collections.ansible.misc.tests.unit.modules.utils import set_module_args
from ansible_collections.ansible.misc.plugins.modules import slxos_linkagg
from ..slxos_module import TestSlxosModule, load_fixture
class TestSlxosLinkaggModule(TestSlxosModule):
module = slxos_linkagg
def setUp(self):
super(TestSlxosLinkaggModule, self).setUp()
self._patch_get_config = patch(
'ansible_collections.ansible.misc.plugins.modules.slxos_linkagg.get_config'
)
self._patch_load_config = patch(
'ansible_collections.ansible.misc.plugins.modules.slxos_linkagg.load_config'
)
self._get_config = self._patch_get_config.start()
self._load_config = self._patch_load_config.start()
def tearDown(self):
super(TestSlxosLinkaggModule, self).tearDown()
self._patch_get_config.stop()
self._patch_load_config.stop()
def load_fixtures(self, commands=None):
config_file = 'slxos_config_config.cfg'
self._get_config.return_value = load_fixture(config_file)
self._load_config.return_value = None
def test_slxos_linkagg_group_present(self, *args, **kwargs):
set_module_args(dict(
group='10',
state='present'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface port-channel 10',
'exit'
],
'changed': True
}
)
def test_slxos_linkagg_group_members_active(self, *args, **kwargs):
set_module_args(dict(
group='10',
mode='active',
members=[
'Ethernet 0/1',
'Ethernet 0/2'
]
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface port-channel 10',
'exit',
'interface Ethernet 0/1',
'channel-group 10 mode active',
'interface Ethernet 0/2',
'channel-group 10 mode active'
],
'changed': True
}
)
def test_slxos_linkagg_group_member_removal(self, *args, **kwargs):
set_module_args(dict(
group='20',
mode='active',
members=[
'Ethernet 0/10',
]
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface port-channel 20',
'exit',
'interface Ethernet 0/11',
'no channel-group'
],
'changed': True
}
)
def test_slxos_linkagg_group_members_absent(self, *args, **kwargs):
set_module_args(dict(
group='20',
state='absent'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'no interface port-channel 20'
],
'changed': True
}
)
set_module_args(dict(
group='10',
state='absent'
))
result = self.execute_module(changed=False)
self.assertEqual(
result,
{
'commands': [],
'changed': False
}
)
def test_slxos_linkagg_invalid_argument(self, *args, **kwargs):
set_module_args(dict(
group='10',
shawshank='Redemption'
))
result = self.execute_module(failed=True)
self.assertEqual(result['failed'], True)
self.assertTrue(re.match(
r'Unsupported parameters for \((basic.pyc|basic.py)\) module: '
'shawshank Supported parameters include: aggregate, group, '
'members, mode, purge, state',
result['msg']
))
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
19c155e9dee77e5313030207c70e8e1fbeeee78b | e9f40b2ae17b5bf7f7fba339b00cb59e2cce34fa | /python_basic/OO/class_and_instance/class_demo2.py | 1d6a488c04cb909e22e3d4d6b164df1d56394e27 | [] | no_license | linyouwei/pycharm | 0d8dbfd83fcc88077137bcbec063186ce0fb622c | 246fe3ab855f7614fd05f2d31239170077791822 | refs/heads/master | 2021-01-19T17:38:35.698089 | 2018-03-26T10:12:50 | 2018-03-26T10:12:50 | 101,077,696 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | #encoding=utf8
class Student(object):
def __init__(self,name,score):
self.name = name
self.score = score
def print_score(self):
print("%s:%s"%(self.name,self.score))
def get_grade(self):
if self.score >= 90:
return 'A'
elif self.score >= 60:
return 'B'
else:
return 'C'
if __name__ == "__main__":
bart1= Student("lin",100)
print(bart1.get_grade())
bart1.print_score()
| [
"yjlyw020150@163.com"
] | yjlyw020150@163.com |
ba0073deff1af9e08e786690c7e8f7d3324ce4af | 0905b794ccd3f3e4af9819a3c77505ba43067556 | /reporter/uhl_reports/bioresource/data_quality/redcap.py | aa242c6af30cc7fd9a9d3c4722227c3d1417558e | [
"MIT"
] | permissive | LCBRU/reporter | 57807fd358eee46d37c529e08baa1a76164588f8 | 8cb0ae403346e375a5e99d1d4df375cf2d5f3b81 | refs/heads/master | 2021-09-27T23:22:39.806232 | 2021-09-27T11:34:10 | 2021-09-27T11:34:10 | 88,853,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,494 | py | #!/usr/bin/env python3
from reporter.connections import RedcapInstance
from reporter.application_abstract_reports.redcap.percentage_complete import (
RedcapPercentageCompleteReport,
)
from reporter.application_abstract_reports.redcap.withdrawn_or_excluded_with_data import (
RedcapWithdrawnOrExcludedWithDataReport,
)
from reporter.emailing import (
RECIPIENT_BIORESOURCE_ADMIN as RECIPIENT_ADMIN,
RECIPIENT_BIORESOURCE_MANAGER as RECIPIENT_MANAGER,
RECIPIENT_IT_DQ,
)
from reporter.application_abstract_reports.redcap.web_data_quality import (
RedcapWebDataQuality,
)
from reporter.application_abstract_reports.redcap.data_quality import (
RedcapInvalidDate,
RedcapInvalidStudyNumber,
RedcapRecordInvalidStudyNumber,
RedcapInvalidHeightInCm,
RedcapInvalidHeightInFeetAndInches,
RedcapInvalidWeightInKg,
RedcapInvalidWeightInStonesAndPounds,
)
from reporter.core import Schedule
REDCAP_PROJECT_ID = 9
REDCAP_INSTANCE = RedcapInstance.internal
class BioresRedcapPercentageCompleteReport(RedcapPercentageCompleteReport):
def __init__(self):
super().__init__(
study_name='Bioresource',
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
schedule=Schedule.never,
)
class BioresourceRedcapWithdrawnOrExcludedWithDataReport(
RedcapWithdrawnOrExcludedWithDataReport):
def __init__(self):
super().__init__(
study_name='Bioresource',
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
schedule=Schedule.never,
)
class BioresourceRedcapWebDataQuality(RedcapWebDataQuality):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=REDCAP_PROJECT_ID,
recipients=[RECIPIENT_IT_DQ],
schedule=Schedule.never,
)
class BioresourceRedcapInvalidDate(
RedcapInvalidDate):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=REDCAP_PROJECT_ID,
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class BioresourceRedcapInvalidStudyNumber(
RedcapInvalidStudyNumber):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=REDCAP_PROJECT_ID,
fields=['record_id'],
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class BioresourceRedcapRecordInvalidStudyNumber(
RedcapRecordInvalidStudyNumber):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=REDCAP_PROJECT_ID,
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class BioresourceRedcapInvalidHeightInCm(
RedcapInvalidHeightInCm):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=REDCAP_PROJECT_ID,
fields=['your_height_centimetres'],
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class BioresourceRedcapInvalidHeightInFeetAndInches(
RedcapInvalidHeightInFeetAndInches):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=REDCAP_PROJECT_ID,
feet_field='your_height_feet',
inches_field='your_height_inches',
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class BioresourceRedcapInvalidWeightInKg(
RedcapInvalidWeightInKg):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=REDCAP_PROJECT_ID,
fields=['your_weight_kg'],
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
class BioresourceRedcapInvalidWeightInStonesAndPounds(
RedcapInvalidWeightInStonesAndPounds):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=REDCAP_PROJECT_ID,
stones_field='your_weight_stones',
pounds_field='your_weight_pounds',
recipients=[RECIPIENT_ADMIN],
schedule=Schedule.never,
)
| [
"rabramley@gmail.com"
] | rabramley@gmail.com |
f39c672ffe5160b7086bffe27ce2ab6182a9a372 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_RESOURCES/my-gists/__CONTAINER/_OLD/_python/file_to_string.py | 3a9ebee124dca93b69b2b27ba4087bae66ed4ea7 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 500 | py | #!/usr/bin/env python
import sys
filename = sys.argv[1]
# These do not remove \n
with open(filename) as f:
s = "".join(f.readlines())
with open(filename) as f:
s = "".join(f)
with open(filename) as f:
s = f.read() # Fastest according to my tests.
# These remove \n
with open(filename) as f:
s = " ".join(line.replace("\n", "") for line in f)
with open(filename) as f:
s = " ".join(line.rstrip() for line in f)
with open(filename) as f:
s = f.read().replace("\n", "")
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
16b7fe171308835af2c635735c7bdd7d91120fb0 | 99052370591eadf44264dbe09022d4aa5cd9687d | /build/learning_ros/Part_5/joint_space_planner/catkin_generated/pkg.installspace.context.pc.py | be9fecd76557ed5f3b5b526f243f453739bb4c03 | [] | no_license | brucemingxinliu/ros_ws | 11b1a3e142132925d35b3adf929f1000392c5bdc | 45f7e553ea20b79e3e93af5f77a1b14b64184875 | refs/heads/master | 2021-01-24T03:36:47.043040 | 2018-02-26T00:53:37 | 2018-02-26T00:53:37 | 122,892,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/toshiki/ros_ws/install/include".split(';') if "/home/toshiki/ros_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ljoint_space_planner".split(';') if "-ljoint_space_planner" != "" else []
PROJECT_NAME = "joint_space_planner"
PROJECT_SPACE_DIR = "/home/toshiki/ros_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"mxl592@case.edu"
] | mxl592@case.edu |
7f0f66410133136707d71e99d19bc7bc6c5702bd | 35271f6bd874799df9a93dbe5bcc50272b619dc1 | /ML/Pytorch/Basics/pytorch_rnn_gru_lstm.py | 7f3646cb6e7ef201b2942163c3ed1d7e44f6a136 | [
"MIT"
] | permissive | aladdinpersson/Machine-Learning-Collection | c724186b64ae52efa6f9d4e97f37477900901d35 | 558557c7989f0b10fee6e8d8f953d7269ae43d4f | refs/heads/master | 2023-08-31T20:52:06.493437 | 2023-03-21T11:44:08 | 2023-03-21T11:44:08 | 250,184,708 | 5,653 | 2,543 | MIT | 2023-09-02T03:51:36 | 2020-03-26T07:02:40 | Python | UTF-8 | Python | false | false | 5,541 | py | """
Example code of a simple RNN, GRU, LSTM on the MNIST dataset.
Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
* 2020-05-09 Initial coding
* 2022-12-16 Updated with more detailed comments, docstrings to functions, and checked code still functions as intended.
"""
# Imports
import torch
import torch.nn.functional as F # Parameterless functions, like (some) activation functions
import torchvision.datasets as datasets # Standard datasets
import torchvision.transforms as transforms # Transformations we can perform on our dataset for augmentation
from torch import optim # For optimizers like SGD, Adam, etc.
from torch import nn # All neural network modules
from torch.utils.data import (
DataLoader,
) # Gives easier dataset managment by creating mini batches etc.
from tqdm import tqdm # For a nice progress bar!
# Set device
device = "cuda" if torch.cuda.is_available() else "cpu"
# Hyperparameters
input_size = 28
hidden_size = 256
num_layers = 2
num_classes = 10
sequence_length = 28
learning_rate = 0.005
batch_size = 64
num_epochs = 3
# Recurrent neural network (many-to-one)
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
def forward(self, x):
# Set initial hidden and cell states
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
# Forward propagate LSTM
out, _ = self.rnn(x, h0)
out = out.reshape(out.shape[0], -1)
# Decode the hidden state of the last time step
out = self.fc(out)
return out
# Recurrent neural network with GRU (many-to-one)
class RNN_GRU(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN_GRU, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
def forward(self, x):
# Set initial hidden and cell states
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
# Forward propagate LSTM
out, _ = self.gru(x, h0)
out = out.reshape(out.shape[0], -1)
# Decode the hidden state of the last time step
out = self.fc(out)
return out
# Recurrent neural network with LSTM (many-to-one)
class RNN_LSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes):
super(RNN_LSTM, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size * sequence_length, num_classes)
def forward(self, x):
# Set initial hidden and cell states
h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
# Forward propagate LSTM
out, _ = self.lstm(
x, (h0, c0)
) # out: tensor of shape (batch_size, seq_length, hidden_size)
out = out.reshape(out.shape[0], -1)
# Decode the hidden state of the last time step
out = self.fc(out)
return out
# Load Data
train_dataset = datasets.MNIST(
root="dataset/", train=True, transform=transforms.ToTensor(), download=True
)
test_dataset = datasets.MNIST(
root="dataset/", train=False, transform=transforms.ToTensor(), download=True
)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
model = RNN_LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train Network
for epoch in range(num_epochs):
for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
# Get data to cuda if possible
data = data.to(device=device).squeeze(1)
targets = targets.to(device=device)
# forward
scores = model(data)
loss = criterion(scores, targets)
# backward
optimizer.zero_grad()
loss.backward()
# gradient descent update step/adam step
optimizer.step()
# Check accuracy on training & test to see how good our model
def check_accuracy(loader, model):
num_correct = 0
num_samples = 0
# Set model to eval
model.eval()
with torch.no_grad():
for x, y in loader:
x = x.to(device=device).squeeze(1)
y = y.to(device=device)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
# Toggle model back to train
model.train()
return num_correct / num_samples
print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:2f}")
print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
| [
"aladdin.persson@hotmail.com"
] | aladdin.persson@hotmail.com |
0b19dbee65c1f62954819d4263d13f2b84de00f3 | c1c5a8dc79cacf3b419bad77881213c5db2f80c3 | /Kattis/Appalling_Architecture.py | 21abef0663c911871759173c181ea7ffba02d6ae | [] | no_license | EoinDavey/Competitive | 7ff8b6b6225814ac60c3ace659bb63190eb52420 | b2b6909b93f5c073b684477f8a4b06dac22ec678 | refs/heads/master | 2023-01-08T00:06:19.076941 | 2022-12-26T14:00:31 | 2022-12-26T14:00:31 | 67,259,478 | 17 | 1 | null | 2022-01-19T18:17:59 | 2016-09-02T22:46:26 | C++ | UTF-8 | Python | false | false | 427 | py | h, w = [int(x) for x in input().split()]
l = ""
sm = 0
total = 0
for _ in range(h):
l = input()
for i in range(w):
if l[i] == '.':
continue
sm += i
total += 1
lft = 0
for i in range(w):
if l[i] != '.':
lft = i - 0.5
break
rght = 0
for i in range(w):
if l[i] != '.':
rght = i + 0.5
if sm < lft * total:
print("left")
elif sm > rght * total:
print("right")
else:
print("balanced")
| [
"eoind@vey.ie"
] | eoind@vey.ie |
f7e1145af886703fccb6c648225954a86303bf15 | 84f7ab8ae18acda2b15c3118ac18c4e8c0df1a73 | /tests/evaluator_test.py | 560752062aa9f9cb9a5bf3d2f5a95a63dfff39d9 | [
"Apache-2.0"
] | permissive | lejarx/gafe | 35ef45ec041d7bd76c973c841a01a478b4ba137c | 125d587e39dd2eb94fba6667fffa6d07e508542f | refs/heads/master | 2021-07-08T15:46:12.657591 | 2017-10-03T08:47:59 | 2017-10-03T08:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | '''
Evaluator tests.
'''
import os
import unittest
import numpy as np
from sklearn.datasets import load_iris
from gafe.evaluator import Evaluator
class EvaluatorTest(unittest.TestCase):
def test_init(self):
eva = Evaluator()
self.assertEqual(eva._metric, 'neg_log_loss')
self.assertEqual(eva._cv_folds, 5)
def test_evolve(self):
iris = load_iris()
X = iris.data
y = iris.target
eva = Evaluator()
score = eva.evaluate(X, y)
self.assertTrue(score < 0.25)
score = eva.evaluate(X, y, X_vald=X, y_vald=y)
self.assertTrue(score < 0.25)
| [
"pplonski86@gmail.com"
] | pplonski86@gmail.com |
9de54d8964ab1708694daac3c7a203950e994384 | c55074cae33470f6a6f994b6029425a85818496e | /ci/push/push_request_status.py | ecdf79408a0a7f0f92ca547ca33b282c1840756e | [
"BSD-3-Clause"
] | permissive | marcalbaladejo/CumulusCI | 4ebf590e71f4847f157b33f47da775070e0c7feb | f619d0d984f7bbfa3c6fcd6e33e41e64105cb4f5 | refs/heads/master | 2021-01-18T18:45:06.098227 | 2018-05-28T12:58:22 | 2018-05-28T12:58:22 | 62,956,012 | 1 | 1 | BSD-3-Clause | 2018-05-28T12:58:23 | 2016-07-09T15:32:32 | Python | UTF-8 | Python | false | false | 3,568 | py | import os
import sys
import csv
import time
from push_api import SalesforcePushApi
# Force UTF8 output
reload(sys)
sys.setdefaultencoding('UTF8')
completed_statuses = ['Succeeded','Failed','Cancelled']
if __name__ == '__main__':
try:
username = os.environ.get('SF_USERNAME')
password = os.environ.get('SF_PASSWORD')
serverurl = os.environ.get('SF_SERVERURL')
push_request_id = os.environ.get('PUSH_REQUEST')
subscriber_where = os.environ.get('SUBSCRIBER_WHERE', None)
default_where = {'PackagePushRequest': "Id = '%s'" % push_request_id}
if subscriber_where:
default_where['PackageSubscriber'] = subscriber_where
push_api = SalesforcePushApi(username, password, serverurl, lazy=['subscribers','jobs'], default_where=default_where)
push_request = push_api.get_push_request_objs("Id = '%s'" % push_request_id, limit=1)[0]
interval = 10
if push_request.status not in completed_statuses:
print 'Push request is not yet complete. Polling for status every %s seconds until completion...' % interval
i = 0
while push_request.status not in completed_statuses:
if i == 10:
print 'This is taking a while! Polling every 60 seconds...'
interval = 60
time.sleep(interval)
# Clear the method level cache on get_push_requests and get_push_request_objs
push_api.get_push_requests.cache.clear()
push_api.get_push_request_objs.cache.clear()
# Get the push_request again
push_request = push_api.get_push_request_objs("Id = '%s'" % push_request_id, limit=1)[0]
print push_request.status
i += 1
failed_jobs = []
success_jobs = []
cancelled_jobs = []
jobs = push_request.get_push_job_objs()
for job in jobs:
if job.status == 'Failed':
failed_jobs.append(job)
elif job.status == 'Succeeded':
success_jobs.append(job)
elif job.status == 'Cancelled':
cancelled_jobs.append(job)
print "Push complete: %s succeeded, %s failed, %s cancelled" % (len(success_jobs),len(failed_jobs),len(cancelled_jobs))
failed_by_error = {}
for job in failed_jobs:
errors = job.get_push_error_objs()
for error in errors:
error_key = (error.error_type, error.title, error.message, error.details)
if error_key not in failed_by_error:
failed_by_error[error_key] = []
failed_by_error[error_key].append(error)
if failed_jobs:
print ""
print "-----------------------------------"
print "Failures by error type"
print "-----------------------------------"
for key, errors in failed_by_error.items():
print " "
print "%s failed with..." % (len(errors))
print " Error Type = %s" % key[0]
print " Title = %s" % key[1]
print " Message = %s" % key[2]
print " Details = %s" % key[3]
except SystemExit:
sys.exit(1)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(2)
| [
"jlantz@salesforce.com"
] | jlantz@salesforce.com |
f3f0a5e9be18f742ea1dfe2ba0c45ba7c077fe17 | 92209cc6de47e868dfaddae2e61048e40c7dfe66 | /irc3/dec.py | 8a969e4b7512fb7d818f2122dff763ef1c53f483 | [
"LicenseRef-scancode-ietf",
"CC-BY-3.0"
] | permissive | valhallasw/irc3 | 5f2305ca6c8df764da9a2ed0ba2eb1dda67dfb1f | 628d1345cb5b09d90b087ae23d5caf26b25a2e7d | refs/heads/master | 2020-12-28T23:15:30.414896 | 2014-12-23T18:01:19 | 2014-12-23T18:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
import venusian
import re
def plugin(wrapped):
"""register a class as plugin"""
setattr(wrapped, '__irc3_plugin__', True)
setattr(wrapped, '__irc3d_plugin__', False)
return wrapped
class event(object):
"""register a method or function an irc event callback::
>>> @event('^:\S+ 353 [^&#]+(?P<channel>\S+) :(?P<nicknames>.*)')
... def on_names(bot, channel=None, nicknames=None):
... '''this will catch nickname when you enter a channel'''
... print(channel, nicknames.split(':'))
The callback can be either a function or a plugin method
If you specify the `iotype` parameter to `"out"` then the event will be
triggered when the regexp match something **sent** by the bot.
For example this event will repeat private messages sent by the bot to the
`#irc3` channel::
>>> @event(r'PRIVMSG (?P<target>[^#]+) :(?P<data>.*)', iotype='out')
... def msg3(bot, target=None, data=None):
... bot.privmsg('#irc3',
... '<{0}> {1}: {2}'.format(bot.nick, target, data))
"""
venusian = venusian
def __init__(self, regexp, callback=None, iotype='in',
venusian_category='irc3.rfc1459'):
try:
re.compile(getattr(regexp, 're', regexp))
except Exception as e:
raise e.__class__(str(e) + ' in ' + getattr(regexp, 're', regexp))
self.regexp = regexp
self.iotype = iotype
self.callback = callback
self.venusian_category = venusian_category
def async_callback(self, kwargs): # pragma: no cover
return self.callback(**kwargs)
def compile(self, config):
regexp = getattr(self.regexp, 're', self.regexp)
if config:
regexp = regexp.format(**config)
self.cregexp = re.compile(regexp)
def __call__(self, wrapped):
def callback(context, name, ob):
obj = context.context
if info.scope == 'class':
self.callback = getattr(
obj.get_plugin(ob),
wrapped.__name__)
else:
@functools.wraps(wrapped)
def wrapper(**kwargs):
return wrapped(obj, **kwargs)
self.callback = wrapper
# a new instance is needed to keep this related to *one* bot
# instance
e = self.__class__(self.regexp, self.callback,
venusian_category=self.venusian_category,
iotype=self.iotype)
obj.attach_events(e)
info = self.venusian.attach(wrapped, callback,
category=self.venusian_category)
return wrapped
def __repr__(self):
s = getattr(self.regexp, 'name', self.regexp)
name = self.__class__.__name__
return '<bound {0} {1} to {2}>'.format(name, s, self.callback)
def extend(func):
"""Allow to extend a bot:
Create a module with some usefull routine:
.. literalinclude:: ../examples/myextends.py
..
>>> import sys
>>> sys.path.append('examples')
>>> from irc3 import IrcBot
>>> IrcBot.defaults.update(async=False, testing=True)
Now you can use those routine in your bot::
>>> bot = IrcBot()
>>> bot.include('myextends')
>>> print(bot.my_usefull_function(1))
my_usefull_function(*(1,))
>>> print(bot.my_usefull_method(2))
my_usefull_method(*(2,))
"""
def callback(context, name, ob):
obj = context.context
if info.scope == 'class':
@functools.wraps(func)
def f(self, *args, **kwargs):
plugin = obj.get_plugin(ob)
return getattr(plugin, func.__name__)(*args, **kwargs)
setattr(obj, func.__name__, f.__get__(obj, obj.__class__))
else:
setattr(obj, func.__name__, func.__get__(obj, obj.__class__))
info = venusian.attach(func, callback, category='irc3.extend')
return func
| [
"gael@gawel.org"
] | gael@gawel.org |
a858f211dff6a15e0d298437b89542752845bdc2 | a33098d9f7f7402d07c7bb0663e260cab4772fd2 | /src/users/posts/form.py | 2dc9c69af1abf8af4a88703290b5b9829adf8efe | [] | no_license | EgbieAndersonUku1/myBlog | 7906803c5c2f4300f1bcc672f397045894cc65b2 | e4344064012aefa79042ba8d39911b29fb5b7554 | refs/heads/master | 2018-09-08T09:28:25.532806 | 2018-06-04T22:45:48 | 2018-06-04T22:45:48 | 106,434,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from flask_ckeditor import CKEditorField
from flask_wtf.file import FileField, FileAllowed
from users.base.base_ckeditor import BaseCKEditorForm
from wtforms import validators
class PostForm(BaseCKEditorForm):
post = CKEditorField('body', validators=[validators.DataRequired()])
image = FileField('Post image', validators=[FileAllowed(['png', 'jpeg', 'jpg', 'gif'],
'Only the file extension jpg, png, gif and jpeg are allowed')])
| [
"jayunderwood2011@hotmail.com"
] | jayunderwood2011@hotmail.com |
ef1b5b110530027b90cb6abec967fb2dd7351f1a | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/tests/class_setattr.py | 2c965ee92a1f411d2b66a16ec117e84a366a141f | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 365 | py | # expected: fail
class C(object):
pass
# Make sure we can't skirt the tp_slot-updating logic in type.__setattr__
# by trying to use object.__setattr__ which wouldn't do the internal bookkeeping:
def badrepr():
raise Exception()
c = C()
c.a = 1
try:
object.__setattr__(C, '__repr__', badrepr)
assert 0
except TypeError as e:
print e
c.b = 2
| [
"kmod@dropbox.com"
] | kmod@dropbox.com |
7718d73f031823f1b9ebf57030912b374108f3ba | 34b09bc83e5726fccb524a93cf2742f5aeadedef | /1. List1/3.py | ea7f0c9ec964633f5a8273b37e01bb56d9eee59c | [] | no_license | mjson1954/WIC | 57eb20ffe7aaf8695d679c893efacdeede573e72 | 670112209aacd274d09f6e9a89d948120486bfc8 | refs/heads/master | 2023-03-20T00:57:19.740025 | 2021-03-05T10:52:51 | 2021-03-05T10:52:51 | 289,925,829 | 0 | 0 | null | 2021-02-21T02:16:11 | 2020-08-24T12:46:58 | Python | UTF-8 | Python | false | false | 360 | py | T = int(input())
for test_case in range(1, T + 1):
N=int(input())
count=[0 for _ in range(10)]
num=input()
for j in range(len(num)):
count[int(num[j])]+=1
max_value=max(count)
for j in range(len(count)):
if(count[j]==max_value):
max_index=j
print("#{0} {1} {2}".format(test_case, max_index, max_value))
| [
"mjson1954@gmail.com"
] | mjson1954@gmail.com |
a48fc9cb60eb6c923be2d70b30f0f7886cc487cc | 51f2492a5c207e3664de8f6b2d54bb93e313ca63 | /atcoder/abc047/c.py | cae6160fc3353b018fde5958ce7efe990bf07b41 | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | abeaumont/competitive-programming | 23c5aabd587d7bb15a61efd3428838cb934233dd | a24c9b89941a59d344b51dc1010de66522b1a0dd | refs/heads/master | 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 | WTFPL | 2023-07-12T17:36:20 | 2018-01-15T20:00:56 | C++ | UTF-8 | Python | false | false | 158 | py | #!/usr/bin/env python3
# https://abc047.contest.atcoder.jp/tasks/arc063_a
s = input()
c = 0
for i in range(len(s) - 1):
if s[i] != s[i + 1]: c += 1
print(c)
| [
"alfredo.beaumont@gmail.com"
] | alfredo.beaumont@gmail.com |
eddde73e43c26cf544ab18b8129edda1c503753b | f8e0a0584f0a808311085996597389c9592025af | /news/models.py | 9a7f0d29ffb421a42c0e1a8c628f0f54155a2412 | [] | no_license | virginiah894/Moringa-Tribune | 5073e93d38538185820630c3933b48e183e92209 | 2af5daabad0bdd7f2895f7bd28816d7ad975ad9a | refs/heads/master | 2021-09-09T20:57:37.903815 | 2019-12-13T08:45:46 | 2019-12-13T08:45:46 | 227,795,226 | 0 | 0 | null | 2021-09-08T01:31:55 | 2019-12-13T08:45:02 | Python | UTF-8 | Python | false | false | 1,182 | py | from django.db import models
import datetime as dt
class Editor(models.Model):
first_name = models.CharField(max_length =30)
last_name = models.CharField(max_length =30)
email = models.EmailField()
phone_number = models .CharField(max_length=10,blank=True)
def __str__(self):
return self.first_name
def save_editor(self):
self.save()
class Meta:
ordering = ['first_name']
class tags(models.Model):
name = models.CharField(max_length = 30)
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length=60)
post = models.TextField()
editor = models.ForeignKey(Editor)
tags = models.ManyToManyField(tags)
pub_date = models.DateTimeField(auto_now_add=True)
article_image = models.ImageField(upload_to = 'articles/')
@classmethod
def todays_news(cls):
today = dt.date.today()
news = cls.objects.filter(pub_date__date = today)
return news
@classmethod
def days_news(cls,date):
news = cls.objects.filter(pub_date__date = date)
return news
@classmethod
def search_by_title(cls,search_term):
news= cls.objects.filter(title__icontains=search_term)
return news
| [
"virgyperry@gmail.com"
] | virgyperry@gmail.com |
1b2931848fb0d2a2684a071cf19b28957ec21eef | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /sDvjdcBrbHoXKvDsZ_9.py | edb3defdc96258659cf2fc8e61b896bc9232188e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | """
Write a function that returns `True` if a given name can generate an array of
words.
### Examples
anagram("Justin Bieber", ["injures", "ebb", "it"]) ➞ True
anagram("Natalie Portman", ["ornamental", "pita"]) ➞ True
anagram("Chris Pratt", ["chirps", "rat"]) ➞ False
# Not all letters are used
anagram("Jeff Goldblum", ["jog", "meld", "bluffs"]) ➞ False
# "s" does not exist in the original name
### Notes
* Each letter in the name may only be used once.
* All letters in the name must be used.
"""
def anagram(name, words):
    """Return True when *words* together use exactly the letters of *name*.

    Case-insensitive; whitespace in the name is ignored. Every letter of the
    name must be consumed exactly once across all of the words.
    """
    def letters(text):
        # Canonical form for comparison: lowercase characters, sorted.
        return sorted(text.lower())
    return letters(''.join(name.split())) == letters(''.join(words))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b3f7befc0eebb80abe6961bb65b8ef7294dceba2 | 8b6b6ef3ac079aabcc0c44243447388bef87f671 | /Projects/customDS.py | 2ae2a9d67492f47a2805f04e91bd174016fe2e62 | [] | no_license | ava6969/DataStructureAlgorithm | c4f35f2b616cd0393050c89b4c42bbad81c5ebcf | 6e88c4aa2b18765d7c4f8a0d3bca5c62260cb0d2 | refs/heads/master | 2022-12-03T18:35:11.994182 | 2020-08-05T15:35:40 | 2020-08-05T15:35:40 | 283,895,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,778 | py | class Node:
    def __init__(self, value):
        # Payload carried by this node; `next` links to the following node.
        self.value = value
        self.next = None
class LinkedList:
    """Singly linked list.

    `flatten` is intended for a nested list whose node values are themselves
    simple LinkedLists; it merges them (via the module-level `merge`) into one
    sorted LinkedList.
    """
    def __init__(self, head):
        # `head` is the first Node, or None for an empty list.
        self.head = head
    def append(self, value):
        # Wrap `value` in a new Node and attach it at the tail (O(n) walk).
        if self.head is None:
            self.head = Node(value)
            return
        # Move to the tail (the last node)
        node = self.head
        while node.next:
            node = node.next
        node.next = Node(value)
        return
    def to_list(self):
        # Collect node values front-to-back into a plain Python list.
        lst = []
        ptr = self.head
        while ptr:
            lst.append(ptr.value)
            ptr = ptr.next
        return lst
    def flatten(self):
        return self._flatten(self.head) # <-- self.head is a node for NestedLinkedList
    ''' A recursive function '''
    def _flatten(self, node):
        # A termination condition
        if node.next is None:
            return merge(node.value, None) # <-- First argument is a simple LinkedList
        # _flatten() is calling itself untill a termination condition is achieved
        return merge(node.value, self._flatten(node.next)) # <-- Both arguments are a simple LinkedList each
    def __repr__(self):
        # NOTE(review): ' '.join requires the flattened values to be strings;
        # numeric payloads would need str() here -- confirm intended usage.
        return ' '.join([w for w in self.flatten().to_list()])
# util functions
def merge(list1, list2):
    """Merge two sorted LinkedLists into a new sorted LinkedList.

    Either argument may be None, in which case the other list is returned
    unchanged. The inputs are not modified.

    BUG FIX: the original appended the Node objects themselves
    (`merged.append(list1_elt)`), so the merged list's node values were Nodes.
    A second-level merge -- exactly what `LinkedList._flatten` performs -- then
    compared raw values against Node objects and raised TypeError. Appending
    the element *values* keeps every merged list flat and comparable.
    """
    merged = LinkedList(None)
    if list1 is None:
        return list2
    if list2 is None:
        return list1
    list1_elt = list1.head
    list2_elt = list2.head
    while list1_elt is not None or list2_elt is not None:
        if list1_elt is None:
            # list1 exhausted: drain the remaining list2 values.
            merged.append(list2_elt.value)
            list2_elt = list2_elt.next
        elif list2_elt is None:
            # list2 exhausted: drain the remaining list1 values.
            merged.append(list1_elt.value)
            list1_elt = list1_elt.next
        elif list1_elt.value <= list2_elt.value:
            merged.append(list1_elt.value)
            list1_elt = list1_elt.next
        else:
            merged.append(list2_elt.value)
            list2_elt = list2_elt.next
    return merged
class HuffBaseNode:
    """Common behaviour for Huffman-tree nodes.

    Stores a frequency weight plus an optional bit-code slot; `+` and the
    rich comparisons all delegate to ``weight()`` so nodes can be ordered
    directly while the tree is being built.
    """

    def __init__(self, weight):
        self._weight = weight  # frequency total for this subtree
        self.code = None       # bit string assigned during encoding, if any

    def weight(self):
        """Frequency weight of this node."""
        return self._weight

    def is_leaf(self):
        """Subclasses decide; the base class has no answer."""
        return NotImplemented

    def __add__(self, other):
        combined = self.weight() + other.weight()
        return combined

    def __lt__(self, other):
        return self.weight() < other.weight()

    def __gt__(self, other):
        return self.weight() > other.weight()

    def __eq__(self, other):
        # Equality is weight-based only. Note defining __eq__ clears the
        # default __hash__, leaving instances unhashable.
        return self.weight() == other.weight()
class HuffLeafNode(HuffBaseNode):
    """Leaf node: one symbol (``element``) together with its frequency weight."""

    def __init__(self, element, weight):
        super().__init__(weight)
        self.element = element
        self.visited = False  # traversal bookkeeping flag

    def value(self):
        """The symbol stored at this leaf."""
        return self.element

    def is_leaf(self):
        # Leaves terminate the tree.
        return True

    def __repr__(self):
        return f"el: {self.element}, wt: {self._weight}"
class HuffInternalNode(HuffBaseNode):
    """Internal node: joins two subtrees under one combined frequency weight."""

    def __init__(self, weight, left, right):
        super().__init__(weight)
        self.left = left
        self.right = right

    def is_leaf(self):
        # Internal nodes always have children.
        return False

    def __repr__(self):
        tabs = '\t'
        left_repr = repr(self.left)
        right_repr = repr(self.right)
        return f'\n{tabs}weight: {self._weight}\n{tabs}left: {left_repr}\n{tabs}right: {right_repr}'
class HuffTree:
    """Thin wrapper around a root Huffman node; trees order by total weight."""

    def __init__(self, node):
        self.root = node

    def _root(self):
        # Accessor kept for compatibility with existing callers.
        return self.root

    def weight(self):
        """Total frequency weight of the whole tree (the root's weight)."""
        return self.root.weight()

    def __repr__(self):
        return 'root:' + repr(self.root)

    def __add__(self, other):
        return self.root.weight() + other.weight()

    def __lt__(self, other):
        return self.root.weight() < other.weight()

    def __gt__(self, other):
        return self.root.weight() > other.weight()

    def __eq__(self, other):
        return self.root.weight() == other.weight()
"ava6969@rit.edu"
] | ava6969@rit.edu |
819d3872ba14d41ded94549d3c76b5a1426f8f46 | 56451b41a2a5f58ea3a1eaa265ab4bda3bf4a54e | /util/dataset_loader.py | 49b42fc9aa289da317860e4a7e9bcf2186671c79 | [] | no_license | peternara/Temperature-Scaling-Modesty-Loss | 9c6285953b0012f00386092264d96a404f9dfcd8 | 7b6faadc2ac2ee989fdf80d674232800c337abda | refs/heads/master | 2021-09-20T15:04:41.996247 | 2018-08-11T00:25:59 | 2018-08-11T00:25:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | from torchvision import transforms, datasets
import torch
def load_data(d, train=False, batch_size=100):
    """Build a DataLoader for one of the supported benchmark datasets.

    :param d: dataset name -- "CIFAR10", "CIFAR100", "ImageNet" or "SVHN"
    :param train: load the training split when True, the test split otherwise
    :param batch_size: number of samples per batch
    :raises TypeError: for an unrecognised dataset name
    """
    if d == "CIFAR10":
        preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ])
        cifar10 = datasets.CIFAR10(root='Dataset/', train=train, download=True, transform=preprocess)
        return torch.utils.data.DataLoader(cifar10, batch_size=batch_size, shuffle=False)
    if d == "CIFAR100":
        preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ])
        cifar100 = datasets.CIFAR100(root='Dataset/', train=train, download=True, transform=preprocess)
        return torch.utils.data.DataLoader(cifar100, batch_size=batch_size, shuffle=False)
    if d == "ImageNet":
        # ImageNet has no train/test switch here: a fixed local folder is used.
        preprocess = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        return torch.utils.data.DataLoader(datasets.ImageFolder("Dataset/ILSVRC", preprocess),
                                           batch_size=batch_size, shuffle=False)
    if d == "SVHN":
        preprocess = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        split = "train" if train else "test"
        svhn = datasets.SVHN(root='Dataset/', split=split, download=True, transform=preprocess)
        return torch.utils.data.DataLoader(svhn, batch_size=batch_size)
    raise TypeError("Dataset inconnu")
| [
"noreply@github.com"
] | peternara.noreply@github.com |
06a27a5131b47c30f58ea7a14ae0ebb90452cdd7 | d7b9b490c954c7a9160b69f8ce2c907ef4681ecb | /sponsors/migrations/0020_sponsorshipbenefit_unavailable.py | 35c842d1e237e7274eb7082a46c37a7648d461c2 | [
"Apache-2.0"
] | permissive | python/pythondotorg | 00db93a4b1789a4d438806d106d9cee3349ad78c | c4ee749942227ca75c8e670546afe67232d647b2 | refs/heads/main | 2023-08-28T20:04:24.735314 | 2023-08-03T19:12:29 | 2023-08-03T19:12:29 | 6,127,047 | 1,131 | 646 | Apache-2.0 | 2023-08-24T15:57:04 | 2012-10-08T16:00:15 | Python | UTF-8 | Python | false | false | 588 | py | # Generated by Django 2.0.13 on 2021-02-26 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (2.0.13); adds the `unavailable` flag to
    # SponsorshipBenefit so a benefit can be hidden from applicants.
    dependencies = [
        ("sponsors", "0019_sponsor_twitter_handle"),
    ]
    operations = [
        migrations.AddField(
            model_name="sponsorshipbenefit",
            name="unavailable",
            field=models.BooleanField(
                default=False,
                help_text="If selected, this benefit will not be available to applicants.",
                verbose_name="Benefit is unavailable",
            ),
        ),
    ]
| [
"noreply@github.com"
] | python.noreply@github.com |
d2f9d512547e26c1ad69be07364a71dcada3972a | 30323e6d5e179994cc25438def9de3dfc07be4a5 | /src/aulas/06.py | 3c4f9ebdb4be7f0f40b9c691bef140cfa1130c9b | [] | no_license | claudimf/python_oo_2 | 31f9c065be6bd9905fe85c6ea5b8cc715cc4e463 | 76b23a0a60433fbe62775aae9e1f0cd8af0b324b | refs/heads/main | 2023-03-18T09:09:48.044179 | 2021-03-11T19:23:44 | 2021-03-11T19:23:44 | 346,146,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | class Programa:
    def __init__(self, nome, ano):
        # Title-case the name for display; the like counter starts at zero.
        self._nome = nome.title()
        self.ano = ano
        self._likes = 0
    @property
    def likes(self):
        # Read-only like counter (increment via dar_like()).
        return self._likes
    def dar_like(self):
        # Register one like.
        self._likes += 1
    @property
    def nome(self):
        # Display name, kept in title case.
        return self._nome
    @nome.setter
    def nome(self, novo_nome):
        # Re-apply title casing whenever the name is replaced.
        self._nome = novo_nome.title()
class Filme(Programa):
    """A movie: a Programa with a running time in minutes."""

    def __init__(self, nome, ano, duracao):
        super().__init__(nome, ano)
        self.duracao = duracao

    def imprime(self):
        """Print a one-line summary of the movie."""
        resumo = f'{self._nome} - {self.ano} - {self.duracao} min - {self._likes} Likes'
        print(resumo)
class Serie(Programa):
    """A TV series: a Programa with a number of seasons."""

    def __init__(self, nome, ano, temporadas):
        super().__init__(nome, ano)
        self.temporadas = temporadas

    def imprime(self):
        """Print a one-line summary of the series."""
        resumo = f'{self._nome} - {self.ano} - {self.temporadas} temporadas - {self._likes} Likes'
        print(resumo)
# Demo: build one movie and one series, add likes, then print both.
vingadores = Filme('vingadores - guerra infinita', 2018, 160)
vingadores.dar_like()
atlanta = Serie('atlanta', 2018, 2)
atlanta.dar_like()
atlanta.dar_like()
# Polymorphism: both classes expose imprime(), so one loop handles both.
filmes_e_series = [vingadores, atlanta]
print('\nimprimindo...\n')
for programa in filmes_e_series:
    programa.imprime()
"claudi.freitas.prs@synergiaconsultoria.com.br"
] | claudi.freitas.prs@synergiaconsultoria.com.br |
56561bec3c40305d5c936acd30ffbfb98423bb19 | c6a4069e265325e836e4ee79fae0f5490f1a1c47 | /main/fight.py | ecdf67993300f502fc89fad3b4f83cdf03f39bd1 | [] | no_license | astoeff/clean-code-course-project | b2ca1d10b226ea95b602d2535810c9af5aadb244 | 2b64956ea1b33cba405ccd500bf1a5472a65e9c4 | refs/heads/master | 2022-11-19T05:04:20.992189 | 2020-07-17T17:12:59 | 2020-07-17T17:12:59 | 274,676,681 | 0 | 0 | null | 2020-07-17T17:13:00 | 2020-06-24T13:32:49 | Python | UTF-8 | Python | false | false | 2,299 | py | from constants import (DIRECTIONS_WITH_THEIR_OPPOSITES_DICTIONARY, FIGHT_INITIAL_INFORMATION_PART,
PLAYER_ZERO_DAMAGE_WHEN_ATTACKING, FIGHT_HERO_ATTACK_INFORMATION_PART,
FIGHT_HERO_CANNOT_ATTACK_INFORMATION_PART, FIGHT_ENEMY_ATTACK_INFORMATION_PART,
FIGHT_ENEMY_CANNOT_ATTACK_INFORMATION_PART)
class Fight:
    """Turn-based fight between a hero and an enemy, recording an event log."""
    def __init__(self, hero, enemy, distance=0, direction=0):
        # `direction` is used as a key into
        # DIRECTIONS_WITH_THEIR_OPPOSITES_DICTIONARY (see `oposite_direction`).
        self.hero = hero
        self.enemy = enemy
        self.distance = distance
        self.direction = direction
        # Running log of fight events, seeded with the enemy introduction line.
        self.information_parts = [FIGHT_INITIAL_INFORMATION_PART + str(self.enemy)]
    @property
    def oposite_direction(self):
        # NOTE(review): property keeps the original spelling ("oposite");
        # renaming would break existing callers.
        return DIRECTIONS_WITH_THEIR_OPPOSITES_DICTIONARY[self.direction]
    def set_information_parts(self, part_to_append):
        # Append a single event line to the fight log.
        self.information_parts.append(part_to_append)
    def hero_attack(self):
        """Hero attacks once: log the damage (or inability) and apply it to the enemy."""
        damage_from_attack = self.hero.attack()
        information_part_to_append = FIGHT_HERO_ATTACK_INFORMATION_PART + str(damage_from_attack)
        hero_can_not_attack = damage_from_attack == PLAYER_ZERO_DAMAGE_WHEN_ATTACKING
        if hero_can_not_attack:
            information_part_to_append = FIGHT_HERO_CANNOT_ATTACK_INFORMATION_PART
        self.set_information_parts(information_part_to_append)
        # Zero damage is still "taken" -- a no-op for the enemy's health.
        self.enemy.take_damage(damage_from_attack)
    def enemy_attack(self):
        """Enemy attacks once: log the damage (or inability) and apply it to the hero."""
        damage_from_attack = self.enemy.attack()
        information_part_to_append = FIGHT_ENEMY_ATTACK_INFORMATION_PART + str(damage_from_attack)
        enemy_can_not_attack = damage_from_attack == PLAYER_ZERO_DAMAGE_WHEN_ATTACKING
        if enemy_can_not_attack:
            information_part_to_append = FIGHT_ENEMY_CANNOT_ATTACK_INFORMATION_PART
        self.set_information_parts(information_part_to_append)
        self.hero.take_damage(damage_from_attack)
    def execute(self):
        """Alternate hero/enemy attacks (hero first) until one side dies."""
        is_fight_in_progress = self.hero.is_alive() and self.enemy.is_alive()
        while is_fight_in_progress:
            if self.hero.is_alive():
                self.hero_attack()
            # Enemy only answers if it survived the hero's attack.
            if self.enemy.is_alive():
                self.enemy_attack()
            is_fight_in_progress = self.hero.is_alive() and self.enemy.is_alive()
    def get_fight_information(self):
        # Full event log accumulated so far.
        return self.information_parts
| [
"antoni.1998@abv.bg"
] | antoni.1998@abv.bg |
38f1ba00d5c6f04f70636de75b00cc1ff16b61a0 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/tdeboissiere_DeepLearningImplementations/DeepLearningImplementations-master/DenseRecNet/run_cifar10.py | 8954c52f75b145ad8895faf0eb255a39e448500e | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 7,214 | py | from __future__ import print_function
import os
import time
import json
import argparse
import denserecnet
import numpy as np
import keras.backend as K
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.utils import np_utils
def run_cifar10(batch_size,
                nb_epoch,
                depth,
                nb_dense_block,
                nb_filter,
                growth_rate,
                dropout_rate,
                learning_rate,
                weight_decay,
                plot_architecture):
    """ Run CIFAR10 experiments

    :param batch_size: int -- batch size
    :param nb_epoch: int -- number of training epochs
    :param depth: int -- network depth
    :param nb_dense_block: int -- number of dense blocks
    :param nb_filter: int -- initial number of conv filter
    :param growth_rate: int -- number of new filters added by conv layers
    :param dropout_rate: float -- dropout rate
    :param learning_rate: float -- learning rate
    :param weight_decay: float -- weight decay
    :param plot_architecture: bool -- whether to plot network architecture
    """

    ###################
    # Data processing #
    ###################

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    nb_classes = len(np.unique(y_train))
    img_dim = X_train.shape[1:]

    # Channel axis depends on the backend image ordering ("th" = channels first).
    if K.image_dim_ordering() == "th":
        n_channels = X_train.shape[1]
    else:
        n_channels = X_train.shape[-1]

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    # Normalisation: per-channel standardisation using statistics of the
    # combined train+test set.
    X = np.vstack((X_train, X_test))
    # 2 cases depending on the image ordering
    if K.image_dim_ordering() == "th":
        for i in range(n_channels):
            mean = np.mean(X[:, i, :, :])
            std = np.std(X[:, i, :, :])
            X_train[:, i, :, :] = (X_train[:, i, :, :] - mean) / std
            X_test[:, i, :, :] = (X_test[:, i, :, :] - mean) / std

    elif K.image_dim_ordering() == "tf":
        for i in range(n_channels):
            mean = np.mean(X[:, :, :, i])
            std = np.std(X[:, :, :, i])
            X_train[:, :, :, i] = (X_train[:, :, :, i] - mean) / std
            X_test[:, :, :, i] = (X_test[:, :, :, i] - mean) / std

    ###################
    # Construct model #
    ###################

    model = denserecnet.DenseNet(nb_classes,
                                 img_dim,
                                 depth,
                                 nb_dense_block,
                                 growth_rate,
                                 nb_filter,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)
    # Model output
    model.summary()

    # Build optimizer
    opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=["accuracy"])

    if plot_architecture:
        from keras.utils.visualize_util import plot
        plot(model, to_file='./figures/densenet_archi.png', show_shapes=True)

    ####################
    # Network training #
    ####################

    print("Training")

    list_train_loss = []
    list_test_loss = []
    list_learning_rate = []

    for e in range(nb_epoch):

        # Step-wise learning-rate schedule: /10 at 50%, /100 at 75% of training.
        if e == int(0.5 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 10.))

        if e == int(0.75 * nb_epoch):
            K.set_value(model.optimizer.lr, np.float32(learning_rate / 100.))

        split_size = batch_size
        # BUG FIX: this was `/` (true division under Python 3), producing a
        # float that np.array_split only accepted via implicit int() truncation.
        # Use explicit floor division for the number of batch splits.
        num_splits = X_train.shape[0] // split_size
        arr_splits = np.array_split(np.arange(X_train.shape[0]), num_splits)

        l_train_loss = []
        start = time.time()

        for batch_idx in arr_splits:

            X_batch, Y_batch = X_train[batch_idx], Y_train[batch_idx]
            train_logloss, train_acc = model.train_on_batch(X_batch, Y_batch)

            l_train_loss.append([train_logloss, train_acc])

        test_logloss, test_acc = model.evaluate(X_test,
                                                Y_test,
                                                verbose=0,
                                                batch_size=64)
        list_train_loss.append(np.mean(np.array(l_train_loss), 0).tolist())
        list_test_loss.append([test_logloss, test_acc])
        # float(): to convert numpy array to json serializable
        list_learning_rate.append(float(K.get_value(model.optimizer.lr)))

        print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))

    # Persist the experiment log for later plotting/analysis.
    d_log = {}
    d_log["batch_size"] = batch_size
    d_log["nb_epoch"] = nb_epoch
    d_log["optimizer"] = opt.get_config()
    d_log["train_loss"] = list_train_loss
    d_log["test_loss"] = list_test_loss
    d_log["learning_rate"] = list_learning_rate

    json_file = os.path.join('./log/experiment_log_cifar10.json')
    with open(json_file, 'w') as fp:
        json.dump(d_log, fp, indent=4, sort_keys=True)
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters, prepare output
    # directories and launch the CIFAR10 experiment.
    parser = argparse.ArgumentParser(description='Run CIFAR10 experiment')
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Batch size')
    parser.add_argument('--nb_epoch', default=30, type=int,
                        help='Number of epochs')
    parser.add_argument('--depth', type=int, default=7,
                        help='Network depth')
    parser.add_argument('--nb_dense_block', type=int, default=1,
                        help='Number of dense blocks')
    parser.add_argument('--nb_filter', type=int, default=16,
                        help='Initial number of conv filters')
    parser.add_argument('--growth_rate', type=int, default=12,
                        help='Number of new filters added by conv layers')
    parser.add_argument('--dropout_rate', type=float, default=0.2,
                        help='Dropout rate')
    parser.add_argument('--learning_rate', type=float, default=1E-3,
                        help='Learning rate')
    parser.add_argument('--weight_decay', type=float, default=1E-4,
                        help='L2 regularization on weights')
    # NOTE(review): argparse's type=bool makes any non-empty string truthy
    # ("--plot_architecture False" still enables plotting).
    parser.add_argument('--plot_architecture', type=bool, default=False,
                        help='Save a plot of the network architecture')

    args = parser.parse_args()

    print("Network configuration:")
    # NOTE(review): parse_args() is invoked a second time here just to list the
    # settings; iterating args._get_kwargs() would avoid the re-parse.
    for name, value in parser.parse_args()._get_kwargs():
        print(name, value)

    # Ensure output directories exist before training writes logs/figures.
    list_dir = ["./log", "./figures"]
    for d in list_dir:
        if not os.path.exists(d):
            os.makedirs(d)

    run_cifar10(args.batch_size,
                args.nb_epoch,
                args.depth,
                args.nb_dense_block,
                args.nb_filter,
                args.growth_rate,
                args.dropout_rate,
                args.learning_rate,
                args.weight_decay,
                args.plot_architecture)
| [
"659338505@qq.com"
] | 659338505@qq.com |
b66b465a380d4e26fe2fe4a7d4d23968a5dc804e | e797d6ec2088b3471d15ce802f1d79d931194f3a | /NonRPFRasterLoader_ToolValidator.py | 3d574f94818729ab191b74a13b0639f4dc7003ef | [] | no_license | mfunk/MA-Storage | 20a5427644b4cd7929e5e07c5af35c79839de0d6 | c103f346111c1c4d46408d69be46f8bc1ddddc3a | refs/heads/master | 2020-04-15T23:42:38.718055 | 2014-07-21T16:57:31 | 2014-07-21T16:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,660 | py | #!/usr/bin/env python
class ToolValidator:
    """Class for validating a tool's parameter values and controlling
    the behavior of the tool's dialog."""

    def __init__(self):
        """Set up the geoprocessor and the list of tool parameters."""
        import arcgisscripting as ARC
        self.GP = ARC.create(9.3)
        self.params = self.GP.getparameterinfo()

    def initializeParameters(self):
        """Refine the properties of a tool's parameters. This method is
        called when the tool is opened."""
        # Parameter layout:
        # 0 - Input catalog
        # 1 - Input rasters (multiple)
        # 2 - Product
        # 3 - Scale
        # 4 - Series
        # 5 - Configuration Keyword
        # 6 - Output catalog
        # The output catalog mirrors the input catalog's schema.
        self.params[6].ParameterDependencies = [0]
        self.params[6].Schema.Clone = True
        self.params[6].Schema.FieldsRule = "All"
        return

    def updateParameters(self):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parmater
        has been changed."""
        return

    def updateMessages(self):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        # Check that the catalog is a valid Military Analyst RPF catalog.
        if self.params[0].Altered:
            input_catalog = str(self.params[0].Value)
            if not self.ValidRPFCatalog(input_catalog):
                # BUG FIX: was `self.GP.params[0].SetErrorMessage(...)` -- the
                # geoprocessor object has no `params` attribute; errors belong
                # on the parameter object, as every other message below does.
                self.params[0].SetErrorMessage("Input catalog is not a valid Military Analyst RPF catalog.")
        # Enforce catalog field length limits on the free-text parameters.
        if self.params[2].Altered:
            if len(self.params[2].Value) > 25:
                self.params[2].SetErrorMessage("Product string exceeds maximum length of 25 characters.")
        if self.params[4].Altered:
            if len(self.params[4].Value) > 5:
                self.params[4].SetErrorMessage("Series string exceeds maximum length of 5 characters.")
        return

    def ValidRPFCatalog(self, inputTable):
        """Return True when `inputTable` carries the fields required of a
        Military Analyst RPF raster catalog (PRODUCT, SERIES, SCALE, FULL_NAME)."""
        checkfield1 = self.GP.ListFields(inputTable, "PRODUCT", "*")
        checkfield2 = self.GP.ListFields(inputTable, "SERIES", "*")
        checkfield3 = self.GP.ListFields(inputTable, "SCALE", "*")
        checkfield4 = self.GP.ListFields(inputTable, "FULL_NAME", "*")
        # ROBUSTNESS FIX: the original indexed checkfieldN[0], which raises
        # IndexError when ListFields returns an empty list; list truthiness
        # covers both the missing-field and empty-result cases.
        return bool(checkfield1 and checkfield2 and checkfield3 and checkfield4)
| [
"mfunk@esri.com"
] | mfunk@esri.com |
6f87adaee3c5827635ea027b2d5a1ba7c53ad949 | 2df1bce0d11ba43ad213f887b68b8bc1e1e41d33 | /bin/terminal.py | 517ecda5789203347258f60e8731864ddb316bed | [] | no_license | rheiland/pc4training | 80aead99e7859ec004044985492db736c8e0c6e4 | d37af1d9c0db228254b7679fe04cdff88d1558a1 | refs/heads/master | 2020-09-06T02:54:58.824853 | 2019-11-13T16:53:12 | 2019-11-13T16:53:12 | 220,296,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from ipywidgets import Output
from IPython.display import display, HTML
class TerminalTab(object):
    """Jupyter widget tab embedding a browser terminal served at ../terminals/new."""
    def __init__(self):
        # self.tab = Output(layout={'height': '600px'})
        self.tab = Output(layout={'height': 'auto'})
        self.tab.append_display_data(HTML(filename='doc/about.html'))
        # Local imports deliberately shadow the module-level names: this HTML
        # comes from ipywidgets, not IPython.display.
        from ipywidgets import HTML, Tab, Layout
        tab = Tab([HTML(value="<iframe width='100%' height='100%' src='../terminals/new'></iframe>", layout=Layout(height='600px'))])
        tab.set_title(0, "Terminal")
        display(tab)
| [
"heiland@indiana.edu"
] | heiland@indiana.edu |
1d4357ed6ca5e069e3e9bd0e47f3243eb8abe665 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/kikoAnalysis/wndResFiles/70-tideGauge.py | 782e71b5030c2122e3b4de8378784692225ba5a2 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 11:52:48 2020
@author: Michael Tadesse
"""
import os
import pandas as pd
# Root directory holding one subdirectory per tide gauge.
dir_in = "/lustre/fs0/home/mtadesse/eraFiveConcat"
os.chdir(dir_in)
tgList = os.listdir()
# Index window of gauges processed by this job (here: only gauge 70).
x = 70
y = 71
#looping through individual tide gauges
for ii in range(x, y):
    os.chdir(tgList[ii])
    print(tgList[ii])
    # ERA5 wind component time series for this gauge.
    uwnd = pd.read_csv('wnd_u.csv')
    vwnd = pd.read_csv('wnd_v.csv')
    #check sizes of uwnd and vwnd
    # NOTE(review): a size mismatch is only printed; processing continues anyway.
    if uwnd.shape == vwnd.shape:
        print("all good!")
    else:
        print("sizes not equal")
    # Drop index columns left over from earlier to_csv round-trips.
    uwnd.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace = True)
    vwnd.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace = True)
    #sort by date
    uwnd = uwnd.sort_values(by = 'date')
    vwnd = vwnd.sort_values(by = 'date')
    #reset indices
    uwnd.reset_index(inplace = True)
    vwnd.reset_index(inplace = True)
    uwnd.drop(['index'], axis = 1, inplace = True)
    vwnd.drop(['index'], axis = 1, inplace = True)
    #get squares of uwnd and vwnd (all columns except the leading date column)
    uSquare = uwnd.iloc[:, 1:]**2
    vSquare = vwnd.iloc[:, 1:]**2
    #sum and take square root
    # Resultant wind speed sqrt(u^2 + v^2) per column.
    wndResultant = (uSquare + vSquare)**0.5
    # Re-attach the date column in front before saving.
    wndResultant = pd.concat([pd.DataFrame(uwnd['date']), wndResultant], axis = 1)
    #save file
    wndResultant.to_csv("wndRest.csv")
    os.chdir(dir_in)
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
469e93af04882d13a845caada52a607ad02fef3e | 1c52ae4b10bb033e8f65a66254a13ba8a22d5e03 | /helium/common/permissions.py | 18f57d68c540967cc5992d700522d0141e76e1a9 | [
"MIT"
] | permissive | vaibhavmathur91/platform | 2fa488e449b02e7a82e4759517663822addb6a34 | 529b7047fbbcdbcfc4766156331da1b6c9ced0fa | refs/heads/master | 2020-03-18T15:57:27.883761 | 2018-05-25T23:13:54 | 2018-05-25T23:13:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | import logging
from rest_framework import permissions
__author__ = 'Alex Laird'
__copyright__ = 'Copyright 2018, Helium Edu'
__version__ = '1.0.0'
logger = logging.getLogger(__name__)
class IsOwner(permissions.BasePermission):
    """Object-level permission: only the owning user may access the object."""

    def has_object_permission(self, request, view, obj):
        # Grant access only when the requester is the object's owner.
        owner = obj.get_user()
        return owner == request.user
| [
"alexdlaird@gmail.com"
] | alexdlaird@gmail.com |
39b37f4b8dad13e40b2b69dff70d5dfde738a0cf | 37f675391762db798b712a0da0b760f03adc3b44 | /NEWS_HUB/bin/gunicorn | e9b7c5956d85b26d498dd35f1f5cf362f4575eb6 | [
"MIT"
] | permissive | Ken-mbira/News_Hub | 03c7d9d25b0e9b85949d0b3c9052369e0ee34f2c | c68768dd4f958c9dc74300d036ad69c518d3ce80 | refs/heads/master | 2023-08-06T18:16:03.668655 | 2021-09-13T12:57:11 | 2021-09-13T12:57:11 | 404,722,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/home/kenmbira/Documents/MoringaProjects/Week8/NEWS_HUB/NEWS_HUB/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    # Strip any "-script.pyw"/".exe" suffix (Windows launcher artifacts) from
    # argv[0] so gunicorn reports a clean program name, then run its CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"ken.mbira@student.moringaschool.com"
] | ken.mbira@student.moringaschool.com | |
d7e27c0c62004c1e8373b6bb6984fb7b6c32f33c | 3c831000ed8639c9187df6252ec7077a9a31d7df | /calender_visualizer.py | 34e5dbebaaeb66d368c3e8cc89df18b378a0cea7 | [] | no_license | robbynickles/mjc_schedulerB | 0790f7e1ddeba87c5c28e81e923e44338fd3ef54 | fa09632972ea071b7e629df479c2af1093add97f | refs/heads/master | 2021-01-02T08:34:05.965140 | 2014-07-27T17:15:20 | 2014-07-27T17:15:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,373 | py | from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.uix.slider import Slider
from kivy.graphics import Color, Bezier, Line, Rectangle
from random import randint
# Hour labels "12:00".."11:00" for each half of the day.
half = [ i +':00'for i in ['12', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11']]
# curry_me('A') returns a function that suffixes its argument with 'A'.
curry_me = lambda s: lambda i: i + s
am = curry_me('A')
pm = curry_me('P')
# NOTE(review): Python 2 idiom -- map() returns a list there; under Python 3
# this concatenation of map objects raises TypeError.
times = map(am, half) + map(pm, half)
# Visible calendar window: 06:00A through 10:00P.
relevant_times = times[6:23]
# Weekday label -> column index.
days = dict( zip( ['M','T','W','TH','F'], range(5) ) )
def split_time(hour):
    """Split 'HH:MMx' into ('HH:00x', MM): the on-the-hour label plus minutes past it."""
    colon = hour.find(':')
    on_the_hour = hour[:colon] + ":00" + hour[-1]
    minutes_past = int(hour[colon + 1:-1])
    return on_the_hour, minutes_past
def set_origin(ox, oy):
    """Return a (day, hour) -> [x, y] mapper anchored at the origin (ox, oy).

    Columns are 100px per weekday; rows are 20px per hour, with minutes
    interpolated as a fraction of the hour row.
    """
    x_gap, y_gap = 100, 20

    def give_point(day, hour):
        on_the_hour, minutes = split_time(hour)
        x = days[day] * x_gap + ox
        y = relevant_times.index(on_the_hour) * y_gap + (y_gap * (minutes / 60.0)) + oy
        return [x, y]

    return give_point
def extract_times(time_string):
    """Parse 'START - END' into (start, end).

    Falls back to ('06:00A', '06:00A') for strings (e.g. 'TBA') that do not
    have at least three whitespace-separated tokens.
    """
    tokens = time_string.split()
    if len(tokens) < 3:
        return '06:00A', '06:00A'
    return tokens[0], tokens[2]
# Point mappers with widget-space and canvas-space origins respectively.
give_widg_point = set_origin(-200, -200)
give_canv_point = set_origin(190, 96)
class Calender(FloatLayout):
    """Kivy layout drawing a weekly calendar with course meeting blocks."""
    def __init__(self, *args, **kwargs):
        super(Calender, self).__init__(*args, **kwargs)
        self.d = 10
        self.current_point = None
        self.build_border()
    def build_border( self ):
        # Day labels across the top and time labels down the left edge.
        for d in days.keys():
            pos = give_widg_point(d,'06:00A')
            pos[1] -= 40
            self.add_widget(Label(text=d, pos=pos))
        for t in relevant_times:
            pos = give_widg_point('M',t)
            pos[0] -= 100
            self.add_widget(Label(text=t, pos=pos))
    def add_block( self, day_list, start, end, color, text ):
        """Draw one rectangle per valid weekday covering start..end."""
        for day in day_list:
            if day in days.keys():
                # A course may have non-traditional days and times that don't
                # need calender representation.
                p1, p2 = give_canv_point(day, start), give_canv_point(day, end)
                canvas_holder = FloatLayout(pos=(0,0), size=(1000, 100))
                # NOTE(review): the per-course color is unpacked but unused --
                # the commented-out Color(r,g,b) was replaced by a fixed color.
                r,g,b = color
                with canvas_holder.canvas:
                    #Color(r,g,b)
                    Color(123, 23, 89)
                    Rectangle(size=(100,p2[1]-p1[1]), pos=p1)
                self.add_widget(canvas_holder)
    def add_course( self, course_dict ):
        """Add every (days, times, type) meeting of a scraped course record."""
        color = randint(0,255), randint(0,255), randint(0,255)
        time_data = zip(course_dict['Days'], course_dict['Times'], course_dict['Type'])
        for d_list, t, ty in time_data:
            start, end = extract_times(t)
            self.add_block( d_list, start, end, color, ty )
# Sample scraped course record used by the demo below; 'Days' pairs entry-wise
# with 'Times'/'Type' via zip in Calender.add_course.
course_dict = {'Name': ['MART-175'], 'Title': ['Color Photography', '08/25/14-12/13/14', 'Material Fee = $45.00'], 'Section': ['2672'], 'Days': ['T', 'TH', 'T', 'TH'], 'Times': ['01:15P - 02:40P', '02:40P - 04:05P', '02:40P - 04:05P', '01:15P - 02:40P'], 'Avail': ['Open'], 'Location': ['MADM 208, West', 'MADM 208, West'
, 'MADM 208, West', 'MADM 208, West'], 'Units': ['3'], 'Instructor': ['Staff03'], 'Type': ['LEC', 'LAB', 'LAB', 'LAB'], 'Important Notes': [''], 'Max/': ['20/11']}
if __name__ == '__main__':
    # Minimal Kivy app: show a calendar with the sample course added.
    class Main(App):
        def build(self):
            c = Calender()
            c.add_course( course_dict )
            return c
    main = Main()
    main.run()
| [
"r.nickles7@gmail.com"
] | r.nickles7@gmail.com |
1c29dc1ecfe316faa78fb563f818acb64a7520f9 | a613e5ec5d996bb1a60e6f4d417f44fe7241f867 | /Arrays/Merge overlapping ranges.py | 9bbaea8f72939902888ff0dd2ad24449a7cd16ba | [] | no_license | Nisar-1234/Data-structures-and-algorithms-1 | f0e2d9e63ee8baa35f12d106ee879ccb060c4caf | 777634f01d8b10a92a97c927ec09499ba08a28a4 | refs/heads/main | 2023-06-26T04:03:30.474025 | 2021-07-28T19:33:28 | 2021-07-28T19:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | def merge(ranges):
ranges.sort()
stack = []
stack.append(ranges[0])
for i in range(1,len(ranges)):
if ranges[i][0] <= stack[-1][1]:
stack[-1][1] = max(stack[-1][1],ranges[i][1])
else:
stack.append(ranges[i])
return stack
# Demo: two overlapping ranges collapse into one.
ranges = [[2,13],[8,20]]
print(merge(ranges))
"noreply@github.com"
] | Nisar-1234.noreply@github.com |
02f1ceb7c09effbcdc1c59b7067690ae0c023e77 | 6bc0cef468f97914fab31dd83bd417b4a5321051 | /py_checkio_solutions/Scientific Expedition/sum_by_type.py | ffa4fc777499a9847d22d02d2db4f233d0699a2f | [] | no_license | todatech/checkio | 14f19ef111a3f222b369937c90746c47bf2c3a63 | 763a9e0f81470302b173a4a700b77bed4f71de7a | refs/heads/master | 2023-02-01T16:04:39.018699 | 2020-12-21T01:46:38 | 2020-12-21T01:46:38 | 303,469,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | #!/usr/bin/env checkio --domain=py run sum-by-type
# You have a list. Each value from that list can be either a string or an integer. Your task here is to return two values. The first one is a concatenation of all strings from the given list. The second one is a sum of all integers from the given list.
#
# Input:An array of strings ans integers
#
# Output:A list or tuple
#
# Precondition:both given ints should be between -1000 and 1000
#
#
# END_DESC
from typing import Tuple
def sum_by_types(items: list) -> Tuple[str, int]:
    """Concatenate the strings and sum the integers of *items*.

    Returns (concatenation, total); exact `type` checks mean subclasses such
    as bool are deliberately ignored.
    """
    text = ''.join(item for item in items if type(item) is str)
    total = sum(item for item in items if type(item) is int)
    return (text, total)
if __name__ == '__main__':
    # Self-check harness from the CheckiO exercise template.
    print("Example:")
    print(sum_by_types([]))
    # These "asserts" are used for self-checking and not for an auto-testing
    assert sum_by_types([]) == ('', 0)
    assert sum_by_types([1, 2, 3]) == ('', 6)
    assert sum_by_types(['1', 2, 3]) == ('1', 5)
    assert sum_by_types(['1', '2', 3]) == ('12', 3)
    assert sum_by_types(['1', '2', '3']) == ('123', 0)
    assert sum_by_types(['size', 12, 'in', 45, 0]) == ('sizein', 57)
    print("Coding complete? Click 'Check' to earn cool rewards!")
"tonani@gmail.com"
] | tonani@gmail.com |
28307b976fc960f266e1401750875eb574c139e9 | 05ff9a0778ae16c4b3f29a4e4198e3f829dee409 | /ecommerce_app/migrations/0014_paynowpayment.py | 8d6fc7931cacfb59e11ca1edb1b61580301c8544 | [] | no_license | Inoxevious/malinafro | 8aa87b3b2a5473430ff57790ebccb2aaba6d8493 | 7b5b255997a9f54272c4320ed939b8e24c84b910 | refs/heads/main | 2023-01-20T14:32:23.049381 | 2020-12-02T09:21:47 | 2020-12-02T09:21:47 | 314,222,344 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | # Generated by Django 3.0.8 on 2020-11-24 14:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (3.0.8); creates the PaynowPayment model that
    # records payment-gateway transactions (reference, poll URL, paid state)
    # linked to a user.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('ecommerce_app', '0013_order_status'),
    ]
    operations = [
        migrations.CreateModel(
            name='PaynowPayment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cellphone', models.CharField(blank=True, max_length=100, null=True)),
                ('email', models.EmailField(blank=True, max_length=100, null=True)),
                ('reference', models.CharField(max_length=100)),
                ('paynow_reference', models.CharField(max_length=100)),
                ('amount', models.DecimalField(decimal_places=2, max_digits=10)),
                ('details', models.CharField(blank=True, max_length=500)),
                ('init_status', models.CharField(blank=True, max_length=10)),
                ('poll_url', models.CharField(max_length=500)),
                ('browser_url', models.CharField(max_length=500)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(max_length=10)),
                ('paid', models.BooleanField(default=False)),
                ('confirmed_at', models.DateTimeField(blank=True, null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"mpasiinnocent@gmail.com"
] | mpasiinnocent@gmail.com |
aba30190b9406ca34264eac7310ecebc0beed81d | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/baekjoon/problem/1000~9999/7562.나이트의이동/7562.py | 4723f88cb7b7f2278519430d1b57259805f660b1 | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 1,199 | py | import sys
sys.stdin = open('input_7562.txt', 'r')
def BFS(row, col):
queue = []
dx = [-2, -1, +1, +2, +2, +1, -1, -2]
dy = [+1, +2, +2, +1, -1, -2, -2, -1]
queue.append([row, col])
visited[row][col] = True
move_cnt = 0
while True:
temp_list = []
move_cnt += 1
while len(queue) != 0:
element = queue.pop(0)
for idx in range(8):
new_row, new_col = element[0] + dx[idx], element[1] + dy[idx]
if 0 <= new_row < I and 0 <= new_col < I:
if not visited[new_row][new_col]:
if new_row == goal[0] and new_col == goal[1]:
return move_cnt
else:
visited[new_row][new_col] = True
temp_list.append([new_row, new_col])
for temp in temp_list:
queue.append(temp)
for _ in range(int(input())):
I = int(input())
start = list(map(int, input().split()))
goal = list(map(int, input().split()))
visited = [[False] * I for _ in range(I)]
if start == goal:
print(0)
else:
print(BFS(start[0], start[1])) | [
"wallys0213@gmail.com"
] | wallys0213@gmail.com |
eb87b42045e37801ae1ccabf5fe794faad9c9aa5 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /kws_streaming/layers/dct_test.py | b359966280714820cd4c94950e26aec8aedb04af | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,605 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.layers.dct."""
import numpy as np
from kws_streaming.layers import dct
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
tf1.disable_eager_execution()
class DCTTest(tf.test.TestCase):
def test_tf_dct_vs_dct_direct(self):
signal_size = 64
# input signal
signal = np.random.rand(1, 1, signal_size)
# build mfcc model and run it
input_signal = tf.keras.Input(
shape=(
1,
signal_size,
), batch_size=1)
output = tf.signal.mfccs_from_log_mel_spectrograms(input_signal)
model = tf.keras.Model(input_signal, output)
model.summary()
mfcc_output = model.predict(signal)
# build dct model and run it
input_signal = tf.keras.Input(
shape=(
1,
signal_size,
), batch_size=1)
output = dct.DCT()(input_signal)
model = tf.keras.Model(input_signal, output)
model.summary()
dct_output = model.predict(signal)
self.assertAllClose(
mfcc_output[0][0], dct_output[0][0], rtol=1e-5, atol=1e-6)
def test_tf_dct_vs_dct_matmul(self):
signal_size = 51
# input signal
signal = np.random.rand(1, 1, signal_size)
# build dct model using tf function
input_signal = tf.keras.Input(
shape=(
1,
signal_size,
), batch_size=1)
output = dct.DCT(use_tf=False)(input_signal)
model1 = tf.keras.Model(input_signal, output)
model1.summary()
model1_output = model1.predict(signal)
# build dct model using direct matmul
input_signal = tf.keras.Input(
shape=(
1,
signal_size,
), batch_size=1)
output = dct.DCT(use_tf=True)(input_signal)
model2 = tf.keras.Model(input_signal, output)
model2.summary()
model2_output = model2.predict(signal)
self.assertAllClose(
model1_output, model2_output, rtol=1e-5, atol=1e-5)
if __name__ == "__main__":
tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
b08e0b567a118d75a7d4962d1ede12db6030b181 | f957ad3b17e4172791ef93c38dc131f34be3545f | /corpus/location.py | 055bd4a146c76db39ef38918a757b68baf6a2eee | [
"Apache-2.0"
] | permissive | LeMyst/ConferenceCorpus | e43c1806165fbb24a6b312a3ad81d4142b1edfe5 | 727afb6f5c9b2e3807260a6161a27531b2f77dd2 | refs/heads/main | 2023-09-05T08:39:29.811243 | 2021-11-08T07:43:02 | 2021-11-08T07:43:02 | 426,408,944 | 0 | 0 | Apache-2.0 | 2021-11-09T22:38:53 | 2021-11-09T22:38:52 | null | UTF-8 | Python | false | false | 6,815 | py | '''
Created on 2021-08-11
@author: wf
'''
#from lodstorage.entity import EntityManager
from geograpy.locator import LocationContext
from OSMPythonTools.nominatim import Nominatim
import os
import logging
class LocationLookup:
'''
lookup locations
'''
preDefinedLocations={
"Not Known": None,
"Online": None,
"Albuquerque, New Mexico, USA":"Q34804",
"Alexandria, Virginia, USA":"Q88",
"Amsterdam": "Q727",
"Amsterdam, Amsterdam": "Q727",
"Amsterdam Netherlands": "Q727",
"Amsterdam, Netherlands": "Q727",
"Amsterdam, The Netherlands":"Q727",
"Bergen, Norway":"Q26793",
"Bremen, Germany": "Q24879",
"Cancun, Mexico":"Q8969",
"Cancún, Mexico": "Q8969",
"Cambridge, United Kingdom": "Q21713103",
"Cambridge, UK": "Q21713103",
"Cambridge, USA": "Q49111",
"Cambridge, MA":"Q49111",
"Cambridge, Massachusetts, USA":"Q49111",
"Cambridge, MA, USA":"Q49111",
"Charleston, South Carolina, USA":"Q47716",
"Gdansk, Poland":"Q1792",
"Heraklion, Crete, Greece":"Q160544",
"Los Angeles California": "Q65",
"Los Angeles CA USA": "Q65",
"Luxembourg, Luxembourg":"Q1842",
"Macau, Macau, China":"Q14773",
"Monterrey, Mexico":"Q81033",
"Montreal, QC": "Q340",
"Montreal, QC, Canada": "Q340",
"Montrèal, Canada": "Q340",
"New Brunswick, New Jersey, USA":"Q138338",
"New Delhi": "Q987",
"New Delhi, India": "Q987",
"New Orleans, LA": "Q34404",
"Palo Alto, USA": "Q47265",
"Palo Alto, California, USA": "Q47265",
"Pasadena, California, USA":"Q485176",
"Phoenix": "Q16556",
"Phoenix, AZ": "Q16556",
"Phoenix AZ USA": "Q16556",
"Phoenix, Arizona, USA": "Q16556",
"Phoenix, USA": "Q16556",
"Phoenix, USA": "Q16556",
"Phoenix, AZ, USA": "Q16556",
"Salamanca, Spain": "Q15695",
"Santa Barbara, California": "Q159288",
"Santa Barbara, CA": "Q159288",
"Santa Barbara, CA, USA": "Q159288",
"Santa Barbara CA USA": "Q159288",
"Santa Barbara, USA": "Q159288",
"Santa Barbara, California, USA": "Q159288",
"Santa Fe, New Mexico": "Q38555",
"Santa Fe, NM, USA": "Q38555",
"Santa Fe, New Mexico, USA": "Q38555",
"Santa Fe, USA": "Q38555",
"Santa Fe, New Mexico, United States": "Q38555",
"Skovde, Sweden": "Q21166",
"Snowbird, Utah, USA": "Q3487194",
"St. Louis, MO, USA": "Q38022",
"St. Petersburg": "Q656",
"Saint-Petersburg, Russia":"Q656",
"Thessaloniki": "Q17151",
"Thessaloniki, Greece": "Q17151",
"Trondheim, Norway":"Q25804",
"Valencia": "Q8818",
"Valencia, Spain": "Q8818",
"Valencia, Valencia, Spain": "Q8818",
"York, UK":"Q42462"
}
other={
"Washington, DC, USA": "Q61",
"Bangalore": "Q1355",
"Bangalore, India": "Q1355",
"Xi'an": "Q5826",
"Xi'an, China": "Q5826",
"Virtual Event USA": "Q30",
"Virtual USA": "Q30",
"London United Kingdom": "Q84",
"Brno":"Q14960",
"Cancun":"Q8969",
"Gothenburg Sweden": "Q25287",
"Zurich, Switzerland": "Q72",
"Barcelona Spain": "Q1492",
"Vienna Austria": "Q1741",
"Seoul Republic of Korea": "Q8684",
"Seattle WA USA": "Q5083",
"Singapore Singapore":"Q334",
"Tokyo Japan": "Q1490",
"Vancouver BC Canada": "Q24639",
"Vancouver British Columbia Canada": "Q24639",
"Paris France": "Q90",
"Nagoya": "Q11751",
"Marrakech":"Q101625",
"Austin Texas":"Q16559",
"Chicago IL USA":"Q1297",
"Bangkok Thailand":"Q1861",
"Firenze, Italy":"Q2044",
"Florence Italy":"Q2044",
"Timisoara":"Q83404",
"Langkawi":"Q273303",
"Beijing China":"Q956",
"Berlin Germany": "Q64",
"Prague Czech Republic":"Q1085",
"Portland Oregon USA":"Q6106",
"Portland OR USA":"Q6106",
"Pittsburgh PA USA":"Q1342",
"Новосибирск":"Q883",
"Los Angeles CA USA":"Q65",
"Kyoto Japan": "Q34600"
}
def __init__(self):
'''
Constructor
'''
self.locationContext=LocationContext.fromCache()
cacheRootDir=LocationContext.getDefaultConfig().cacheRootDir
cacheDir=f"{cacheRootDir}/.nominatim"
if not os.path.exists(cacheDir):
os.makedirs(cacheDir)
self.nominatim = Nominatim(cacheDir=cacheDir)
logging.getLogger('OSMPythonTools').setLevel(logging.ERROR)
def getCityByWikiDataId(self,wikidataID:str):
'''
get the city for the given wikidataID
'''
citiesGen=self.locationContext.cityManager.getLocationsByWikidataId(wikidataID)
if citiesGen is not None:
cities=list(citiesGen)
if len(cities)>0:
return cities[0]
else:
return None
def lookupNominatim(self,locationText:str):
location=None
nresult=self.nominatim.query(locationText,params={"extratags":"1"})
nlod=nresult._json
if len(nlod)>0:
nrecord=nlod[0]
if "extratags" in nrecord:
extratags=nrecord["extratags"]
if "wikidata" in extratags:
wikidataID=extratags["wikidata"]
location=self.getCityByWikiDataId(wikidataID)
return location
def lookup(self,locationText:str):
if locationText in LocationLookup.preDefinedLocations:
locationId=LocationLookup.preDefinedLocations[locationText]
if locationId is None:
return None
else:
location=self.getCityByWikiDataId(locationId)
if location is None:
print(f"❌❌-predefinedLocation {locationText}→{locationId} wikidataId not resolved")
return location
lg=self.lookupGeograpy(locationText)
ln=self.lookupNominatim(locationText)
if ln is not None and lg is not None and not ln.wikidataid==lg.wikidataid:
print(f"❌❌{locationText}→{lg}!={ln}")
return None
return lg
def lookupGeograpy(self,locationText:str):
'''
lookup the given location by the given locationText
'''
locations=self.locationContext.locateLocation(locationText)
if len(locations)>0:
return locations[0]
else:
return None
| [
"wf@bitplan.com"
] | wf@bitplan.com |
3ef28cffc4c66730c648b8fa86a3b1eb738a771c | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Item_Layout_20190119192913.py | c890a70582358ec2307bee469503689adbba361f | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,099 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
import plugin.Qt as Qt
if Qt.__binding__.startswith('PyQt'):
from Qt import _uic as uic
return uic.loadUiType(uiFile)
elif Qt.__binding__ == 'PySide':
import pysideuic as uic
else:
import pyside2uic as uic
import xml.etree.ElementTree as xml
from cStringIO import StringIO
parsed = xml.parse(uiFile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uiFile, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
exec pyc in frame
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s'%form_class]
base_class = eval('%s'%widget_class)
return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Item_Layout.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
from maya import cmds
class Cam_Item_Layout(form_class,base_class):
def __init__(self,MainWindow):
super(Cam_Item_Layout,self).__init__()
self.setupUi(self)
self.MainWindow = MainWindow
self.Item_Add_BTN.clicked.connect(self.Item_Add_Fn)
self.Item_Clear_BTN.clicked.connect(self.Item_Clear_Fn)
self.Cam_Item_Num = 0
self.Cam_Item_Scroll.verticalScrollBar().valueChanged.connect(self.Scroll_Fn)
self.Scroll_Offset = 0
self.Attr = {}
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Name"] = ""
# Note 功能按键
self.Batch_Keyframe_BTN.clicked.connect(self.Batch_Keyframe_Fn)
self.Select_Path_BTN.clicked.connect(self.Select_Path_Fn)
self.Select_Path_BTN.clicked.connect(self.Select_Path_Fn)
def Batch_Keyframe_Fn(self):
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
Path = child.Attr["Add_Motion_Path_LE"]
if cmds.objExists(Path):
offset = cmds.keyframe(Path,q=1)[0]
cmds.keyframe("%s.uValue"% Path,e=1,iub=1,r=1,o="over",tc=-offset)
def Select_Path_Fn(self):
cmds.select(cl=1)
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
if cmds.objExists(child.Attr["Add_Motion_Path_LE"]):
cmds.select(child.Attr["Add_Motion_Path_LE"],add=1)
def Item_Add_Fn(self):
self.Cam_Item_Num += 1
return Cam_Item(self,self.MainWindow)
def Item_Clear_Fn(self):
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Name"] = ""
for i,child in enumerate(self.Item_Layout.children()):
if i != 0:
child.deleteLater()
def Scroll_Fn(self):
self.Scroll_Offset = self.Cam_Item_Scroll.verticalScrollBar().value()
UI_PATH = os.path.join(DIR,"ui","Cam_Item.ui")
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item(form_class,base_class):
def __init__(self,parent,MainWindow):
super(Cam_Item,self).__init__()
self.setupUi(self)
self.MainWindow = MainWindow
self.Cam_Del_BTN.clicked.connect(self.Cam_Del_BTN_Fn)
self.Cam_Con_CB.stateChanged.connect(self.Cam_Con_CB_Fn)
# Note 初始化创建参数
TotalCount = len(parent.Item_Layout.children())
parent.Item_Layout.layout().insertWidget(TotalCount-1,self)
self.Cam_LE.setText("Cam_Item_%s" % parent.Cam_Item_Num)
self.Cam_Num_Label.setText(u"镜头%s" % TotalCount)
self.setObjectName("Cam_Item_%s" % TotalCount)
self.Num = TotalCount
self.Attr = {}
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Strat_Time_SB"] = 0
self.Attr["End_Time_SB"] = 0
self.MainWindow.Save_Json_Fun()
def Cam_Del_BTN_Fn(self):
self.deleteLater()
ChildrenList = self.parent().children()
for i,child in enumerate(ChildrenList):
if i != 0:
if i > self.Num:
# Note 修正 child 的序号
child.Num -= 1
child.Cam_Num_Label.setText(u"镜头%s" % (i-1))
child.setObjectName("Cam_Item_%s" % (i-1))
else:
child.Cam_Num_Label.setText(u"镜头%s" % i)
child.setObjectName("Cam_Item_%s" % i)
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Strat_Time_SB"] = ""
self.Attr["End_Time_SB"] = ""
self.MainWindow.Save_Json_Fun()
def Cam_Con_CB_Fn(self,state):
ChildrenList = self.parent().children()
for i,child in enumerate(ChildrenList):
if i != 0:
if child != self:
child.Cam_Con_CB.blockSignals(True)
child.Cam_Con_CB.setChecked(False)
if state == 2:
self.Cam_Con_CB.setChecked(True)
else:
self.Cam_Con_CB.setChecked(False)
for i,child in enumerate(ChildrenList):
if i != 0:
if child != self:
child.Cam_Con_CB.blockSignals(False)
| [
"2595715768@qq.com"
] | 2595715768@qq.com |
b89f3d90b055cd7e487503b5d88b55929f5bba30 | 2e4290bc1bee155cb8f95bdf7681b58325c8746e | /School/crop/forms.py | 06dc1db5004099b9a33e20985574b5242b21b1d6 | [] | no_license | codingspider/Schoolscript | bb7b539655417e8ee92dae27cedad69c386f5d80 | 7b61d7edb0b5ca4d4767622a02d8727f55510aec | refs/heads/master | 2022-12-14T12:06:15.351705 | 2020-09-08T11:22:27 | 2020-09-08T11:22:27 | 289,896,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | from PIL import Image
from .models import PointOfInterest, Rental
from django import forms
from django.core.files import File
# class PhotoForm(forms.ModelForm):
# x = forms.FloatField(widget=forms.HiddenInput())
# y = forms.FloatField(widget=forms.HiddenInput())
# width = forms.FloatField(widget=forms.HiddenInput())
# height = forms.FloatField(widget=forms.HiddenInput())
#
# class Meta:
# model = Photo
# fields = ('file', 'x', 'y', 'width', 'height', )
# widgets = {
# 'file': forms.FileInput(attrs={
# 'accept': 'image/*' # this is not an actual validation! don't rely on that!
# })
# }
#
# def save(self):
# photo = super(PhotoForm, self).save()
#
# x = self.cleaned_data.get('x')
# y = self.cleaned_data.get('y')
# w = self.cleaned_data.get('width')
# h = self.cleaned_data.get('height')
#
# image = Image.open(photo.file)
# cropped_image = image.crop((x, y, w+x, h+y))
# resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)
# resized_image.save(photo.file.path)
#
# return photo
class LocationForm(forms.ModelForm):
class Meta:
model = PointOfInterest
fields = "__all__"
class RentalForm(forms.ModelForm):
class Meta:
model = Rental
fields = "__all__"
| [
"engrokon.rok@gmail.com"
] | engrokon.rok@gmail.com |
e6401021ad628fdb35351d2021abefaacd6de2d1 | d98d5d1af8c31bb7aa0b628d48e504db2ebecbc8 | /分子反映分类/demo.py | ba68e44b7314565046756df256fdf2aa2c14c27c | [] | no_license | dugzzuli/kaggleDemo | 1d52b931e4399551bc92d7cd40bc9453223ede49 | 65c91c42bf9b01eaca3c071b1ce210f214814433 | refs/heads/master | 2021-01-20T04:15:34.768985 | 2017-04-30T14:57:57 | 2017-04-30T14:57:57 | 89,662,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | """Kaggle competition: Predicting a Biological Response.
Blending {RandomForests, ExtraTrees, GradientBoosting} + stretching to
[0,1]. The blending scheme is related to the idea Jose H. Solorzano
presented here:
http://www.kaggle.com/c/bioresponse/forums/t/1889/question-about-the-process-of-ensemble-learning/10950#post10950
'''You can try this: In one of the 5 folds, train the models, then use
the results of the models as 'variables' in logistic regression over
the validation data of that fold'''. Or at least this is the
implementation of my understanding of that idea :-)
The predictions are saved in test.csv. The code below created my best
submission to the competition:
- public score (25%): 0.43464
- private score (75%): 0.37751
- final rank on the private leaderboard: 17th over 711 teams :-)
Note: if you increase the number of estimators of the classifiers,
e.g. n_estimators=1000, you get a better score/rank on the private
test set.
Copyright 2012, Emanuele Olivetti.
BSD license, 3 clauses.
"""
from __future__ import division
import numpy as np
import load_data
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
def logloss(attempt, actual, epsilon=1.0e-15):
"""Logloss, i.e. the score of the bioresponse competition.
"""
attempt = np.clip(attempt, epsilon, 1.0-epsilon)
return - np.mean(actual * np.log(attempt) +
(1.0 - actual) * np.log(1.0 - attempt))
if __name__ == '__main__':
np.random.seed(0) # seed to shuffle the train set
n_folds = 10
verbose = True
shuffle = False
X, y, X_submission = load_data.load()
if shuffle:
idx = np.random.permutation(y.size)
X = X[idx]
y = y[idx]
skf = list(StratifiedKFold(y, n_folds))
print(skf)
clfs = [RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='gini'),
RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='entropy'),
ExtraTreesClassifier(n_estimators=100, n_jobs=-1, criterion='gini'),
ExtraTreesClassifier(n_estimators=100, n_jobs=-1, criterion='entropy'),
GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=50)]
print( "Creating train and test sets for blending.")
#X 训练数据集个数 训练模型个数
dataset_blend_train = np.zeros((X.shape[0], len(clfs)))
# 测试数据集 的个数
dataset_blend_test = np.zeros((X_submission.shape[0], len(clfs)))
for j, clf in enumerate(clfs):
print( j, clf)
#创建 针对当前的分类模型 传入的数据为一个元祖
dataset_blend_test_j = np.zeros((X_submission.shape[0], len(skf)))
for i, (train, test) in enumerate(skf):
print(skf)
print( "Fold", i)
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
print(X_test)
| [
"bingwei2642@qq.com"
] | bingwei2642@qq.com |
422e81d7dc8990d09e10b9f966d4575ed58f6625 | aef1c0d4a32fa10afae10393c850960f9d89cdbc | /MiRegionCO/apps/noticia/migrations/0005_auto_20170722_1413.py | 539cffd200c033897d8312e8478b827075b333a1 | [] | no_license | joselofierro/MiRegionCO | 587059244fc153f32c6eaac8e41fab05bdeb5937 | 781491dc81a2dff7a8ae237d4ea7e23b31a31c52 | refs/heads/master | 2021-11-09T11:23:41.189863 | 2018-01-16T16:53:34 | 2018-01-16T16:53:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-22 14:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('noticia', '0004_noticia_destacada'),
]
operations = [
migrations.AddField(
model_name='noticia',
name='duracion',
field=models.CharField(default=1, max_length=5),
preserve_default=False,
),
migrations.AlterField(
model_name='noticia',
name='titular',
field=models.CharField(max_length=100),
),
]
| [
"juliofierro@Mac-mini-de-JULIO.local"
] | juliofierro@Mac-mini-de-JULIO.local |
6c12f87994931874e395ce5e3a254320cbfa4375 | ded564e6571f59df13a3f5d753c6c54f207261c1 | /thermo/units.py | 57c989069162d21bb322e48ddecc82bec6677b5e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | simonmb/thermo | 580ae53e764e00e601a5ef4a918e8d4a77442257 | 9abbb0ea71abe8677155e029d01aebe74cce137f | refs/heads/master | 2021-12-13T23:34:50.774780 | 2021-11-11T22:27:21 | 2021-11-11T22:27:21 | 144,257,869 | 1 | 0 | MIT | 2018-08-10T08:15:47 | 2018-08-10T08:15:46 | null | UTF-8 | Python | false | false | 3,232 | py | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2017, 2018, 2019 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
__all__ = ['u']
import types
import re
import inspect
import functools
import collections
import thermo
import numpy as np
try:
import pint
from pint import _DEFAULT_REGISTRY as u
from pint import DimensionalityError
except ImportError: # pragma: no cover
raise ImportError('The unit handling in fluids requires the installation '
'of the package pint, available on pypi or from '
'https://github.com/hgrecco/pint')
from fluids.units import wraps_numpydoc, wrap_numpydoc_obj
__funcs = {}
failed_wrapping = False
for name in dir(thermo):
if name == '__getattr__' or name == '__test__':
continue
obj = getattr(thermo, name)
if isinstance(obj, types.FunctionType):
pass
# obj = wraps_numpydoc(u)(obj)
elif type(obj) == type and (obj in (thermo.Chemical, thermo.Mixture, thermo.Stream,
thermo.ChemicalConstantsPackage, thermo.PropertyCorrelationsPackage)
or thermo.eos.GCEOS in obj.__mro__
or thermo.activity.GibbsExcess in obj.__mro__
or thermo.TDependentProperty in obj.__mro__
or thermo.MixtureProperty in obj.__mro__
or thermo.Flash in obj.__mro__
):
if obj in (thermo.eos_mix.PSRKMixingRules, thermo.eos_mix.PSRK):
# Not yet implemented
continue
try:
obj = wrap_numpydoc_obj(obj)
except Exception as e:
failed_wrapping = True
print('Current implementation of %s contains documentation not '
'parseable and cound not be wrapped to use pint:' %str(obj))
print(e)
elif isinstance(obj, str):
continue
if name == '__all__':
continue
__all__.append(name)
__funcs.update({name: obj})
globals().update(__funcs)
| [
"Caleb.Andrew.Bell@gmail.com"
] | Caleb.Andrew.Bell@gmail.com |
1da55da5caaa3b90460be0fb0e117a0a33a47b72 | 847815fd6d24859dd0e41a3e53fd29df63b0e8f3 | /solutions/CombinationSumII.py | 27391dee0f2916f041e7fa29ac75d529b173e5ce | [] | no_license | howardhe0329/leetcode | 68c2f901ed15e1904241bb31f9fcba5cdc0cb6dd | 588a86282b8cc74fa14d810eb3a532c5c3e6de81 | refs/heads/master | 2020-07-04T13:03:08.134205 | 2015-12-25T14:40:20 | 2015-12-25T14:40:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | __author__ = 'Daoyuan'
from BaseSolution import *
class CombinationSumII(BaseSolution):
def __init__(self):
BaseSolution.__init__(self)
self.push_test(
params = ([10,1,2,7,6,1,5], 8,),
expects = [
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6],
],
expect_unordered = True
)
self.push_test(
params = ([4,3,2,1,1], 5,),
expects = [
[1, 4],
[1, 1, 3],
[2, 3],
],
expect_unordered = True
)
self.push_test(
params = ([2],5),
expects = []
)
def solution(self, candidates, target):
nums = sorted(candidates)
return list(self.combine(nums, target))
def combine(self, nums, target):
if len(nums) == 0:
return
last = -1
for i in xrange(len(nums)):
if last == nums[i]:
continue
last = nums[i]
if nums[i] == target:
yield [nums[i],]
elif nums[i] > target:
return
else:
for next in self.combine( nums[i+1:], target - nums[i]):
yield [nums[i],] + next | [
"this@caunion.me"
] | this@caunion.me |
4e13646e0695fb15fe65d9cba62592a5336a05f7 | 401fc99cefe615f8ebefb6dd9c2b043c506f5bd0 | /tests/units/test_helpers.py | 6604d3b7ffe74c4a297010609d9e2ed3d39c7a8e | [
"MIT"
] | permissive | atviriduomenys/spinta | 0f85496860ebbcecfccd8dde2bf219564ee66baa | 1fac5b6b75ec65188d815078fd135bc05d49b31c | refs/heads/master | 2023-09-02T13:22:58.411937 | 2023-08-18T12:59:17 | 2023-08-18T12:59:17 | 168,724,854 | 12 | 4 | MIT | 2023-09-14T13:29:39 | 2019-02-01T16:16:11 | Python | UTF-8 | Python | false | false | 1,069 | py | import pytest
from spinta.units.helpers import is_si_unit
from spinta.units.helpers import is_time_unit
@pytest.mark.parametrize('unit', [
'1D',
'D',
'Y',
'3M',
'12H',
])
def test_valid_time_unit(unit: str):
assert is_time_unit(unit)
@pytest.mark.parametrize('unit', [
'D1',
'd',
'YY',
'',
' D',
'D ',
])
def test_invalid_time_unit(unit: str):
assert not is_time_unit(unit)
@pytest.mark.parametrize('unit', [
'm',
'1m',
'10m',
'm^2',
'm²',
'km¹⁰',
'kg⋅m²⋅s⁻³⋅A⁻¹',
'kg*m^2*s^-3⋅A^-1',
'8kg⋅m²⋅s⁻³⋅A⁻¹',
'mg/l',
'g/m^2',
'mg/m^3',
'mm',
'U/m^2',
'U/m^3',
'%',
'ha',
'min',
'h',
'bar',
'U',
'10^6s',
'10⁶s',
'μ/m³',
'yr',
'3mo',
'yr 2mo 4wk',
'°C',
'°',
])
def test_valid_unit(unit: str):
assert is_si_unit(unit)
@pytest.mark.parametrize('unit', [
'D',
'1D',
'meter',
])
def test_invalid_si_unit(unit: str):
assert not is_si_unit(unit)
| [
"sirexas@gmail.com"
] | sirexas@gmail.com |
e69a3b71d01b4f76b8c9c0a1d9ffdb9bc82b442b | 38258a7dd9acbfb7adf72983015de68a948a4826 | /B_10000~/B_10871.py | e6b311cfbec7336a975db6800540699eb27bee56 | [] | no_license | kangsm0903/Algorithm | 13a7fe5729039a1d0ce91a574c4755a8a92fb02b | 7d713d1c9e2e4dc30141d4f409ac1430a357065b | refs/heads/master | 2022-10-04T00:33:49.247977 | 2022-09-26T12:51:16 | 2022-09-26T12:51:16 | 219,265,010 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # # 11/15 10871번
N,X = input().split()
N = int(N)
X = int(X)
A = list(map(int,input().split()))
B = []
for i in range(0, N):
if (A[i] < X) :
print(A[i], end=' ') | [
"kangsm0903@naver.com"
] | kangsm0903@naver.com |
c7f221ee2ca5d98b9105f235c3746a617815877c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03168/s894090351.py | 90c251f91b9e6ad5b7d11301831dc58de6c7936d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | n = int(input())
arr = list(map(float,input().split()))
dp=[]
for i in range(n+1):
dp.append([0]*(n+1))
dp[1][0]=(1-arr[0])
dp[1][1]=arr[0]
for i in range(2,n+1):
for j in range(0,i+1):
dp[i][j]= dp[i-1][j-1]*arr[i-1] + dp[i-1][j]*(1-arr[i-1])
ans=0
for i in range(n//2+1,n+1):
ans+=dp[n][i]
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1607857a4861051a4af79b0e93cb41a76d4659c2 | 311e9f909ec8c63c40a6b09d70006da4f2e0a7d5 | /tfx/utils/model_paths/tf_serving_flavor_test.py | 7c1ad2f866b85cae9556b377d17a85091bfcd24f | [
"Apache-2.0"
] | permissive | 18jeffreyma/tfx | 793fbc6c0597d88d16ac551bae9eddfd18ff1542 | ff6917997340401570d05a4d3ebd6e8ab5760495 | refs/heads/master | 2022-12-15T16:18:15.578839 | 2020-08-31T20:34:05 | 2020-08-31T20:34:56 | 274,276,728 | 3 | 0 | Apache-2.0 | 2020-09-16T18:58:02 | 2020-06-23T01:08:19 | Python | UTF-8 | Python | false | false | 2,855 | py | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.model_paths.tf_serving_flavor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tfx.utils.model_paths import tf_serving_flavor as tfs_flavor
class TFServingFlavorTest(tf.test.TestCase):
def testRoundTrip(self):
self.assertEqual(
tfs_flavor.parse_model_path(
tfs_flavor.make_model_path('/foo/bar', 'my-model', 123)),
('/foo/bar', 'my-model', 123))
self.assertEqual(
tfs_flavor.make_model_path(
*tfs_flavor.parse_model_path('/foo/bar/my-model/123')),
'/foo/bar/my-model/123')
def testMakeModelPath(self):
self.assertEqual(
tfs_flavor.make_model_path(
model_base_path='/foo/bar',
model_name='my-model',
version=123),
'/foo/bar/my-model/123')
self.assertEqual(
tfs_flavor.make_model_path(
model_base_path='s3://bucket-name/foo/bar',
model_name='my-model',
version=123),
's3://bucket-name/foo/bar/my-model/123')
self.assertEqual(
tfs_flavor.make_model_path(
model_base_path='gs://bucket-name/foo/bar',
model_name='my-model',
version=123),
'gs://bucket-name/foo/bar/my-model/123')
def testParseModelPath(self):
self.assertEqual(
tfs_flavor.parse_model_path('/foo/bar/my-model/123',),
('/foo/bar', 'my-model', 123))
self.assertEqual(
tfs_flavor.parse_model_path('s3://bucket-name/foo/bar/my-model/123'),
('s3://bucket-name/foo/bar', 'my-model', 123))
self.assertEqual(
tfs_flavor.parse_model_path('gs://bucket-name/foo/bar/my-model/123'),
('gs://bucket-name/foo/bar', 'my-model', 123))
def testParseModelPath_Fail(self):
with self.assertRaises(ValueError):
tfs_flavor.parse_model_path('too-short')
with self.assertRaises(ValueError):
tfs_flavor.parse_model_path('/foo/bar/my-model/not-an-int-version')
with self.assertRaises(ValueError):
tfs_flavor.parse_model_path('/foo/bar/other-model/123',
expected_model_name='my-model')
if __name__ == '__main__':
tf.test.main()
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
38d5fcccac7e170f152339629aea554fd246c000 | 8c9c27cb88a2d210a5e2fb5803fe89204dba95ef | /phy/cluster/manual/tests/test_views.py | 840eb0c9c9512508abe958d990f4ba57f638d4fd | [] | no_license | arnefmeyer/phy | c13b1eceb70ee72cf0ff9c4a273e195f122fabc4 | 14663e1f2baad421d6bc9f420d34170c6c969bbe | refs/heads/master | 2020-12-07T15:42:49.605432 | 2016-04-20T21:10:38 | 2016-04-20T21:10:38 | 56,718,986 | 1 | 0 | null | 2016-04-20T20:32:18 | 2016-04-20T20:32:18 | null | UTF-8 | Python | false | false | 6,873 | py | # -*- coding: utf-8 -*-
"""Test views."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import numpy as np
from numpy.testing import assert_equal as ae
from numpy.testing import assert_allclose as ac
from vispy.util import keys
from pytest import fixture
from phy.utils import Bunch
from .conftest import MockController
from ..views import (ScatterView,
_extract_wave,
_extend,
)
#------------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------------
@fixture
def state(tempdir):
    """Provide a mock GUI state with default settings for each view."""
    gui_state = Bunch()
    gui_state.WaveformView0 = Bunch(overlap=False)
    gui_state.TraceView0 = Bunch(scaling=1.)
    gui_state.FeatureView0 = Bunch(feature_scaling=.5)
    gui_state.CorrelogramView0 = Bunch(uniform_normalization=True)
    return gui_state
@fixture
def gui(tempdir, state):
    """Create a GUI from a mock controller, forwarding the mock view state."""
    return MockController(config_dir=tempdir).create_gui(add_default_views=False,
                                                         **state)
def _select_clusters(gui):
    """Show the GUI and select increasingly large cluster groups."""
    gui.show()
    clustering = gui.controller.manual_clustering
    assert clustering
    for cluster_ids in ([], [0], [0, 2], [0, 2, 3]):
        clustering.select(cluster_ids)
#------------------------------------------------------------------------------
# Test utils
#------------------------------------------------------------------------------
def test_extend():
    """_extend truncates or pads a list to the requested length."""
    values = list(range(5))
    assert _extend(values) == values
    assert _extend(values, 0) == []
    assert _extend(values, 4) == list(range(4))
    assert _extend(values, 5) == values
    assert _extend(values, 6) == (values + [4])
def test_extract_wave():
    """Check waveform extraction at various offsets, including zero-padding
    at both ends of the trace."""
    # Traces: 6 samples x 5 channels, filled with 0..29.
    traces = np.arange(30).reshape((6, 5))
    mask = np.array([0, 1, 1, .5, 0])
    wave_len = 4
    hwl = wave_len // 2
    # Waves starting before the first sample are zero-padded at the top.
    ae(_extract_wave(traces, 0 - hwl, mask, wave_len)[0],
       [[0, 0], [0, 0], [1, 2], [6, 7]])
    ae(_extract_wave(traces, 1 - hwl, mask, wave_len)[0],
       [[0, 0], [1, 2], [6, 7], [11, 12]])
    ae(_extract_wave(traces, 2 - hwl, mask, wave_len)[0],
       [[1, 2], [6, 7], [11, 12], [16, 17]])
    # Waves ending past the last sample are zero-padded at the bottom.
    ae(_extract_wave(traces, 5 - hwl, mask, wave_len)[0],
       [[16, 17], [21, 22], [0, 0], [0, 0]])
#------------------------------------------------------------------------------
# Test waveform view
#------------------------------------------------------------------------------
def test_waveform_view(qtbot, gui):
    """Exercise the waveform view: overlap/zoom/label toggles, box and
    probe scaling, channel-click forwarding, and data paging."""
    v = gui.controller.add_waveform_view(gui)
    _select_clusters(gui)

    ac(v.boxed.box_size, (.1818, .0909), atol=1e-2)
    v.toggle_waveform_overlap()
    v.toggle_waveform_overlap()

    v.toggle_zoom_on_channels()
    v.toggle_zoom_on_channels()

    v.toggle_show_labels()
    assert not v.do_show_labels

    # Box scaling: increase/decrease and widen/narrow should be inverses.
    bs = v.boxed.box_size
    v.increase()
    v.decrease()
    ac(v.boxed.box_size, bs)

    bs = v.boxed.box_size
    v.widen()
    v.narrow()
    ac(v.boxed.box_size, bs)

    # Probe scaling: extend/shrink should be inverses as well.
    bp = v.boxed.box_pos
    v.extend_horizontally()
    v.shrink_horizontally()
    ac(v.boxed.box_pos, bp)

    bp = v.boxed.box_pos
    v.extend_vertically()
    v.shrink_vertically()
    ac(v.boxed.box_pos, bp)

    a, b = v.probe_scaling
    v.probe_scaling = (a, b * 2)
    ac(v.probe_scaling, (a, b * 2))

    a, b = v.box_scaling
    v.box_scaling = (a * 2, b)
    ac(v.box_scaling, (a * 2, b))

    v.zoom_on_channels([0, 2, 4])

    # Simulate channel selection.
    _clicked = []

    @v.gui.connect_
    def on_channel_click(channel_idx=None, button=None, key=None):
        _clicked.append((channel_idx, button, key))

    # Hold key '2' + left-click at the origin -> (channel 0, button 1, key 2).
    v.events.key_press(key=keys.Key('2'))
    v.events.mouse_press(pos=(0., 0.), button=1)
    v.events.key_release(key=keys.Key('2'))

    assert _clicked == [(0, 1, 2)]

    v.next_data()

    # qtbot.stop()
    gui.close()
#------------------------------------------------------------------------------
# Test trace view
#------------------------------------------------------------------------------
def test_trace_view(qtbot, gui):
    """Exercise the trace view: time navigation, interval resizing, label
    toggling, channel scaling, and origin switching."""
    v = gui.controller.add_trace_view(gui)
    _select_clusters(gui)

    ac(v.stacked.box_size, (1., .08181), atol=1e-3)
    assert v.time == .5

    # Absolute and relative time navigation.
    v.go_to(.25)
    assert v.time == .25

    v.go_to(-.5)
    assert v.time == .125  # presumably clamped at the left edge -- verify

    v.go_left()
    assert v.time == .125

    v.go_right()
    assert v.time == .175

    # Change interval size.
    v.interval = (.25, .75)
    ac(v.interval, (.25, .75))
    v.widen()
    ac(v.interval, (.125, .875))
    v.narrow()
    ac(v.interval, (.25, .75))

    # Widen the max interval.
    v.set_interval((0, gui.controller.duration))
    v.widen()

    v.toggle_show_labels()
    assert not v.do_show_labels

    # Change channel scaling: increase/decrease should be inverses.
    bs = v.stacked.box_size
    v.increase()
    v.decrease()
    ac(v.stacked.box_size, bs, atol=1e-3)

    v.origin = 'upper'
    assert v.origin == 'upper'

    # qtbot.stop()
    gui.close()
#------------------------------------------------------------------------------
# Test feature view
#------------------------------------------------------------------------------
def test_feature_view(qtbot, gui):
    """Exercise the feature view: extra attributes, scaling, and channel
    selection."""
    v = gui.controller.add_feature_view(gui)
    _select_clusters(gui)

    assert v.feature_scaling == .5
    # Register an extra per-spike attribute to plot.
    v.add_attribute('sine',
                    np.sin(np.linspace(-10., 10., gui.controller.n_spikes)))

    v.increase()
    v.decrease()

    v.on_channel_click(channel_idx=3, button=1, key=2)
    v.clear_channels()
    v.toggle_automatic_channel_selection()

    # qtbot.stop()
    gui.close()
#------------------------------------------------------------------------------
# Test scatter view
#------------------------------------------------------------------------------
def test_scatter_view(qtbot, gui):
    """Exercise ScatterView with random data; the coords function returns
    None whenever the selection contains cluster 2."""
    n = 1000
    v = ScatterView(coords=lambda c: Bunch(x=np.random.randn(n),
                                           y=np.random.randn(n),
                                           spike_ids=np.arange(n),
                                           spike_clusters=np.ones(n).
                                           astype(np.int32) * c[0],
                                           ) if 2 not in c else None,
                    # data_bounds=[-3, -3, 3, 3],
                    )
    v.attach(gui)
    _select_clusters(gui)

    # qtbot.stop()
    gui.close()
#------------------------------------------------------------------------------
# Test correlogram view
#------------------------------------------------------------------------------
def test_correlogram_view(qtbot, gui):
    """Exercise the correlogram view: normalization toggle and bin/window
    size changes."""
    v = gui.controller.add_correlogram_view(gui)
    _select_clusters(gui)

    v.toggle_normalization()

    v.set_bin(1)
    v.set_window(100)

    # qtbot.stop()
    gui.close()
| [
"cyrille.rossant@gmail.com"
] | cyrille.rossant@gmail.com |
182b04bde697101e629bf4f0c85d2c853c1567a5 | dc0b6b680fd1fc0ab86ed7a3460137cde3a8612d | /Meus códigos/Python/Economia/mdic/mdic_1f.py | ec1951120d5e6bd7b7c22b8aa57b02d253aed461 | [] | no_license | pedromfnakashima/codigos_versionados | 6c8c692bc08a0dda39a82bf91c5245f28d9be330 | c40c94d69f1ee3dd4317786f1c25bcc1bbcc2bb9 | refs/heads/main | 2023-03-21T20:32:53.677701 | 2021-03-20T00:03:10 | 2021-03-20T00:03:10 | 305,754,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,097 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 12:40:44 2020
@author: pedro-salj
"""
#############################
##### CONFIGURAÇÃO GERAL ####
#############################
globals().clear()
""" Mudar diretório """
import os
from pathlib import Path
import getpass
if getpass.getuser() == "pedro":
caminho_base = Path(r'D:\Códigos, Dados, Documentação e Cheat Sheets')
elif getpass.getuser() == "pedro-salj":
caminho_base = Path(r'C:\Users\pedro-salj\Desktop\Pedro Nakashima\Códigos, Dados, Documentação e Cheat Sheets')
""" Mudar diretório para dados Siconfi"""
caminho_wd = caminho_base / 'Dados'
os.chdir(caminho_wd)
import pandas as pd
##########################################################################################################
##########################################################################################################
##########################################################################################################
def gs_mdic(tipo, ufs, ncms):
    """Build monthly trade series (VL_FOB sums) from MDIC yearly CSV files.

    tipo: 'EXP' or 'IMP', selects the EXP_????.csv / IMP_????.csv files.
    ufs:  list of state codes (e.g. ['MS', 'MT', 'GO']).
    ncms: list of NCM product codes.

    Returns a DataFrame indexed by month with one column per
    (Brazil/state, NCM) combination; all-NaN rows are dropped and the
    remaining gaps are filled with 0.
    """
    import glob
    import numpy as np
    import pandas as pd
    pasta = caminho_base / 'Dados' / 'mdic' / 'anos'
    os.chdir(pasta)
    # -----------------------------------------------------------------------------------
    # Master monthly calendar from 1997-01 to 2021-12.
    np_datas = np.arange('1997-01-01','2022-01-01', 1, dtype='datetime64[M]')
    meses = pd.to_datetime(np_datas).to_frame()
    meses.rename(columns={0:'mês'}, inplace=True)
    meses.set_index('mês',inplace=True)
    meses.index.freq = 'MS'
    # -----------------------------------------------------------------------------------
    # Example arguments kept for manual testing:
    #tipo = 'EXP'
    #uf = 'MS'
    #ufs = ['MS','MT','GO']
    #ncm = 12019000
    #ncms = [12019000,10059010]
    busca = tipo + '_????.csv'  # e.g. 'EXP_1997.csv', 'EXP_1998.csv', ...
    # -----------------------------------------------------------------------------------
    #ncm = 12019000
    for index_ncm, ncm in enumerate(ncms):
        # Accumulate monthly totals per state for this NCM across all years.
        for index_arq, arq_nome in enumerate(glob.glob(busca)):
            print(arq_nome)
            pasta = caminho_base / 'Dados' / 'mdic' / 'anos'
            df = pd.read_csv(pasta / arq_nome,
                             encoding = 'latin',
                             delimiter = ';')
            # Build a proper datetime column from the year/month fields.
            df.rename(columns={'CO_ANO':'year','CO_MES':'month'},inplace=True)
            df['day'] = 1
            df['mês'] = pd.to_datetime(df[['year', 'month', 'day']])
            df.drop(['year','month','day'],axis=1,inplace=True)
            # Keep only the rows for this product code.
            cond1 = df['CO_NCM'] == ncm
            filtro_ncm = df.loc[cond1,:]
            df_soma_por_uf = filtro_ncm.groupby(['mês','SG_UF_NCM'])['VL_FOB'].sum().to_frame()
            if index_arq == 0:
                df_bruto = df_soma_por_uf.copy()
            else:
                df_bruto = df_bruto.append(df_soma_por_uf)
        df_bruto.reset_index(inplace=True)
        df_bruto.set_index('mês',inplace=True)
        # Brazil-wide monthly total for this NCM.
        df_bruto_br = df_bruto.groupby(['mês'])['VL_FOB'].sum().to_frame()
        # Column prefix: X for exports, M for imports.
        if tipo == 'EXP':
            tipo_sigla = 'X'
        elif tipo == 'IMP':
            tipo_sigla = 'M'
        col_nome = tipo_sigla + 'BR' + str(ncm)
        df_bruto_br.rename(columns={'VL_FOB':col_nome},inplace=True)
        # Align on the master monthly calendar.
        meses_copia = meses.copy()
        meses_copia = meses_copia.merge(df_bruto_br,how='left',left_index=True,right_index=True)
        # One extra column per requested state.
        for uf in ufs:
            cond1 = df_bruto['SG_UF_NCM'] == uf
            df_bruto_uf_i = df_bruto.copy().loc[cond1,['VL_FOB']]
            col_nome = tipo_sigla + uf + str(ncm)
            df_bruto_uf_i.rename(columns={'VL_FOB':col_nome},inplace=True)
            meses_copia = meses_copia.merge(df_bruto_uf_i,how='left',left_index=True,right_index=True)
        if index_ncm == 0:
            df_final = meses_copia.copy()
        else:
            df_final = df_final.merge(meses_copia, how='left', left_index=True, right_index=True)
    # Drop months with no data at all, then fill remaining gaps with zero.
    df_final.dropna(thresh=1, inplace=True)
    df_final.fillna(0, inplace=True)
    return df_final
# ------------------------------------
def g_médiaMóvel(df, períodos):
    """Return a copy of *df* with every column replaced by its
    `períodos`-sample rolling mean (the first `períodos - 1` rows are NaN).
    The input DataFrame is not modified."""
    suavizado = df.copy()
    for nome in suavizado.columns:
        suavizado[nome] = suavizado[nome].rolling(períodos).mean()
    return suavizado
# ------------------------------------
ufs = ['MS','MT','GO']
ncms = [12019000,10059010]
# ------------------------------------
df_series_exp = gs_mdic(tipo='EXP', ufs=ufs, ncms=ncms)
média_móvel = g_médiaMóvel(df_series_exp, períodos=12)
# ------------------------------------
pasta = caminho_base / 'Dados' / 'mdic'
with pd.ExcelWriter(pasta / 'séries.xlsx', mode='a', engine="openpyxl") as writer:
df_series_exp.to_excel(writer, sheet_name='brutas', index=True)
# ------------------------------------
pasta = caminho_base / 'Dados' / 'mdic'
with pd.ExcelWriter(pasta / 'séries.xlsx', mode='a', engine="openpyxl") as writer:
média_móvel.to_excel(writer, sheet_name='médiaMóvel', index=True)
| [
"pedromfnakashima@gmail.com"
] | pedromfnakashima@gmail.com |
ae00981c254d0dc088012dfacb9cdee40e031a73 | 0aec617440075b73e5da64cd1477b6a098ed864c | /data_structures/Project_Show_me_Data_Structures/active_directory.py | f76d2179be46597200adc649a3296be5cf3735c2 | [
"MIT"
] | permissive | severian5it/udacity_dsa | 0b1512cc8c5125149d6be6f78fa14446e7ab5c25 | e47f27b0179961d6107fe46a236ac7d887fe6816 | refs/heads/main | 2023-03-07T02:24:37.299599 | 2021-02-14T10:34:50 | 2021-02-14T10:34:50 | 316,949,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | class Group(object):
    def __init__(self, _name):
        # A group has a display name, nested child groups, and direct users.
        self.name = _name
        self.groups = []  # nested Group instances
        self.users = []   # user names/ids (strings)

    def add_group(self, group):
        # Attach *group* as a child of this group.
        self.groups.append(group)

    def add_user(self, user):
        # Register *user* as a direct member of this group.
        self.users.append(user)

    def get_groups(self):
        # Return the list of child groups.
        return self.groups

    def get_users(self):
        # Return the list of direct member users.
        return self.users

    def get_name(self):
        # Return the group's display name.
        return self.name
def is_user_in_group(user, group):
    """
    Return True if user is in the group, False otherwise.

    Membership is either direct (user appears in the group's own user
    list) or transitive (user appears anywhere in a nested sub-group).

    Args:
        user(str): user name/id
        group(class:Group): group to check user membership against
    """
    # Direct membership: return immediately on a hit.
    if user in group.get_users():
        return True
    # Recurse into sub-groups, short-circuiting at the first match instead
    # of scanning every remaining branch like the flag-based original did.
    return any(is_user_in_group(user, g) for g in group.get_groups())
if __name__ == "__main__":
parent = Group("parent")
child = Group("child")
sub_child = Group("subchild")
sub_child_user = "sub_child_user"
sub_child.add_user(sub_child_user)
child.add_group(sub_child)
parent.add_group(child)
# Test Case1
print(f"is sub child user in parent group? {is_user_in_group(sub_child_user, parent)}") # expected True
# Test Case2
print(f"is sub child user in child group? {is_user_in_group(sub_child_user, child)}") # expected True
# Test Case3
print(f"is sub child user in sub child group? {is_user_in_group(sub_child_user, sub_child)}") # expected True
# Test Case4
print(f"is sub child user2 in sub child group? {is_user_in_group('sub_child_user2', parent)}") # expected False
# Test Case6 child empty
print(f"is empty string in sub child group? {is_user_in_group('', parent)}") # expected False
# Test Case7 child None
print(
f"is Nonein sub child group? {is_user_in_group(None, parent)}") # expected False
| [
"pierluca@amazon.com"
] | pierluca@amazon.com |
4fc662539852d925c3aa23683981860cecb38cb4 | e2590e0a78046a22131b69c76ebde21bf042cdd1 | /ABC201_300/ABC243/B.py | e7f62726863d27002413f7819fe6d8366e13c7d8 | [] | no_license | masato-sso/AtCoderProblems | b8e23941d11881860dcf2942a5002a2b19b1f0c8 | fbc02e6b7f8c6583e5a4e5187463e0001fc5f4d8 | refs/heads/main | 2023-01-22T23:57:58.509585 | 2023-01-21T14:07:47 | 2023-01-21T14:07:47 | 170,867,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py |
N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
def getIndex(l, x):
    """Return the index of the first occurrence of x in l, or -1 if absent.

    Uses a single scan via list.index (which raises ValueError when the
    element is missing) instead of the original membership test followed
    by a second search.
    """
    try:
        return l.index(x)
    except ValueError:
        return -1
ans1 = 0
ans2 = 0
for aIdx,a in enumerate(A):
bIdx = getIndex(B,a)
if(bIdx == -1):
continue
if(aIdx == bIdx):
ans1+=1
else:
ans2+=1
print(ans1)
print(ans2) | [
"masato@seijinnoMacBook-Pro-2.local"
] | masato@seijinnoMacBook-Pro-2.local |
cad633fa0cd47dc61a6c9b15c55400e0fab5095e | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634697451274240_0/Python/Quorrin/pancakes.py | c760b7448180fb75bcb85128bc8bb4b5612e82e8 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | f = open('input.in', 'r')
n = int(f.readline().strip())
out = open('output.txt', 'w')
pancakeDict = {}
def condenseStack(pancakes):
    """Collapse runs of identical adjacent symbols into a single symbol.

    Accepts a string or list of '+'/'-' characters and returns a list.
    """
    condensed = []
    for symbol in pancakes:
        if not condensed or condensed[-1] != symbol:
            condensed.append(symbol)
    return condensed
def flipSign(sign):
    """Invert a pancake side: '-' becomes '+', anything else becomes '-'."""
    return '+' if sign == '-' else '-'
def reverse(inArr, idx):
    """Flip the top `idx` pancakes: reverse their order and invert each
    sign; entries past `idx` are copied unchanged. Returns a new list."""
    flipped_top = ['+' if side == '-' else '-' for side in reversed(inArr[:idx])]
    return flipped_top + list(inArr[idx:])
def getClearedLevel(inputArr):
    """Return the 1-based position of the deepest '-' in the stack,
    or 0 when every pancake already shows '+'."""
    for pos in range(len(inputArr) - 1, -1, -1):
        if inputArr[pos] == '-':
            return pos + 1
    return 0
def flipped(inArr):
    """True when no '-' remains anywhere in the stack."""
    return '-' not in inArr
def dp(inputArr, depth, unsortedLevel):
    # Memoized search for the minimum number of prefix flips needed to turn
    # every pancake happy-side-up ('+').
    #   inputArr:      condensed stack (no adjacent duplicates)
    #   depth:         number of flips performed so far
    #   unsortedLevel: 1-based index of the deepest '-' (0 == solved)
    # Results are cached in the module-level `pancakeDict`, keyed by the
    # condensed stack string; the search depth is capped at 20.
    # NOTE(review): the cached value is returned regardless of the current
    # `depth`, although results look depth-dependent -- verify correctness.
    if (unsortedLevel == 0):
        return depth
    elif (depth > 20):
        return 20
    else:
        minDepth = 200
        # Try flipping every prefix up to the deepest unsolved pancake.
        for i in range(1,unsortedLevel+1):
            newPancakes = condenseStack(reverse(inputArr, i))
            pHash = ''.join(newPancakes)
            if (pHash in pancakeDict):
                return pancakeDict[pHash]
            else:
                currentDepth = dp(newPancakes, depth+1, getClearedLevel(newPancakes))
                pancakeDict[pHash] = currentDepth
                if (currentDepth < minDepth):
                    minDepth = currentDepth
        return minDepth
for i in range(n):
inputStr = f.readline().strip()
pancakes = condenseStack(list(inputStr))
print (pancakes)
count = -1
if (len(inputStr) == 0):
count = 0
elif (len(inputStr) == 1):
if (inputStr == "-"):
count = 1
else:
count = 0
else:
pancakeDict = {}
count = dp(pancakes, 0, getClearedLevel(pancakes))
print (count)
out.write("Case #" + str(i+1) + ": " + str(count)+ "\n")
out.close()
f.close()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
df403f3387076740ba819c48f201f9fb9d443b4a | b5c92150b0fb76daf9b8725c7a64ba1b54f2d9c7 | /product_grammage/models/purchase.py | 7804f5922d646144f5f8f8d4ae275d50bff4d991 | [] | no_license | hashemalycore/CMNT_00107_2017_SAR_addons | 63da3c66eddc99b585671cc85a53661a497771aa | 071646e495fcd9563f72a02f6630ee4d70afa438 | refs/heads/master | 2020-04-02T10:25:32.457793 | 2018-02-12T13:09:16 | 2018-02-12T13:09:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # -*- coding: utf-8 -*-
# © 2017 Comunitea Servicios Tecnológicos S.L. (http://comunitea.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import models, fields
class PurchaseOrder(models.Model):
    # Extends purchase order *lines* with fabric-related fields.
    # NOTE(review): the class is named PurchaseOrder but inherits
    # 'purchase.order.line' -- confirm the intended model.
    _inherit = 'purchase.order.line'

    # Mirrored from the line's product via `related`.
    thread = fields.Float('Thread', related='product_id.thread')
    gauge = fields.Float('Gauge', related='product_id.gauge')
    # Entered directly on the line.
    width = fields.Float('Width')
    grammage = fields.Float('Grammage')
| [
"javierjcf@gmail.com"
] | javierjcf@gmail.com |
4a25a135777867175549f9b2359a28dc05cfc0ef | ed0ed8863e05384db504fa58e47db1b49977bb7d | /AnalysisAndDesignOfAlgorithms/python/package_memory.py | 350a2ed835ac8013a3fae6aeca7fb450a8b27701 | [] | no_license | rogeroyer/Accumulation | fa4f6083cd1fe3d112fe4d62275280033070b174 | ac04f486761744362c57abcc51f7768c775f270c | refs/heads/master | 2022-07-22T18:58:05.679619 | 2022-07-14T04:39:53 | 2022-07-14T04:39:53 | 102,483,020 | 10 | 33 | null | null | null | null | UTF-8 | Python | false | false | 2,218 | py | class package_memory_deal(object):
    def __init__(self, weight, value, max_weight, printMatrix=False):
        self.weight = weight  # item weights (index 0 is an unused dummy item)
        self.value = value    # item values (index 0 is an unused dummy item)
        self.max_weight = max_weight  # knapsack capacity
        self.array_length = len(self.value)  # number of items (incl. dummy)
        # Memo table: -1 marks a cell that has not been computed yet.
        self.select = [[-1 for i in range(self.max_weight+1)] for j in range(self.array_length)]
        self.printMatrix = printMatrix  # whether to print the memo table
        # Base case: with no items, any capacity is worth 0 ...
        for index in range(0, self.max_weight+1):
            self.select[0][index] = 0
        # ... and with zero capacity, any item prefix is worth 0.
        for index in range(1, self.array_length):
            self.select[index][0] = 0
def print_out(self):
print(self.MFKnapsack(self.array_length - 1, self.max_weight))
if self.printMatrix is True:
self.select = np.array(self.select)
print(self.select)
self.show_element()
    def MFKnapsack(self, i, j):
        '''Memoized 0/1 knapsack: best value using items 0..i with capacity j.'''
        if self.select[i][j] < 0:
            if j < self.weight[i]:
                # Item i does not fit: inherit the best value without it.
                value = self.MFKnapsack(i - 1, j)
            else:
                # Either skip item i, or take it and solve the remainder.
                value = max(self.MFKnapsack(i - 1, j), self.value[i] + self.MFKnapsack(i - 1, j - self.weight[i]))
            self.select[i][j] = value
        return self.select[i][j]  # maximal value for (i, j)
    def show_element(self):
        '''Print the items chosen in an optimal solution by backtracking.'''
        remain_space = self.max_weight  # remaining capacity while backtracking
        for i in range(self.array_length-1, 0, -1):
            if remain_space >= self.weight[i]:
                # Item i was taken iff removing it accounts exactly for its value.
                # NOTE(review): with memoization some cells may still be -1;
                # verify the needed cells were filled before relying on this.
                if self.select[i][remain_space] - self.select[i-1][remain_space-self.weight[i]] == self.value[i]:
                    print('item ', i, ' is selected!')
                    remain_space = remain_space - self.weight[i]
def main():
    """Demo: solve a small 0/1 knapsack instance and print the solution
    (including the memo table and selected items)."""
    weights = [0, 2, 1, 3, 2]
    values = [0, 12, 10, 20, 15]
    capacity = 5
    solver = package_memory_deal(weights, values, capacity, True)
    solver.print_out()
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | rogeroyer.noreply@github.com |
8c6b878927c7dac6590fca1e63d2cfb1a5ef4b1f | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/vanilla/testcase/firstcases/testcase3_003.py | cf0d72f444c3338e02c82106cee27e79e171819d | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,748 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'ch.blinkenlights.android.vanilla',
'appActivity' : 'ch.blinkenlights.android.vanilla.LibraryActivity',
'resetKeyboard' : True,
'androidCoverage' : 'ch.blinkenlights.android.vanilla/ch.blinkenlights.android.vanilla.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    # Run a shell command and terminate it after `timeout` seconds.
    # NOTE(review): this always waits the full timeout, even when the
    # command finishes earlier.
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
    # Look up a UI element by UiAutomator selector, retrying for up to
    # 5 seconds. As a last resort, tap the screen (to dismiss any overlay)
    # and try once more, letting a final failure propagate.
    # NOTE(review): the parameter `str` shadows the builtin of the same name.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2) :
    # Try the primary selector `str1` for ~2 seconds, then fall back to the
    # alternative selector `str2` for ~5 seconds; finally tap the screen
    # and retry `str2` once, letting a final failure propagate.
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    # Swipe between two points given as fractions of the screen size,
    # retrying once after a short pause if the first attempt fails.
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    return
# testcase003
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"Artists\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"ch.blinkenlights.android.vanilla:id/dragger\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Happy High EPBlackbird Blackbird\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"Play all\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"01:48\")", "new UiSelector().className(\"android.widget.TextView\").instance(13)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"01:48\")", "new UiSelector().className(\"android.widget.TextView\").instance(13)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"intermissionchiptek, She\")", "new UiSelector().className(\"android.widget.TextView\").instance(22)")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"ch.blinkenlights.android.vanilla:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"ch.blinkenlights.android.vanilla:id/cover\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_003\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'ch.blinkenlights.android.vanilla'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
374c6dc3c8939e43c4c3fecd5522911c9253d932 | c8098e3907f39210ac159cf78f66cd871440fc10 | /vagrant/lesson 1/database_setup.py | 45e857aab66b72fc7b79507451798969667f5bee | [] | no_license | jaapdejong/fullstack-nanodegree-vm--ud088 | e297dbc421d19df61499a16ae87ca572426228df | ba383a9ab7a315279e4acd0425bbca1e25ba943a | refs/heads/master | 2021-06-11T23:14:37.669565 | 2017-01-22T11:59:55 | 2017-01-22T11:59:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | #!/usr/bin/python
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Restaurant(Base):
    # ORM model for the `restaurant` table.
    __tablename__ = 'restaurant'

    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)  # restaurant name (required)
class MenuItem(Base):
    # ORM model for the `menu_item` table; each row belongs to a Restaurant.
    __tablename__ = 'menu_item'

    name = Column(String(80), nullable=False)  # dish name (required)
    id = Column(Integer, primary_key=True)
    description = Column(String(250))
    price = Column(String(8))   # stored as text, not a numeric type
    course = Column(String(250))
    restaurant_id = Column(Integer, ForeignKey('restaurant.id'))
    restaurant = relationship(Restaurant)
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.create_all(engine)
| [
"jaap.dejong@nedap.com"
] | jaap.dejong@nedap.com |
4f657bc1f1c2e30b69b8ba84ed32bd6ee4e0ddf7 | 76133934b1dd287273a9bfa0c801d10d08a21b21 | /test/functional/getchaintips.py | 95d9627833e5526bc62095eaf21841cd9365d834 | [
"MIT"
] | permissive | kenfmcoin/kenfmcoin | d8783b34fcb3ae01067e8d1b33e3a73e3b82b1f9 | 1fa48487593233f2066757dc54f48b2349e2d9db | refs/heads/master | 2020-03-10T17:53:31.569229 | 2018-04-14T12:28:55 | 2018-04-14T12:28:55 | 129,511,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The KenFMcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import KenFMcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (KenFMcoinTestFramework):
    """Check getchaintips before, during, and after a network split."""

    def set_test_params(self):
        # Four nodes, later split into two halves of two nodes each.
        self.num_nodes = 4

    def run_test (self):
        # Initially every node sees a single active chain at height 200.
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 1)
        assert_equal (tips[0]['branchlen'], 0)
        assert_equal (tips[0]['height'], 200)
        assert_equal (tips[0]['status'], 'active')

        # Split the network and build two chains of different lengths.
        self.split_network ()

        self.nodes[0].generate(10)
        self.nodes[2].generate(20)
        self.sync_all([self.nodes[:2], self.nodes[2:]])

        # First half: its own 10-block extension is the only (active) tip.
        tips = self.nodes[1].getchaintips ()
        assert_equal (len (tips), 1)
        shortTip = tips[0]
        assert_equal (shortTip['branchlen'], 0)
        assert_equal (shortTip['height'], 210)
        assert_equal (tips[0]['status'], 'active')

        # Second half: its 20-block extension, also a single active tip.
        tips = self.nodes[3].getchaintips ()
        assert_equal (len (tips), 1)
        longTip = tips[0]
        assert_equal (longTip['branchlen'], 0)
        assert_equal (longTip['height'], 220)
        assert_equal (tips[0]['status'], 'active')

        # Join the network halves and check that we now have two tips
        # (at least at the nodes that previously had the short chain).
        self.join_network ()
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 2)

        # The longer chain is active; the short one survives as a valid fork.
        assert_equal (tips[0], longTip)
        assert_equal (tips[1]['branchlen'], 10)
        assert_equal (tips[1]['status'], 'valid-fork')
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| [
"37983255+spineinhalb@users.noreply.github.com"
] | 37983255+spineinhalb@users.noreply.github.com |
10f80e4c65bd78181993027e8a5a587a62070faf | cfbf8e78017a7c97107112680b04b2733bd27f8e | /Raw_data_Modules/Modules/DatabaseConnector.py | 852a60a6801bde9f0fdb33ac213cfeb2f701874b | [] | no_license | WenRichard/Web-page-Recommendation | bdd279e382a119a2068480f5f49e1703d0703777 | 0757b43f2d3f62c29c4aca9c1dd7a8b327204f32 | refs/heads/master | 2020-03-27T20:11:58.218795 | 2018-09-02T04:46:46 | 2018-09-02T04:46:46 | 147,047,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | import pymysql
import pandas as pd
# Connect to the database.
def connector(host, user, password, database):
    """Open and return a new pymysql connection to *database*."""
    return pymysql.connect(host, user, password, database)
# Disconnect from the database.
def closer(db):
    # Close the given connection and return a confirmation message.
    db.close()
    return "the database has been closed."
# Read data from a CSV text file.
# Expected layout: url, date, features(1~n), labels(1~m).
class CSVReader(object):
    """Load a CSV file into a pandas DataFrame at construction time."""

    def __init__(self, file_path):
        self.file_path = file_path  # path of the CSV file to load
        self.df = self.csv_to_df()  # eagerly parsed DataFrame

    def csv_to_df(self):
        """Parse ``self.file_path`` and return its contents as a DataFrame."""
        return pd.read_csv(self.file_path)
# Database connector class.
class MySQLReader(object):
    '''An object holding a connection to a MySQL database.'''

    def __int__(self, host="localhost", user="root",
                password="broadtech", database="world"):
        # NOTE(review): spelled __int__, not __init__, so it is NOT called
        # automatically on construction -- callers invoke d.__int__()
        # explicitly (see the __main__ block below). Renaming it to
        # __init__ would break those callers.
        self.database_connector = pymysql.connect(host, user, password, database)

    def connector(self, host, user, password, database):
        # Open and return a new, independent pymysql connection.
        db = pymysql.connect(host, user, password, database)
        return db

    def close(self):
        # Close the connection held by this object.
        self.database_connector.close()

    def exec(self, sql_exp=""):
        '''Execute a SQL statement and return all rows, or None if empty.'''
        cursor = self.database_connector.cursor()
        cursor.execute(sql_exp)
        data = cursor.fetchall()
        if data is ():
            # NOTE(review): `is ()` relies on the CPython empty-tuple
            # singleton; `== ()` would be the portable comparison.
            print("Query result is empty.")
            return None
        return data

    def get_table(self, table_name="None"):
        '''Fetch a whole table and convert it to a pandas DataFrame.'''
        data = self.exec("select * from %s;" % table_name)
        if data is not None:
            data = list(map(list, data))
            data = pd.DataFrame(data)
            # Drop column 0 -- presumably an id column; verify against schema.
            del data[0]
            return data
        else:
            print("Get an empty table.")
            return None
if __name__ == "__main__":
d = MySQLReader()
d.__int__(database="WPF")
data = d.exec("select * from visit_url_features limit 1;")
print(data)
| [
"xiezhengwen2013@163.com"
] | xiezhengwen2013@163.com |
b39c2f3984971a040831df35e49517e3fb93df8c | 3e63befd66d0f8fddaba4ce8c1ed73525c32a5aa | /venv/Lib/site-packages/mediapipe/calculators/core/sequence_shift_calculator_pb2.py | ec7af4e37ba7cb45a6a8086a1af24275c15bee53 | [
"MIT"
] | permissive | tanvirtareq/awesome-hand-gesture-detection | b0ecc6636e810412950b705e6ef5c1d83099b547 | ccc836557b730cf34861301712de0de3eec1076d | refs/heads/main | 2023-06-04T02:24:34.452783 | 2021-06-18T11:36:39 | 2021-06-18T11:36:39 | 389,102,297 | 1 | 0 | MIT | 2021-07-24T13:10:45 | 2021-07-24T13:10:45 | null | UTF-8 | Python | false | true | 3,553 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/core/sequence_shift_calculator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/core/sequence_shift_calculator.proto',
package='mediapipe',
syntax='proto2',
serialized_pb=_b('\n:mediapipe/calculators/core/sequence_shift_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\"\x94\x01\n\x1eSequenceShiftCalculatorOptions\x12\x19\n\rpacket_offset\x18\x01 \x01(\x05:\x02-12W\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\x87\xba\xa9\x33 \x01(\x0b\x32).mediapipe.SequenceShiftCalculatorOptionsB\x0c\xa2\x02\tMediaPipe')
,
dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SEQUENCESHIFTCALCULATOROPTIONS = _descriptor.Descriptor(
name='SequenceShiftCalculatorOptions',
full_name='mediapipe.SequenceShiftCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packet_offset', full_name='mediapipe.SequenceShiftCalculatorOptions.packet_offset', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.SequenceShiftCalculatorOptions.ext', index=0,
number=107633927, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=112,
serialized_end=260,
)
DESCRIPTOR.message_types_by_name['SequenceShiftCalculatorOptions'] = _SEQUENCESHIFTCALCULATOROPTIONS
SequenceShiftCalculatorOptions = _reflection.GeneratedProtocolMessageType('SequenceShiftCalculatorOptions', (_message.Message,), dict(
DESCRIPTOR = _SEQUENCESHIFTCALCULATOROPTIONS,
__module__ = 'mediapipe.calculators.core.sequence_shift_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.SequenceShiftCalculatorOptions)
))
_sym_db.RegisterMessage(SequenceShiftCalculatorOptions)
_SEQUENCESHIFTCALCULATOROPTIONS.extensions_by_name['ext'].message_type = _SEQUENCESHIFTCALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_SEQUENCESHIFTCALCULATOROPTIONS.extensions_by_name['ext'])
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\242\002\tMediaPipe'))
# @@protoc_insertion_point(module_scope)
| [
"airriaislam@gmail.com"
] | airriaislam@gmail.com |
c9bc5d6c76ec5047e3101a82c773eb67ac5b156d | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow/source/tensorflow/contrib/tensor_forest/client/random_forest_test.py | 1e774dab2b06f2db402aebc4b8b64d052e5a56d6 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 3,224 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorForestTrainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.platform import test
class TensorForestTrainerTests(test.TestCase):
  """End-to-end fit/evaluate smoke tests for TensorForestEstimator."""
  def testClassification(self):
    """Tests multi-class classification using matrix data as input."""
    hparams = tensor_forest.ForestHParams(
        num_trees=3,
        max_nodes=1000,
        num_classes=3,
        num_features=4,
        split_after_samples=20)
    classifier = random_forest.TensorForestEstimator(hparams.fill())
    iris = base.load_iris()
    data = iris.data.astype(np.float32)
    labels = iris.target.astype(np.float32)
    classifier.fit(x=data, y=labels, steps=100, batch_size=50)
    classifier.evaluate(x=data, y=labels, steps=10)
  def testClassificationTrainingLoss(self):
    """Tests classification with the training-loss graph builder and hook."""
    hparams = tensor_forest.ForestHParams(
        num_trees=3, max_nodes=1000, num_classes=3, num_features=4)
    classifier = random_forest.TensorForestEstimator(
        hparams, graph_builder_class=(tensor_forest.TrainingLossForest))
    iris = base.load_iris()
    data = iris.data.astype(np.float32)
    labels = iris.target.astype(np.float32)
    monitors = [random_forest.TensorForestLossHook(10)]
    classifier.fit(x=data, y=labels, steps=100, monitors=monitors)
    classifier.evaluate(x=data, y=labels, steps=10)
  def testRegression(self):
    """Tests regression on the Boston housing data using matrix input."""
    hparams = tensor_forest.ForestHParams(
        num_trees=3,
        max_nodes=1000,
        num_classes=1,
        num_features=13,
        regression=True,
        split_after_samples=20)
    regressor = random_forest.TensorForestEstimator(hparams.fill())
    boston = base.load_boston()
    data = boston.data.astype(np.float32)
    labels = boston.target.astype(np.float32)
    regressor.fit(x=data, y=labels, steps=100, batch_size=50)
    regressor.evaluate(x=data, y=labels, steps=10)
if __name__ == "__main__":
test.main()
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
80d38101f88dedd6685e1de271ee7ba897dc1487 | 3db48e7c13b330af7c488820d14d22edf0a7cfda | /그래프 이론/[10-3]위상정렬 알고리즘.py | 06fd577232f9442862e3ff32bd6969a65947f818 | [] | no_license | kim-kiwon/Coding-test | 1555d7e7699a21655e86f892e76f784accf4b9cc | aa8563ab54596c9c6dace84494d4f68fbd8e97f4 | refs/heads/master | 2023-04-01T10:04:11.152485 | 2021-04-05T10:17:51 | 2021-04-05T10:17:51 | 328,202,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | #위상정렬 알고리즘 : 방향 그래프로 순서가 주어질 경우. 모든 노드를 순서에 거스르지 않고 정렬.
#선수과목 고려한 수강신청이 주된 문제.
#진입차수 0 인 노드 큐에 삽입. 큐에서 제거시 해당 노드에서 나가는 간선 모두제거. 반복
#큐가 비었는데 방문하지 않는 노드가 남았다 -> 사이클존재 (남은 노드 중에 진입차수 0 인 노드가 없게되므로)
from collections import deque
v, e = map(int, input().split())
indegree = [0] * (v+1) #진입차수 0으로 초기화
graph = [[] for i in range(v+1)]
#간선 입력받기
for _ in range(e):
a, b = map(int, input().split())
graph[a].append(b)
indegree[b] +=1 #b 진입차수 증가
def topology_sort():
    """Kahn's algorithm over the module-level graph/indegree tables.

    Prints the topological order of nodes 1..v, space-separated.
    """
    order = []
    # Seed the queue with every node that has no incoming edge.
    ready = deque(node for node in range(1, v + 1) if indegree[node] == 0)
    while ready:
        current = ready.popleft()
        order.append(current)
        # Removing `current` lowers each successor's indegree; successors
        # that drop to zero become ready to emit.
        for nxt in graph[current]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
    for node in order:
        print(node, end=' ')
topology_sort()
| [
"76721493+kim-kiwon@users.noreply.github.com"
] | 76721493+kim-kiwon@users.noreply.github.com |
1431f3ebabae290a7e25a9c3f1c2fd5ffb3a26eb | 34652a47355a8dbe9200db229a1bbc62619de364 | /Matlibplots/samples2/contour_label_demo.py | 44ac1ddc73c54dcdf9cad3b695a1f9fb4dc177ef | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,319 | py | #!/usr/bin/env python
"""
Illustrate some of the more advanced things that one can do with
contour labels.
See also contour_demo.py.
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
##################################################
# Define our surface
##################################################
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
##################################################
# Make contour labels using creative float classes
# Follows suggestion of Manuel Metz
##################################################
plt.figure()
# Basic contour plot
CS = plt.contour(X, Y, Z)
# Define a class that forces representation of float to look a certain way
# This remove trailing zero so '1.0' becomes '1'
class nf(float):
    """Float subclass whose repr() drops a trailing '.0'.

    clabel() formats levels via repr(), so 1.0 labels as '1' while
    1.5 still labels as '1.5'.
    """
    def __repr__(self):
        # Don't shadow the builtin ``str`` (the original did), and don't
        # re-format in the else branch -- it produced the same text.
        text = '%.1f' % (self.__float__(),)
        if text.endswith('0'):
            return '%.0f' % self.__float__()
        return text
# Recast levels to new class
CS.levels = [nf(val) for val in CS.levels ]
# Label levels with specially formatted floats
if plt.rcParams["text.usetex"]:
fmt = r'%r \%%'
else:
fmt = '%r %%'
plt.clabel(CS, CS.levels, inline=True, fmt=fmt, fontsize=10)
##################################################
# Label contours with arbitrary strings using a
# dictionary
##################################################
plt.figure()
# Basic contour plot
CS = plt.contour(X, Y, Z)
fmt = {}
strs = [ 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh' ]
for l, s in zip(CS.levels, strs):
fmt[l] = s
# Label every other level using strings
plt.clabel(CS, CS.levels[::2], inline=True, fmt=fmt, fontsize=10)
# Use a Formatter
plt.figure()
CS = plt.contour(X, Y, 100 ** Z, locator=plt.LogLocator())
fmt = ticker.LogFormatterMathtext()
fmt.create_dummy_axis()
plt.clabel(CS, CS.levels, fmt=fmt)
plt.title("$100^Z$")
plt.show()
| [
"bogdan.evanzo@gmail.com"
] | bogdan.evanzo@gmail.com |
4e950512f3e46044884aa3e2cb21adc6db35ee7a | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/d1fcf255c5402d75a3f7b450bd1e795196d5817a-<_login>-bug.py | e920b6d9685aacf11b23547d526d2bc7ebec0fda | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py |
def _login(self):
    """Log in to O'Reilly Learning with the configured credentials.

    Flow: hit the login-check URL; if the session already lands on
    /home/, mark LOGGED_IN and stop.  Otherwise extract the 'next' URI
    from the redirect, POST the credentials as JSON to the auth API,
    apply the session cookie and complete the login.  Raises
    ExtractorError when authentication fails.
    """
    (username, password) = self._get_login_info()
    # No credentials configured -- proceed anonymously.
    if (username is None):
        return
    (_, urlh) = self._download_webpage_handle('https://learning.oreilly.com/accounts/login-check/', None, 'Downloading login page')
    def is_logged(urlh):
        # A logged-in session is redirected to the /home/ page.
        return ('learning.oreilly.com/home/' in compat_str(urlh.geturl()))
    if is_logged(urlh):
        self.LOGGED_IN = True
        return
    # Not logged in: the handle now points at the login redirect, whose
    # 'next' query parameter carries the URI to continue with after auth.
    redirect_url = compat_str(urlh.geturl())
    parsed_url = compat_urlparse.urlparse(redirect_url)
    qs = compat_parse_qs(parsed_url.query)
    next_uri = compat_urlparse.urljoin('https://api.oreilly.com', qs['next'][0])
    # expected_status=400 lets a 400 response through so the JSON error
    # payload can be inspected below instead of raising immediately.
    (auth, urlh) = self._download_json_handle('https://www.oreilly.com/member/auth/login/', None, 'Logging in', data=json.dumps({
        'email': username,
        'password': password,
        'redirect_uri': next_uri,
    }).encode(), headers={
        'Content-Type': 'application/json',
        'Referer': redirect_url,
    }, expected_status=400)
    credentials = auth.get('credentials')
    if ((not auth.get('logged_in')) and (not auth.get('redirect_uri')) and credentials):
        raise ExtractorError(('Unable to login: %s' % credentials), expected=True)
    self._apply_first_set_cookie_header(urlh, 'groot_sessionid')
    (_, urlh) = self._download_webpage_handle((auth.get('redirect_uri') or next_uri), None, 'Completing login')
    if is_logged(urlh):
        self.LOGGED_IN = True
        return
    raise ExtractorError('Unable to log in')
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
b6f1115d4f04e8309fb6d9dd7f163c32b2b8bf2e | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /090_logging/_exercises/_templates/Python Logging Basics/004_Example 3 – Log File.py | a2705a8ad6d0da1023b298252b981003efb9282c | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,263 | py | # # The final option is to log messages directly to a file. This is rarely useful these days, as administrators can
# # configure syslog to write certain messages to specific files, or if deploying inside containers,
# # this is an anti-pattern. Also if you use centralized logging, having to deal with additional log files is an
# # added concern. But it is an option that is still available.
# #
# # When logging to files, the main thing to be wary of is that log files need to be rotated regularly.
# # The application needs to detect the log file being renamed and handle that situation. While Python provides its
# # own file rotation handler, it is best to leave log rotation to dedicated tools such as logrotate.
# # The WatchedFileHandler will keep track of the log file and reopen it if it is rotated, making it work well with
# # logrotate without requiring any specific signals.
# #
# # Here is a sample implementation.
#
# ______ l____
# ______ l____.h__
# ______ os
#
# handler _ l____.h__.WFH_(
# __.e___.g.. "LOGFILE", "/var/log/yourapp.log"
# formatter _ l____.F... l____.B..
# h__.sF_ f..
# root _ l____.gL_
# ?.sL_ __.e__.g__ "LOGLEVEL", "INFO"
# ?.aH_ h..
#
# t__
# e.. m..
# e___ E..
# l____.e.. "Exception in main()"
# e.. 1
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
45b3718a502218323dd157b5f41726f28c7cb8b9 | 929d12e11ed2fb69476b9d07932e38662f0ce6fc | /Two Pointers/3 Sum.py | db65bf577617e5c0cda320df9d5e87c5a5f947dd | [] | no_license | arnabs542/Data-Structures-And-Algorithms | b8f341a31ca18044bf179294fbcb0fac1f835216 | ffcc2f8a25520ce37cd1f67e6225281c85141a65 | refs/heads/master | 2022-12-13T14:09:55.005341 | 2020-09-13T11:58:58 | 2020-09-13T11:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | """
3 Sum
Problem Description
Given an array A of N integers, find three integers in A such that the sum is closest to a given number B. Return the sum of those three integers. Assume that there will only be one solution.
Problem Constraints
-10^8 <= B <= 10^8
1 <= N <= 10^4
-10^8 <= A[i] <= 10^8
Input Format
First argument is an integer array A of size N. Second argument is an integer B denoting the sum you need to get close to.
Output Format
Return a single integer denoting the sum of three integers which is closest to B.
Example Input
Input 1:
A = [-1, 2, 1, -4]
B = 1
Input 2:
A = [1, 2, 3]
B = 6
Example Output
Output 1:
2
Output 2:
6
Example Explanation
Explanation 1:
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2)
Explanation 2:
Take all elements to get exactly 6.
"""
class Solution:
    # @param A : list of integers
    # @param B : integer
    # @return an integer
    def threeSumClosest(self, A, B):
        """Return the sum of three elements of A closest to B.

        Sorts A in place, then for each leftmost index runs the standard
        two-pointer scan, keeping the first sum seen with the smallest
        absolute distance to B.  Returns 0 if A has fewer than 3 elements.
        """
        A.sort()
        size = len(A)
        best_sum = 0
        best_gap = float("inf")
        for first in range(size - 2):
            lo, hi = first + 1, size - 1
            while lo < hi:
                total = A[first] + A[lo] + A[hi]
                gap = abs(total - B)
                # Strict '<' keeps the earliest winner on ties, matching
                # the original scan order.
                if gap < best_gap:
                    best_gap = gap
                    best_sum = total
                if total > B:
                    hi -= 1
                else:
                    lo += 1
        return best_sum
| [
"noreply@github.com"
] | arnabs542.noreply@github.com |
7a11fe137ba5bf37e86e22d6f0511f13d1e1b673 | ba7134468cb18014fe2e3e1513382fa52aafd4eb | /01_Python_basic_grammar_supplement/005_Python常用内置函数/002_map_映射函数_按规律生成新列表.py | 1a656cdc6a055d276239ad4f2a42c28e99d16e3a | [] | no_license | FelixZFB/Python_advanced_learning | 4e44616b390e1c6e7da37229c7ad48c069cee71b | a71a6d733ed2134a79f02a6488807862b23438b8 | refs/heads/master | 2021-06-27T11:15:07.754719 | 2020-11-20T02:41:25 | 2020-11-20T02:41:25 | 183,116,714 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # map: 映射
# 即把集合或者列表中的元素,每一个元素都按照一定的规则进行操作,生成一个新的列表或者集合
# map函数是系统提供的具有映射功能的高阶函数,返回值是一个迭代对象
# 先看一个列表[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],以该列表为基础每个数字乘以10
# 生成一个新的列表[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
# 代码如下:
l1 = [i for i in range(10)]
l2 = []
for i in l1:
l2.append(i * 10)
print(l2)
# map函数实现上面的功能,代码变的更简单
l3 = [i for i in range(10)]
def mulTen(n):
return n * 10
l4 = map(mulTen, l3)
print(type(l4))
print(l4)
# map类型是可迭代的,使用for循环取出每个元素
for i in l4:
print(i)
| [
"18200116656@qq.com"
] | 18200116656@qq.com |
a97dfba740547798aa43e4e8df8ee377d844b172 | 9d1701a88644663277342f3a12d9795cd55a259c | /CSC108/a1/test.py | e3696c3c0ae1ba55928f89f9bbe2a46d1880dfc7 | [] | no_license | xxcocoymlxx/Study-Notes | cb05c0e438b0c47b069d6a4c30dd13ab97e4ee6d | c7437d387dc2b9a8039c60d8786373899c2e28bd | refs/heads/master | 2023-01-13T06:09:11.005038 | 2020-05-19T19:37:45 | 2020-05-19T19:37:45 | 252,774,764 | 2 | 0 | null | 2022-12-22T15:29:26 | 2020-04-03T15:44:44 | Jupyter Notebook | UTF-8 | Python | false | false | 1,238 | py | SIGN_GROUPS = '[ARI,LEO,SAG],[TAU,VIR,CAP],[GEM,LIB,AQU],[PIS,SCO,CAN]'
SIGNS = 'ARI:03,21-04,19;TAU:04,20-05,20;GEM:05,21-06,21;CAN:06,22-07,22;' + \
'LEO:07,23-08,22;VIR:08,23-09,22;LIB:09,23-10,23;SCO:10,24-11,20;' + \
'SAG:11,21-12,21;CAP:12,22-01,20;AQU:01,21-02,21;PIS:02,22-03,20;'
def get_sign_group(sign):
'''
>>> get_sign_group('ARI')
0
>>> get_sign_group('CAN')
3
'''
i = 0
group_number = 0
while i < len(SIGN_GROUPS):
if SIGN_GROUPS[i] != ']':
i += 1
elif SIGN_GROUPS[i] == ']':
group = SIGN_GROUPS[i-12:i+1]
#print(group)
i += 1
if sign not in group:
group_number += 1
else:
return group_number
def find_astrological_sign(month, date):
    '''
    >>> find_astrological_sign(9, 2)
    'VIR'
    >>> find_astrological_sign(10, 23)
    'LIB'
    '''
    # SIGNS is a run of fixed-width 16-character records shaped
    # 'XXX:mm,dd-mm,dd;'.  A (month, date) matches a sign when it falls
    # on or after the start day in the start month, or on or before the
    # end day in the end month.  Returns None when nothing matches.
    for start in range(0, len(SIGNS) - 15, 16):
        record = SIGNS[start:start + 16]
        in_first_month = int(record[4:6]) == month and date >= int(record[7:9])
        in_second_month = int(record[10:12]) == month and date <= int(record[13:15])
        if in_first_month or in_second_month:
            return record[:3]
| [
"coco.yang@mail.utoronto.ca"
] | coco.yang@mail.utoronto.ca |
c5835b7fa2b4f5ca981932cbb072da01a4eb7ff8 | fc27e1e21ad4891b1d4e769170671da1a4d32ed2 | /aliyun-python-sdk-ccs/setup.py | 02f22be7a4dd0804225e7b9d1814db5fb67fdfa6 | [
"Apache-2.0"
] | permissive | yonzhan2/aliyun-openapi-python-sdk | 3d05f7e83aeb286ad553a6a36c42ce932a1ece3e | e64873f9b528e1a83e3ea27d583f3f7998e7650b | refs/heads/master | 2020-04-11T10:22:48.511973 | 2018-12-13T09:29:21 | 2018-12-13T09:29:21 | 161,712,443 | 1 | 0 | null | 2018-12-14T00:52:39 | 2018-12-14T00:52:39 | null | UTF-8 | Python | false | false | 2,586 | py | #!/usr/bin/python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
from setuptools import setup, find_packages
import os
import sys
"""
setup module for ccs.
Created on 7/3/2015
@author: alex
"""
PACKAGE = "aliyunsdkccs"
NAME = "aliyun-python-sdk-ccs"
DESCRIPTION = "The ccs module of Aliyun Python sdk."
AUTHOR = "Aliyun"
AUTHOR_EMAIL = "aliyun-developers-efficiency@list.alibaba-inc.com"
URL = "http://develop.aliyun.com/sdk/python"
TOPDIR = os.path.dirname(__file__) or "."
VERSION = __import__(PACKAGE).__version__
# Read the long description; a context manager guarantees the handle is
# closed even if read() raises (replaces the manual try/finally).
with open("README.rst") as desc_file:
    LONG_DESCRIPTION = desc_file.read()
requires = []
if sys.version_info < (3, 3):
requires.append("aliyun-python-sdk-core>=2.0.2")
else:
requires.append("aliyun-python-sdk-core-v3>=2.3.5")
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache",
url=URL,
keywords=["aliyun","sdk","ccs"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=requires,
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development",
)
) | [
"yixiong.jxy@alibaba-inc.com"
] | yixiong.jxy@alibaba-inc.com |
f66ca3f9bba3dc867733f33d8e8453c735d63d42 | f0bc59dc9aab005ef977957e6ea6b91bbe430952 | /2018-02-22-mongo-python-kennedy/code/data/release_health.py | ad6e41fad36f7fd0f5e9e01a03516dc3dbb6197e | [
"Apache-2.0"
] | permissive | Wintellect/WintellectWebinars | 3ac0f6ae02d2d52eefb80f4f06d70f44e0d66095 | 5a59d9742c340022d58ec7e2cda69a1eba0feb53 | refs/heads/master | 2023-03-02T06:31:25.457579 | 2022-04-29T19:26:55 | 2022-04-29T19:26:55 | 87,122,981 | 68 | 124 | Apache-2.0 | 2023-03-01T02:39:17 | 2017-04-03T21:33:32 | JavaScript | UTF-8 | Python | false | false | 192 | py | import mongoengine
class ReleaseHealth(mongoengine.EmbeddedDocument):
    """Embedded document describing the health of a package release.

    NOTE(review): field semantics are inferred from the names only --
    confirm against the document that embeds this.
    """
    # Continuous-integration flag.
    ci = mongoengine.BooleanField()
    # Test-coverage figure.
    coverage = mongoengine.FloatField()
    # Aggregated health score.
    health_index = mongoengine.FloatField()
| [
"mikeckennedy@gmail.com"
] | mikeckennedy@gmail.com |
c9967a831db5b1498cb70bbd89be219e0c57becd | bb6ebff7a7f6140903d37905c350954ff6599091 | /tools/telemetry/telemetry/timeline/process.py | aa94a9cc40d91e045da00eb4f426995817470912 | [
"BSD-3-Clause",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 2,450 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.event_container as event_container
import telemetry.timeline.counter as tracing_counter
import telemetry.timeline.thread as tracing_thread
class Process(event_container.TimelineEventContainer):
  ''' The Process represents a single userland process in the trace.

  Holds the process's threads and counters and exposes iteration helpers
  over them.  (Python 2 code: uses dict.itervalues().)
  '''
  def __init__(self, parent, pid):
    super(Process, self).__init__('process %s' % pid, parent)
    self.pid = pid
    # Maps thread id -> Thread and counter full name -> Counter.
    self._threads = {}
    self._counters = {}
  @property
  def threads(self):
    return self._threads
  @property
  def counters(self):
    return self._counters
  def IterChildContainers(self):
    # Children of a process are its threads followed by its counters.
    for thread in self._threads.itervalues():
      yield thread
    for counter in self._counters.itervalues():
      yield counter
  def IterAllSlicesOfName(self, name):
    for thread in self._threads.itervalues():
      for s in thread.IterAllSlicesOfName(name):
        yield s
  def IterAllAsyncSlicesOfName(self, name):
    for thread in self._threads.itervalues():
      for s in thread.IterAllAsyncSlicesOfName(name):
        yield s
  def IterEventsInThisContainer(self):
    # A process owns no events directly; 'return' before 'yield' makes
    # this an empty generator instead of a function returning None.
    return
    yield # pylint: disable=W0101
  def GetOrCreateThread(self, tid):
    thread = self.threads.get(tid, None)
    if thread:
      return thread
    thread = tracing_thread.Thread(self, tid)
    self._threads[tid] = thread
    return thread
  def GetCounter(self, category, name):
    # Counters are keyed by '<category>.<name>'.
    counter_id = category + '.' + name
    if counter_id in self.counters:
      return self.counters[counter_id]
    raise ValueError(
        'Counter %s not found in process with id %s.' % (counter_id,
                                                         self.pid))
  def GetOrCreateCounter(self, category, name):
    try:
      return self.GetCounter(category, name)
    except ValueError:
      ctr = tracing_counter.Counter(self, category, name)
      self._counters[ctr.full_name] = ctr
      return ctr
  def AutoCloseOpenSlices(self, max_timestamp, thread_time_bounds):
    # thread_time_bounds maps Thread -> a bounds object with a .max field.
    for thread in self._threads.itervalues():
      thread.AutoCloseOpenSlices(max_timestamp, thread_time_bounds[thread].max)
  def FinalizeImport(self):
    for thread in self._threads.itervalues():
      thread.FinalizeImport()
    for counter in self._counters.itervalues():
      counter.FinalizeImport()
| [
"mrobbeloth@pdiarm.com"
] | mrobbeloth@pdiarm.com |
60ab6a543eed1a43ddab5434945c723e9390423a | e26437e26ebb17187ae9c9caaa5dfc4208a7ec1d | /venv/bin/pyreverse | 9f96a391a24de4c7fb7d39e36f0c1b16bf65b197 | [
"CC0-1.0"
] | permissive | OseiasBeu/PyECom | 93f36fe22aca1b8c06be0fa0027d6cd42e614b6a | 2ea4e7e3be4ca015fb1bbc1083aa3f2d44accc5f | refs/heads/master | 2022-12-15T00:16:29.799351 | 2020-08-31T21:16:00 | 2020-08-31T21:16:00 | 287,870,077 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | #!/home/oseiasbeu/Documents/djangoecommerce-aula002/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_pyreverse())
| [
"oseiasbeu@outlook.com"
] | oseiasbeu@outlook.com | |
da66572725917bc7eae7416aef1d229579a87d5e | 105d55b45e36ae1d3062135b22781f1df0fb1653 | /oauth_tokens/providers/facebook.py | 650cdc472bd5d2c75041be8704e017c55b924156 | [
"BSD-3-Clause"
] | permissive | EndyKaufman/django-oauth-tokens | 6151dd26acb99bb53aabbe5e75d01eac6cdd377e | b813b13d383b79e1a78e15a3881be5b94680a011 | refs/heads/master | 2021-01-14T11:20:09.040188 | 2015-06-25T18:03:45 | 2015-06-25T18:03:45 | 38,067,554 | 0 | 0 | null | 2015-06-25T18:48:54 | 2015-06-25T18:48:54 | null | UTF-8 | Python | false | false | 3,939 | py | # -*- coding: utf-8 -*-
import re
import urllib
from xml.sax import saxutils as su
from bs4 import BeautifulSoup
from django.core.exceptions import ImproperlyConfigured
import requests
from ..base import AccessTokenBase, AuthRequestBase
from ..exceptions import LoginPasswordError, AccountLocked, WrongRedirectUrl
class FacebookAuthRequest(AuthRequestBase):
    '''
    Facebook authorized request class
    '''
    provider = 'facebook'
    form_action_domain = 'https://facebook.com'
    login_url = 'https://www.facebook.com/login.php'
    # Browser-like headers so Facebook serves the regular HTML login form.
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/34.0.1847.116 Chrome/34.0.1847.116 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Charset': 'utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive',
        'Host': 'www.facebook.com',
    }
    # Localized (Russian and English) markers of Facebook's
    # "account temporarily locked" interstitial page.
    account_locked_phrases = [
        'Ваш аккаунт временно заблокирован',
        'Мы заблокировали ваш аккаунт в связи с попыткой входа из незнакомого места. Пожалуйста, помогите нам подтвердить, что попытка входа была произведена вами.',
        'Your account is temporarily locked.',
    ]
    def add_data_credentials(self, data):
        """Fill the login form fields with this account's credentials."""
        data['email'] = self.username
        data['pass'] = self.password
    def authorize(self):
        '''
        Submit the login form and inspect the response body for known
        error markers, raising a specific exception for each.
        TODO: cover with tests for each condition
        '''
        response = super(FacebookAuthRequest, self).authorize()
        if 'You are trying too often' in response.content:
            # TODO: fix it
            raise Exception("Facebook authorization request returns error 'You are trying too often'")
        if 'Cookies Required' in response.content:
            # Fetch the domain root to obtain fresh cookies, then retry.
            # NOTE(review): the retry's return value is discarded, so the
            # checks below run against the cookie-fetch response -- looks
            # like this should be ``response = self.authorize()``; confirm
            # before changing.
            response = requests.get(self.form_action_domain)
            self.cookies = response.cookies
            self.authorize()
        # TODO: move this to FacebookAcessToken class
        if 'API Error Code: 191' in response.content:
            raise ImproperlyConfigured(
                "You must specify URL '%s' in your facebook application settings" % self.redirect_uri)
        for account_locked_phrase in self.account_locked_phrases:
            if account_locked_phrase in response.content:
                raise AccountLocked(
                    "Facebook errored 'Your account is temporarily locked.'. Try to login via web browser")
        return response
class FacebookAccessToken(AccessTokenBase):
    """OAuth2 access-token flow for Facebook, using FacebookAuthRequest."""
    provider = 'facebook'
    type = 'oauth2'
    authorize_url = 'https://www.facebook.com/dialog/oauth'
    access_token_url = 'https://graph.facebook.com/oauth/access_token'
    # The redirect target is a 404 page; get_url_from_response() below
    # relies on that 404 status to spot the code-bearing redirect.
    redirect_uri = 'https://google.com/404'
    auth_request_class = FacebookAuthRequest
    def authorization_get_request(self):
        """Request the OAuth dialog page; raise if Facebook shows an error page."""
        response = super(FacebookAccessToken, self).authorization_get_request()
        bs = BeautifulSoup(response.content)
        # Facebook titles its error page 'Error' and puts the reason in a <div>.
        if bs.find('title').text == 'Error':
            raise WrongRedirectUrl(bs.find('div').text)
        return response
    def authorization_permissions_request(self, response):
        """Follow Facebook's meta-refresh 'Redirecting...' page, if present."""
        if 'Redirecting...' in response.content:
            matches = re.findall(r'<meta http-equiv="refresh" content="0;url=(.+)" /></head>', response.content)
            # The refresh target is URL-quoted and HTML-escaped
            # (Python 2: urllib.unquote).
            url = su.unescape(urllib.unquote(matches[0]))
            response = self.oauth.request(
                method='get', url=url, cookies=response.cookies, headers=self.auth_request.headers)
        return response
    def get_url_from_response(self, response):
        """Return the redirect URL carrying the auth code, or None."""
        if response.status_code == 404 and 'code=' in response.url:
            return response.url
        else:
            return None
| [
"ramusus@gmail.com"
] | ramusus@gmail.com |
844a9a758831717b2da46cbd65a2b6d94b78da26 | ac8b725681e25177c5de3daf58afe00135241d0f | /leetcode/0622_design_circular_queue.py | 238c3abdb146d116221b12774b973ca013cac211 | [
"MIT"
] | permissive | jacquerie/leetcode | 7af100ea1d7292c8c3da34210cf04d891be5561b | 0cb213b9c7bcb6efa11210e9ebc291befb560bb9 | refs/heads/master | 2022-05-19T22:19:46.284065 | 2022-03-27T02:41:58 | 2022-03-27T02:41:58 | 129,323,741 | 3 | 0 | MIT | 2021-01-04T01:41:50 | 2018-04-12T23:51:56 | Python | UTF-8 | Python | false | false | 1,228 | py | # -*- coding: utf-8 -*-
class MyCircularQueue:
    """Fixed-capacity FIFO queue implemented as a ring buffer.

    ``index`` points at the current head slot and ``count`` tracks how
    many slots are occupied; the tail slot is derived modulo capacity.
    """

    def __init__(self, k):
        self.capacity = k
        self.count = 0
        self.elements = [0] * k
        self.index = 0

    def enQueue(self, value):
        """Insert at the tail; returns False when the queue is full."""
        if self.isFull():
            return False
        tail = (self.index + self.count) % self.capacity
        self.elements[tail] = value
        self.count += 1
        return True

    def deQueue(self):
        """Drop the head element; returns False when the queue is empty."""
        if self.isEmpty():
            return False
        self.index = (self.index + 1) % self.capacity
        self.count -= 1
        return True

    def Front(self):
        """Head value, or -1 when empty."""
        return -1 if self.isEmpty() else self.elements[self.index]

    def Rear(self):
        """Tail value, or -1 when empty."""
        if self.isEmpty():
            return -1
        tail = (self.index + self.count - 1) % self.capacity
        return self.elements[tail]

    def isEmpty(self):
        return self.count == 0

    def isFull(self):
        return self.count == self.capacity
if __name__ == "__main__":
obj = MyCircularQueue(3)
assert obj.enQueue(1)
assert obj.enQueue(2)
assert obj.enQueue(3)
assert not obj.enQueue(4)
assert 3 == obj.Rear()
assert obj.isFull()
assert obj.deQueue()
assert obj.enQueue(4)
assert 4 == obj.Rear()
| [
"jacopo.notarstefano@gmail.com"
] | jacopo.notarstefano@gmail.com |
82d8010ea973ca811c5b181a212f6d636c8b8d9e | 1e30788a9e045e3bda2cfcb3bb42adfa7ee85dae | /venev/lib/python2.7/site-packages/coverage/misc.py | e3723c1847da8137214715ca5849ac49eb6f0584 | [
"MIT"
] | permissive | CompeteLeak/crankycoin | 1bee3a032c4c6360093035aed1a7842cfffb46f0 | 9376fbd3095429f2d46a3e4436023f814bb2e36a | refs/heads/master | 2020-03-10T15:59:10.016605 | 2018-05-01T06:16:24 | 2018-05-01T06:16:24 | 129,462,940 | 0 | 0 | MIT | 2018-04-25T20:56:12 | 2018-04-13T23:17:30 | Python | UTF-8 | Python | false | false | 7,487 | py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Miscellaneous stuff for coverage.py."""
import errno
import hashlib
import inspect
import locale
import os
import sys
import types
from coverage import env
from coverage.backward import string_class, to_bytes, unicode_class
ISOLATED_MODULES = {}
def isolate_module(mod):
    """Return a private, memoized copy of module *mod*.

    Copying shields coverage.py from test suites that aggressively mock
    module attributes (e.g. os.path.exists): we keep using the attributes
    as they were when the copy was made. Nested modules are copied
    recursively, and copies are cached in ISOLATED_MODULES.
    """
    cached = ISOLATED_MODULES.get(mod)
    if cached is not None:
        return cached
    clone = types.ModuleType(mod.__name__)
    # Register the clone before filling it so recursive references terminate.
    ISOLATED_MODULES[mod] = clone
    for attr in dir(mod):
        member = getattr(mod, attr)
        if isinstance(member, types.ModuleType):
            member = isolate_module(member)
        setattr(clone, attr, member)
    return clone
os = isolate_module(os)
# Use PyContracts for assertion testing on parameters and returns, but only if
# we are running our own test suite.
# Under coverage.py's own test suite, PyContracts checks argument/return
# contracts; in production both decorators collapse to no-ops.
if env.TESTING:
    from contracts import contract  # pylint: disable=unused-import
    from contracts import new_contract as raw_new_contract

    def new_contract(*args, **kwargs):
        """A proxy for contracts.new_contract that doesn't mind happening twice."""
        try:
            return raw_new_contract(*args, **kwargs)
        except ValueError:
            # During meta-coverage, this module is imported twice, and
            # PyContracts doesn't like redefining contracts. It's OK.
            pass

    # Define contract words that PyContract doesn't have.
    new_contract('bytes', lambda v: isinstance(v, bytes))
    if env.PY3:
        new_contract('unicode', lambda v: isinstance(v, unicode_class))
else:  # pragma: not covered
    # We aren't using real PyContracts, so just define a no-op decorator as a
    # stunt double.
    def contract(**unused):
        """Dummy no-op implementation of `contract`."""
        return lambda func: func

    def new_contract(*args_unused, **kwargs_unused):
        """Dummy no-op implementation of `new_contract`."""
        pass
def nice_pair(pair):
    """Render a (start, end) pair as "n" when both ends match, else "start-end"."""
    first, last = pair
    if first == last:
        return "%d" % first
    return "%d-%d" % (first, last)


def format_lines(statements, lines):
    """Summarize *lines* as comma-separated ranges, e.g. "1-2, 5-11, 13-14".

    Runs of consecutive *statements* that all appear in *lines* are coalesced
    into one start-end range, even when there are numeric gaps between the
    statements. For example, statements [1,2,3,4,5,10,11,12,13,14] with
    lines [1,2,5,10,11,13,14] formats as "1-2, 5-11, 13-14".
    """
    statements = sorted(statements)
    lines = sorted(lines)
    pairs = []
    run_start = run_end = None
    line_idx = 0
    for stmt in statements:
        if line_idx >= len(lines):
            break
        if stmt == lines[line_idx]:
            # Statement is covered: open a run or extend the current one.
            if run_start is None:
                run_start = lines[line_idx]
            run_end = lines[line_idx]
            line_idx += 1
        elif run_start:
            # Uncovered statement terminates the current run.
            pairs.append((run_start, run_end))
            run_start = None
    if run_start:
        pairs.append((run_start, run_end))
    return ', '.join(nice_pair(p) for p in pairs)
def expensive(fn):
    """A decorator to indicate that a method shouldn't be called more than once.

    Normally, this does nothing.  During testing (env.TESTING), the wrapped
    method raises an exception if called a second time on the same instance.
    """
    if env.TESTING:
        # The first call is recorded on the instance under a per-method flag.
        attr = "_once_" + fn.__name__

        def _wrapped(self):
            """Inner function that checks the cache."""
            if hasattr(self, attr):
                raise Exception("Shouldn't have called %s more than once" % fn.__name__)
            setattr(self, attr, True)
            return fn(self)
        return _wrapped
    else:
        return fn
def bool_or_none(b):
    """Coerce *b* to bool, except that None passes through unchanged."""
    return None if b is None else bool(b)
def join_regex(regexes):
    """Build one regex that matches whenever any of *regexes* matches."""
    # Each alternative is wrapped in a non-capturing group so the
    # alternation can't bleed into a pattern's own "|" operators.
    parts = ["(?:%s)" % rx for rx in regexes]
    return "|".join(parts)
def file_be_gone(path):
    """Delete *path*; a missing file is not an error, anything else is."""
    try:
        os.remove(path)
    except OSError as exc:
        # Only "no such file" is tolerated; re-raise every other failure.
        if exc.errno == errno.ENOENT:
            return
        raise
def output_encoding(outfile=None):
    """Pick the text encoding for writing to *outfile* (default: sys.stdout).

    Falls back from the stream's own encoding, to the real stdout's
    encoding, to the locale's preferred encoding.
    """
    stream = sys.stdout if outfile is None else outfile
    for candidate in (
        getattr(stream, "encoding", None),
        getattr(sys.__stdout__, "encoding", None),
    ):
        if candidate:
            return candidate
    return locale.getpreferredencoding()
class Hasher(object):
    """Hashes Python data into md5."""

    def __init__(self):
        # Single accumulating digest; the order of update() calls matters.
        self.md5 = hashlib.md5()

    def update(self, v):
        """Add `v` to the hash, recursively if needed."""
        # Mix in the type name first so e.g. 1 and "1" hash differently.
        self.md5.update(to_bytes(str(type(v))))
        if isinstance(v, string_class):
            self.md5.update(to_bytes(v))
        elif isinstance(v, bytes):
            self.md5.update(v)
        elif v is None:
            pass
        elif isinstance(v, (int, float)):
            self.md5.update(to_bytes(str(v)))
        elif isinstance(v, (tuple, list)):
            for e in v:
                self.update(e)
        elif isinstance(v, dict):
            # Keys are sorted so dicts hash independently of insertion order.
            keys = v.keys()
            for k in sorted(keys):
                self.update(k)
                self.update(v[k])
        else:
            # Arbitrary object: hash its non-dunder, non-callable attributes.
            for k in dir(v):
                if k.startswith('__'):
                    continue
                a = getattr(v, k)
                if inspect.isroutine(a):
                    continue
                self.update(k)
                self.update(a)

    def hexdigest(self):
        """Retrieve the hex digest of the hash."""
        return self.md5.hexdigest()
def _needs_to_implement(that, func_name):
"""Helper to raise NotImplementedError in interface stubs."""
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
name = that._coverage_plugin_name
else:
thing = "Class"
klass = that.__class__
name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
raise NotImplementedError(
"{thing} {name!r} needs to implement {func_name}()".format(
thing=thing, name=name, func_name=func_name
)
)
class CoverageException(Exception):
    """Base class for all exceptions raised by coverage.py itself."""


class NoSource(CoverageException):
    """The source file for a module could not be located."""


class NoCode(NoSource):
    """Neither source nor any code at all could be found."""


class NotPython(CoverageException):
    """A file expected to be Python source failed to parse."""


class ExceptionDuringRun(CoverageException):
    """Customer code raised while running.

    Construct it with three arguments, the values from `sys.exc_info`.
    """
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
bf4e0aab1c159295634285c1e66c3ddbf71eaa43 | 35fc3136ca3f4af52ebeb36cedcd30b41d685146 | /RNASeq/pipelines_ds/RNASeq_MDD21.py | f7d2cd5bd88f8ae9d1fc645cae3f5ac64ce93125 | [] | no_license | stockedge/tpot-fss | cf260d9fd90fdd4b3d50da168f8b780bb2430fd1 | d1ee616b7552ef254eb3832743c49a32e1203d6a | refs/heads/master | 2022-09-19T13:10:30.479297 | 2020-06-02T15:43:16 | 2020-06-02T15:43:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | import numpy as np
import pandas as pd
from sklearn.cluster import FeatureAgglomeration
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from tpot.builtins import DatasetSelector
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'].values, random_state=21)
# Average CV score on the training set was:0.752695652173913
exported_pipeline = make_pipeline(
DatasetSelector(sel_subset=12, subset_list="module23.csv"),
FeatureAgglomeration(affinity="l2", linkage="average"),
RandomForestClassifier(bootstrap=False, criterion="entropy", max_features=0.5, min_samples_leaf=9, min_samples_split=14, n_estimators=100)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| [
"grixor@gmail.com"
] | grixor@gmail.com |
54b3cba439ff4df98ef0664037b16637b744cc2c | 2df47589ca457d16fbffd4e1bccf5133174a0b97 | /highcharts/core/urls.py | 224545575fb36ead36d321285b6b52ffed2591b7 | [] | no_license | bguerbas/highcharts | a805419cb8d5a00bc3f82b5c4df285598f7685d8 | 571fba58465136c5040266b3d4ba2d65a5cc740c | refs/heads/master | 2022-02-12T19:33:12.244474 | 2016-06-04T05:00:24 | 2016-06-04T05:00:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from django.conf.urls import url
from highcharts.core.graphics import dollar_json, euro_json, product_json
from highcharts.core import views as v
# URL routes for the core app: one page view per chart, plus the JSON
# endpoints that feed the Highcharts graphics.
urlpatterns = [
    url(r'^$', v.home, name='home'),
    url(r'^dollar-graphic/$', v.dollar_graphic, name='dollar-graphic'),
    url(r'^euro-graphic/$', v.euro_graphic, name='euro-graphic'),
    url(r'^product-graphic/$', v.product_graphic, name='product-graphic'),
    # JSON data endpoints (registered without a URL name).
    url(r'^dollar_json/$', dollar_json),
    url(r'^euro_json/$', euro_json),
    url(r'^product_json/$', product_json),
]
| [
"rg3915@yahoo.com.br"
] | rg3915@yahoo.com.br |
aa2082dc6d4bc7facdfcda2f11287a57b36d45d5 | 921b3a67a24df947f085e93ba58833ec20f6b89e | /producer-tutorial/Lib/site-packages/faker/providers/ssn/en_PH/__init__.py | b46f518321a9d4fc9f7172f85be5a33b8bd0612e | [] | no_license | jaslanm/python | e3bacd7ad0020b7e11adcb1b17dd6da3e4b2f65c | 5cfa3913b89acb0b8cf79247de1b2820a8b92f3a | refs/heads/main | 2023-08-30T01:53:13.752918 | 2021-10-23T13:24:48 | 2021-10-23T13:24:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | from ... import BaseProvider
class Provider(BaseProvider):
    """Fake Philippine social-security-related ID numbers.

    The Philippines has no single unified social security program; several
    agencies each issue their own member numbers, all of which this
    provider can fake:

    - SSS: social insurance for private, professional, informal workers
    - GSIS: social insurance for government employees
    - Pag-IBIG (HDMF): socialized savings / loan assistance fund
    - PhilHealth: national health insurance
    - UMID: unified card whose common reference number (CRN) links the four
      programs above; it stands in for ``ssn()`` here.
    """

    sss_formats = ('##-#######-#',)
    gsis_formats = ('###########',)
    philhealth_formats = ('##-#########-#',)
    pagibig_formats = ('####-####-####',)
    umid_formats = ('####-#######-#',)

    def sss(self) -> str:
        """Return a fake SSS member number."""
        pattern = self.random_element(self.sss_formats)
        return self.numerify(pattern)

    def gsis(self) -> str:
        """Return a fake GSIS member number."""
        pattern = self.random_element(self.gsis_formats)
        return self.numerify(pattern)

    def pagibig(self) -> str:
        """Return a fake Pag-IBIG (HDMF) member number."""
        pattern = self.random_element(self.pagibig_formats)
        return self.numerify(pattern)

    def philhealth(self) -> str:
        """Return a fake PhilHealth number."""
        pattern = self.random_element(self.philhealth_formats)
        return self.numerify(pattern)

    def umid(self) -> str:
        """Return a fake UMID common reference number."""
        pattern = self.random_element(self.umid_formats)
        return self.numerify(pattern)

    def ssn(self) -> str:
        # Use UMID as SSN in the interim till its deprecation
        return self.umid()
| [
"jaslanm@gmail.com"
] | jaslanm@gmail.com |
d67a97f7d001de095080b8e061061fdc66d4ab5c | 1d01f44e748c03e2f00ede0b317ac57d868cb9a8 | /bdd/features/steps/generic.py | a41853ca92fad327f95aa4f5481dc70ae24b84f1 | [
"Apache-2.0"
] | permissive | gtoonstra/airflow-hovercraft | 6ef9e6588a1dbc9a97f4c725ee8e50d38f913d3a | 87d3f3dde410d186dcfbe30fb3330b6c3c8d08d9 | refs/heads/master | 2021-01-22T23:10:49.646487 | 2017-06-18T20:07:37 | 2017-06-18T20:07:37 | 92,804,454 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,290 | py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import ast
from bddlib.fake_hook import FakeHook
from airflow.hooks.base_hook import BaseHook
def get_default_context():
    """Return the (currently empty) template context passed to execute()."""
    return {}


@given('no specific state')
def step_impl(context):
    """No-op setup step."""
    pass


@given('a specific initializer')
def step_impl(context):
    """Parse the step's table row into constructor kwargs for the operator."""
    if context.table is not None:
        row = context.table[0]
        headers = context.table.headings
        d = {}
        for header in headers:
            # Cell values are Python literals, e.g. "'text'" or "[1, 2]".
            d[header] = ast.literal_eval(row[header])
        context.initializer = d


@given('hook mocked with FakeHook')
def step_impl(context):
    """Patch BaseHook.get_hook so operators get a FakeHook with canned data."""
    returned_data = {}
    if context.table is not None:
        row = context.table[0]
        headers = context.table.headings
        for header in headers:
            returned_data[header] = ast.literal_eval(row[header])
    def get_hook(conn_id='fake'):
        return FakeHook(returned_data)
    # NOTE: monkey-patches the class; stays in effect for later scenarios.
    BaseHook.get_hook = get_hook


@when('the {operator_type} is created')
def step_impl(context, operator_type):
    """
    This step checks if it can instantiate
    a class of a certain type
    """
    try:
        context.exception = None
        # Split a dotted path "pkg.module.Class" into module and class name.
        s = operator_type.split(".")
        mod = ".".join(s[:len(s)-1])
        clz = s[len(s)-1]
        MyClass = getattr(importlib.import_module(mod), clz)
        d = {}
        if "initializer" in context:
            d = context.initializer
            d['task_id'] = 'test'
            context.instance = MyClass(**d)
        else:
            context.instance = MyClass(task_id='test')
    except Exception as e:
        # Stash the failure so later "then" steps can assert on it.
        context.exception = e


@then('the operator is executed')
def step_impl(context):
    """Run the operator's execute() and record its result or exception."""
    try:
        ctxt = get_default_context()
        context.return_value = context.instance.execute(ctxt)
    except Exception as e:
        context.exception = e


@then('no exception is raised')
def step_impl(context):
    """
    This step just checks if an exception was raised
    in a previous step.
    """
    if context.exception is not None:
        raise context.exception


@then('the exception {exception_type} is raised')
def step_impl(context, exception_type):
    """
    This step just checks if an exception was raised
    in a previous step.
    """
    if context.exception is None:
        raise Exception("No exception was raised when one was expected")
    # Compare by bare class name so feature files need not qualify it.
    assert type(context.exception).__name__ == exception_type


@then('the return value is {return_value}')
def step_impl(context, return_value):
    """Assert the string form of the recorded return value matches."""
    if context.return_value is not None:
        assert str(context.return_value) == str(return_value)
    else:
        raise Exception("No return value from operator")
| [
"gtoonstra@gmail.com"
] | gtoonstra@gmail.com |
82c10264b839855b634e22321e3d1c1056cc2fa2 | ffc1cc3bb7b68335b115122fdc7924fc4e31d528 | /hun89.py | 598943a29a06fb9e8bd394f448e35cc79e22821b | [] | no_license | Rihanashariff/swathi24 | dba1dd3c3d2ff583ae431b432e0ef262bfeb3ac3 | 2b0d21f2febdd2a563e8f0affeebd5ca7a5821b8 | refs/heads/master | 2020-07-02T05:28:32.199982 | 2019-06-29T08:22:10 | 2019-06-29T08:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | #s
# Read a line, keep the first occurrence of each character, and print the
# distinct characters in reverse order of first appearance.
n = input()
# dict.fromkeys preserves insertion order (3.7+), so this dedupes in order.
unique_chars = list(dict.fromkeys(n))
unique_chars.reverse()
# join also handles the empty-input case, which previously crashed with an
# IndexError on l[-1].
print("".join(unique_chars))
| [
"noreply@github.com"
] | Rihanashariff.noreply@github.com |
445b8f4162f676a1d2d53a66b8f67bd4b216b021 | a2d3f2787cd26f2bf90f30ba9516d1675a69f8be | /emission/tests/coreTests/TestEntry.py | 93e03219cfcb42cee4b1af697e90badb0d2316d4 | [
"BSD-3-Clause"
] | permissive | njriasan/e-mission-server | 318833ba06cb7f40ddb7b8d2ac3da4d049e7c846 | 23224ddcfd29f31c13f75d819d9ad8530aea052f | refs/heads/master | 2020-05-02T11:02:00.528836 | 2019-03-27T19:21:31 | 2019-03-27T19:21:31 | 177,915,408 | 1 | 0 | BSD-3-Clause | 2019-03-27T04:01:32 | 2019-03-27T04:01:31 | null | UTF-8 | Python | false | false | 4,859 | py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Test the class that supports usercache entries
# The main change here is that
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import unittest
from uuid import UUID
import geojson as gj
import bson.objectid as bo
# Our imports
import emission.core.wrapper.entry as ecwe
import emission.core.wrapper.motionactivity as ecwm
import emission.core.wrapper.trip as ecwt
import emission.tests.common as etc
class TestEntry(unittest.TestCase):
    """Checks that raw usercache documents wrap cleanly into typed wrappers."""

    def testWrapLocation(self):
        """A background/location message exposes metadata and geo fields."""
        testEntryJSON = {'_id': '55a4418c7d65cb39ee9737cf',
            'data': {'accuracy': 52.5,
            'altitude': 0,
            'bearing': 0,
            'elapsedRealtimeNanos': 100623898000000,
            'latitude': 37.3885529,
            'longitude': -122.0879696,
            'loc': {"coordinates": [-122.0879696, 37.3885529], "type": "Point"},
            'sensed_speed': 0,
            'ts': 1436826356.852},
            'metadata': {'key': 'background/location',
            'platform': 'android',
            'read_ts': 0,
            'type': 'message',
            'write_ts': 1436826357.115,
            'write_fmt_time': '2015-07-13 15:25:57.115000-07:00'
            },
            'user_id': UUID('0763de67-f61e-3f5d-90e7-518e69793954')}
        entry = ecwe.Entry(testEntryJSON)
        self.assertEquals(entry.metadata.key, 'background/location')
        self.assertEquals(entry.metadata.type, 'message')
        self.assertEquals(entry.data.latitude, 37.3885529)
        self.assertEquals(entry.data.longitude, -122.0879696)
        # The loc dict should come back as a geojson Point instance.
        # self.assertEquals(entry.data.loc, gj.Point((-122.0879696, 37.3885529)))
        self.assertTrue(isinstance(entry.data.loc, gj.Point))
        logging.debug("location time = %s, written at %s (%s)" %
            (entry.data.ts, entry.metadata.write_ts, entry.metadata.write_fmt_time))

    def testWrapActivity(self):
        """A motion_activity message maps its integer type onto MotionTypes."""
        testEntryJSON = {
            '_id': '55a4418c7d65cb39ee9737d2',
            'data': {
                'type': 5,
                'confidence': 100,
                'ts': 1436826360.493
            },
            'metadata': {'key': 'background/motion_activity',
                'platform': 'android',
                'read_ts': 0,
                'type': 'message',
                'write_ts': 1436826360.493,
                'write_fmt_time': '2015-07-13 15:26:00.493000-07:00'
            },
            'user_id': UUID('0763de67-f61e-3f5d-90e7-518e69793954')
        }
        entry = ecwe.Entry(testEntryJSON)
        self.assertEquals(entry.metadata.key, 'background/motion_activity')
        self.assertEquals(entry.metadata.type, 'message')
        # The raw integer 5 is expected to decode to the TILTING member.
        self.assertEquals(entry.data.type, ecwm.MotionTypes.TILTING)
        self.assertEquals(entry.data.confidence, 100)
        logging.debug("activity time = %s, written at %s (%s)" %
            (entry.data.ts, entry.metadata.write_ts, entry.metadata.write_fmt_time))

    def testWrapTrip(self):
        """A trip document keeps its ObjectId references and wraps start_loc."""
        testTripJSON = {
            '_id': bo.ObjectId("55d8c47b7d65cb39ee983c2d"),
            'start_ts': 1436826360.200,
            'start_fmt_time': '2015-07-13 15:26:00.200000-07:00',
            'end_ts': 1436826360.493,
            'end_fmt_time': '2015-07-13 15:26:00.493000-07:00',
            'start_place': bo.ObjectId("55d8c47b7d65cb39ee983c2d"),
            'end_place': bo.ObjectId("55d8c47b7d65cb39ee983c2d"),
            'start_loc': {"coordinates": [-122, 37], "type": "Point"},
            'user_id': UUID('0763de67-f61e-3f5d-90e7-518e69793954')
        }
        trip = ecwt.Trip(testTripJSON)
        self.assertEquals(trip.get_id(), bo.ObjectId("55d8c47b7d65cb39ee983c2d"))
        self.assertEquals(trip.start_place, bo.ObjectId("55d8c47b7d65cb39ee983c2d"))
        self.assertEquals(trip.end_place, bo.ObjectId("55d8c47b7d65cb39ee983c2d"))
        self.assertTrue(isinstance(trip.start_loc, gj.Point))

    def testDedupList(self):
        """get_dedup_list returns the per-key list of fields used for dedup."""
        import emission.core.wrapper.location as ecwl
        import emission.core.wrapper.transition as ecwt
        self.assertEqual(type(ecwe.Entry.get_dedup_list("background/filtered_location")),
            list)
        self.assertIn("latitude", ecwe.Entry.get_dedup_list("background/filtered_location"))
        self.assertIn("ts", ecwe.Entry.get_dedup_list("background/filtered_location"))
        self.assertEqual(type(ecwe.Entry.get_dedup_list("statemachine/transition")),
            list)
        self.assertIn("curr_state", ecwe.Entry.get_dedup_list("statemachine/transition"))
        self.assertIn("ts", ecwe.Entry.get_dedup_list("statemachine/transition"))
if __name__ == '__main__':
etc.configLogging()
unittest.main()
| [
"shankari@eecs.berkeley.edu"
] | shankari@eecs.berkeley.edu |
42ef418b29e8afe99bd8a80b80757cb7ddc5210e | 48b7b96a5caf2102ae6ca5626efc0135d4088a75 | /server/providers/models.py | 80bce9d435fb3af1e97647ff6ba22b3133451c1d | [] | no_license | DonAurelio/coder | ced49498e0e0717fa8f0c523e5a2ff87895f162d | 429d2e4c43d66770792200bac6cd103f86dcf8b1 | refs/heads/master | 2023-01-11T08:59:43.749545 | 2019-10-02T19:58:07 | 2019-10-02T19:58:07 | 106,939,435 | 2 | 1 | null | 2022-12-29T07:03:46 | 2017-10-14T15:33:08 | JavaScript | UTF-8 | Python | false | false | 1,669 | py | from django.db import models
import requests
# Create your models here.
class Service(models.Model):
    """A remote HTTP API, identified by a unique name, rooted at base_url."""
    # Primary key: the human-readable, unique service name.
    name = models.CharField(max_length=100,primary_key=True,
        help_text='Service name has to be unique')
    # Root URL of the API; Resource.url() joins path parts onto it.
    base_url = models.CharField(
        max_length=400,
        help_text='API base url without ending slash')
    # Short free-text summary of what the service offers.
    description = models.CharField(max_length=400,
        help_text='Describe the service in few words')

    def __str__(self):
        return self.name
class Resource(models.Model):
    """A named resource exposed by a Service, probed over HTTP."""
    # Service who makes the resource available
    service = models.ForeignKey(Service)
    # Resource name (the path segment appended to the service base URL).
    name = models.CharField(max_length=100,
        help_text='Name of this resouce')
    # An summary about resouce functinality
    description = models.CharField(max_length=400,
        help_text='purpose of this resource')

    class Meta:
        unique_together = (("service", "name"),)

    def url(self, *args):
        """Return base_url/name joined with any extra path parts in *args*."""
        url_parts = [self.service.base_url, self.name] + list(args)
        return '/'.join(url_parts)

    def is_available(self):
        """Return True when a GET on the resource URL connects within 1s."""
        # The original ended with an unreachable `return message` after both
        # branches had already returned; that dead code is removed here.
        try:
            requests.get(self.url(), timeout=1)
            return True
        except requests.exceptions.ConnectionError:
            return False

    def status(self):
        """Return 'Online', 'Online with erros' or 'Offline' via a HEAD probe."""
        try:
            response = requests.head(self.url(), timeout=1)
        except requests.exceptions.ConnectionError:
            return 'Offline'
        # Fix: the original compared with `is 200` (object identity), which
        # only works by accident of CPython's small-int cache; use equality.
        if response.status_code == 200:
            return 'Online'
        return 'Online with erros'
"aurelio.vivas@correounivalle.edu.co"
] | aurelio.vivas@correounivalle.edu.co |
def non_recursion(n):
    """Return n! computed iteratively (0! == 1).

    Fixes the original loop, which iterated over range(n) — starting at 0 —
    so the running product was zeroed out and every result for n >= 1 was 0.
    """
    result = 1
    for factor in range(1, n + 1):
        result *= factor
    return result
def recursive_fact(n):
    """Return n factorial, computed recursively (base case: 0! == 1)."""
    return 1 if n == 0 else n * recursive_fact(n - 1)
print(recursive_fact(100))
print(non_recursion(100)) | [
"agkozik@gmail.com"
] | agkozik@gmail.com |
05081be19c602536b83c4921c511a0830229bd5c | 77b300d44131c74ce42c9099e1b709b9c5941ba1 | /src/zojax/content/model/tests/view.py | 2c2bb9d223bd8e21e24d76ced9764bbf6a8aa39d | [
"ZPL-2.1"
] | permissive | Zojax/zojax.content.model | 26c0984457a8a9940105d11143a1c7cb9ed9d8c0 | 07d14dc8ba467f6efb2ad58e68c050afebd0e69d | refs/heads/master | 2016-09-06T07:22:26.242130 | 2011-12-16T07:12:30 | 2011-12-16T07:12:30 | 2,035,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import interface, component, schema
from zope.security.proxy import removeSecurityProxy
from zojax.content.model.model import ViewModel
from zojax.content.model.interfaces import IModelRenderer
from zojax.content.type.interfaces import IContentView
class IMyDynamicView(interface.Interface):
    """Schema for the first test view model: one optional text field."""
    content = schema.TextLine(
        title = u'Content title',
        required = False)


class MyDynamicView(ViewModel):
    """Test view model rendering its own content, or the context's title."""
    interface.implements(IModelRenderer, IContentView)
    component.adapts(interface.Interface, interface.Interface)

    def render(self):
        # Prefer the model's stored content; fall back to the context title.
        if self.content:
            return 'My Dynamic View: %s'%self.content
        else:
            return 'My Dynamic View: %s'%self.context.title


class IMyDynamicView2(interface.Interface):
    """Empty marker schema for the second test view model."""
    pass


class MyDynamicView2(ViewModel):
    """Bare view model without its own renderer; see MyDynamicView2View."""
    component.adapts(interface.Interface, interface.Interface)


class MyDynamicView2View(object):
    """Callable view that renders the title of the model's own context."""
    def __call__(self, *args, **kw):
        return 'My Dynamic View: %s'%self.context.context.title
| [
"andrey.fedoseev@gmail.com"
] | andrey.fedoseev@gmail.com |
d84df8e0443840cc9a741459f80f50079bd18ce3 | fc96f28fc3dd08ecd418fe13f13d71c8f7b51cd9 | /enrich/tfc.py | 12be029215f5e4e49edfbe219a703bfb874c92ff | [
"MIT"
] | permissive | mindis/spacyapp | 59d811854291a770bcb9d6f0552a9ceaa48246ec | 2b5b6af1be4e0cee55bcc776253d63f5005f899f | refs/heads/master | 2020-04-14T12:54:17.922808 | 2019-01-02T14:12:10 | 2019-01-02T14:12:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,409 | py | from enrich.tei import XMLReader
from datetime import datetime
class Tcf(XMLReader):
    """ a class to read an process tfc-documents
    tried with 'data/nn_nrhz_001_1418.tcf.xml'
    """

    def list_nodes(self, element):
        """ returns a list of passed in element-nodes"""
        # Match by local-name so the query works regardless of prefix;
        # self.tree / self.nsmap are set up by the XMLReader base class.
        expr = "//tcf:*[local-name() = $name]"
        nodes = self.tree.xpath(expr, name=element, namespaces=self.nsmap)
        return nodes

    def list_multiple_nodes(self, elements=['token', 'lemma', 'tag', 'sentence']):
        """ returns a dict with keys of past in elements and a list of those nodes as values"""
        # NOTE: mutable default argument is shared across calls, but it is
        # only read here, never mutated.
        expr = "//tcf:*[local-name() = $name]"
        nodes = {}
        for x in elements:
            nodes[x] = self.list_nodes(x)
        return nodes

    def count_multiple_nodes(self, elements=['token', 'lemma', 'tag', 'sentence']):
        """ counts the number of nodes of the passed in elements """
        nodes = self.list_multiple_nodes(elements)
        result = {}
        for key, value in nodes.items():
            result[key] = len(value)
        return result

    def create_sent_list(self):
        """ create a list of dicts for each sentence with their according token elements"""
        # Relies on token/tag/lemma nodes appearing in the same global order
        # as the sentences' tokenIDs lists; slices are taken positionally.
        elements = ['token', 'lemma', 'tag', 'sentence']
        start = 0
        end = 0
        sent_list = []
        nodes = self.list_multiple_nodes(elements)
        sentences = nodes['sentence']
        tokens = nodes['token']
        tags = nodes['tag']
        lemmas = nodes['lemma']
        for x in sentences:
            sent = {}
            # Number of tokens in this sentence = entries in its tokenIDs attr.
            token_count = len(x.xpath('./@tokenIDs')[0].split(' '))
            end = start + token_count
            sent['sent_id'] = x.xpath('./@ID')[0]
            sent['words'] = tokens[start:end]
            sent['tags'] = tags[start:end]
            sent['lemmas'] = lemmas[start:end]
            start = end
            sent_list.append(sent)
        return sent_list

    def tag_train_data(self):
        """ returns a list of samples to trains spacy's pos-tagger"""
        # Each sample is (sentence text, [words-dict, tags-dict, lemmas-dict]).
        TRAIN_DATA = []
        for x in self.create_sent_list():
            text = (" ".join([y.text for y in x['words']]))
            tags = {'tags': [y.text for y in x['tags']]}
            words = {'word': [y.text for y in x['words']]}
            lemmas = {'lemma': [y.text for y in x['lemmas']]}
            TRAIN_DATA.append((text, [words, tags, lemmas]))
        return TRAIN_DATA

    def create_tokenlist(self):
        """ returns a list of token-dicts extracted from tcf:token """
        words = self.list_nodes('token')
        token_list = []
        for x in words:
            token = {}
            token['value'] = x.text
            token['tokenId'] = x.xpath('./@ID')[0]
            # getnext() is None at the last sibling -> .text raises
            # AttributeError, which marks "no following token".
            try:
                follows = x.getnext().text
            except AttributeError:
                follows = None
            if follows:
                # Heuristics for whether a space follows this token: no space
                # after opening punctuation, a space before alphanumerics etc.
                if token['value'] == "(":
                    token['whitespace'] = False
                elif token['value'] == "„":
                    token['whitespace'] = False
                elif token['value'] == "‒":
                    token['whitespace'] = True
                elif follows[0].isalnum():
                    token['whitespace'] = True
                elif follows[0] == "„":
                    token['whitespace'] = True
                elif follows[0] == "(":
                    token['whitespace'] = True
                else:
                    token['whitespace'] = False
            else:
                token['whitespace'] = False
            token_list.append(token)
        return token_list

    def process_tokenlist(self, tokenlist, by_id=None):
        """ takes a tokenlist and updates the selected elements. Returns the updated self.tree """
        nr_tokens = len(tokenlist)
        nr_nodes = len(self.tree.xpath('.//tcf:token', namespaces=self.nsmap))
        print("# tokens: {}".format(nr_tokens))
        print("# token-nodes: {}".format(nr_nodes))
        if by_id:
            # Slow path: look each token node up by its ID attribute.
            expr = './/tcf:token[@ID=$id]'
            for x in tokenlist:
                print('by ID')
                try:
                    node = self.tree.xpath(expr, id=x['tokenId'], namespaces=self.nsmap)[0]
                except IndexError:
                    node = None
                if node is not None:
                    # Each annotation is optional; missing dict keys raise
                    # KeyError, but only AttributeError is caught here.
                    # NOTE(review): should these excepts be KeyError? verify.
                    try:
                        node.attrib['lemma'] = x['lemma']
                    except AttributeError:
                        pass
                    try:
                        node.attrib['iob'] = x['iob']
                    except AttributeError:
                        pass
                    try:
                        node.attrib['type'] = x['type']
                    except AttributeError:
                        pass
                    try:
                        node.attrib['ana'] = x['pos']
                    except AttributeError:
                        pass
        elif nr_nodes == nr_nodes:
            # NOTE(review): this condition is always True — it probably was
            # meant to be `nr_tokens == nr_nodes` (positional update is only
            # safe when the counts match). Confirm before changing.
            print('not by ID')
            counter = 0
            for x in self.list_nodes('token'):
                x.attrib['lemma'] = tokenlist[counter]['lemma']
                x.attrib['iob'] = tokenlist[counter]['iob']
                x.attrib['type'] = tokenlist[counter]['type']
                x.attrib['ana'] = tokenlist[counter]['pos']
                counter += 1
        else:
            pass
        return self.tree
| [
"Peter.Andorfer@oeaw.ac.at"
] | Peter.Andorfer@oeaw.ac.at |
5b3676d9864fd42804bba265747557e5df923681 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-vs/aliyunsdkvs/request/v20181212/DescribeVsDomainReqBpsDataRequest.py | 307d6bbfec73206085e343b8694ed5221dad5335 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 2,455 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
class DescribeVsDomainReqBpsDataRequest(RpcRequest):
    """RPC request for the 'vs' product's DescribeVsDomainReqBpsData API.

    Auto-generated SDK boilerplate: each query parameter gets a plain
    getter/setter pair over the request's query-parameter dict.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'vs', '2018-12-12', 'DescribeVsDomainReqBpsData','vs')
        self.set_method('POST')
        # Populate endpoint tables only when the attributes exist —
        # presumably to stay compatible with older core versions; confirm.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_LocationNameEn(self):
        return self.get_query_params().get('LocationNameEn')

    def set_LocationNameEn(self,LocationNameEn):
        self.add_query_param('LocationNameEn',LocationNameEn)

    def get_StartTime(self):
        return self.get_query_params().get('StartTime')

    def set_StartTime(self,StartTime):
        self.add_query_param('StartTime',StartTime)

    def get_IspNameEn(self):
        return self.get_query_params().get('IspNameEn')

    def set_IspNameEn(self,IspNameEn):
        self.add_query_param('IspNameEn',IspNameEn)

    def get_DomainName(self):
        return self.get_query_params().get('DomainName')

    def set_DomainName(self,DomainName):
        self.add_query_param('DomainName',DomainName)

    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self,EndTime):
        self.add_query_param('EndTime',EndTime)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)

    def get_Interval(self):
        return self.get_query_params().get('Interval')

    def set_Interval(self,Interval):
        self.add_query_param('Interval',Interval)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
e3629543f40c546cf51bb37c2ae7539b0733c980 | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Main_20190117194302.py | 39149174f32dc16da49b80990804dd7f78bc9b70 | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,739 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
# Maya Header
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as omui
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
import plugin.Qt as Qt
if Qt.__binding__.startswith('PyQt'):
from Qt import _uic as uic
return uic.loadUiType(uiFile)
elif Qt.__binding__ == 'PySide':
import pysideuic as uic
else:
import pyside2uic as uic
import xml.etree.ElementTree as xml
from cStringIO import StringIO
parsed = xml.parse(uiFile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uiFile, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
exec pyc in frame
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s'%form_class]
base_class = eval('%s'%widget_class)
return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Main.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
import Cam_Item_Layout
import Cam_Attrubte_Panel
reload(Cam_Item_Layout)
reload(Cam_Attrubte_Panel)
from Cam_Item_Layout import Cam_Item_Layout
from Cam_Attrubte_Panel import Cam_Attrubte_Panel
class Cam_Main(form_class,base_class):
def __init__(self):
super(Cam_Main,self).__init__()
self.setupUi(self)
self.Cam_Item_Widget = Cam_Item_Layout()
self.Cam_Attrubte_Widget = Cam_Attrubte_Panel()
splitter = QSplitter()
splitter.setHandleWidth(5)
splitter.addWidget(self.Cam_Item_Widget)
splitter.addWidget(self.Cam_Attrubte_Widget)
self.Main_Layout.layout().addWidget(splitter)
self.Default_Attr_Setting()
def Default_Attr_Setting(self):
self.Cam_Attrubte_Widget.Cam_Name_Label.setText(u"<center> - 请选择摄像机 - </center>")
self.Cam_Attrubte_Widget.Cam_Input_Toggle.setVisible(False)
self.Cam_Attrubte_Widget.Cam_Input_Layout.setVisible(False)
self.Cam_Attrubte_Widget.Cam_Output_Toggle.setVisible(False)
self.Cam_Attrubte_Widget.Cam_Output_Layout.setVisible(False)
def Save_Json_Fun(self,path=GUI_STATE_PATH):
GUI_STATE = {}
GUI_STATE['DOCK'] = self.DOCK
try:
with open(path,'w') as f:
json.dump(GUI_STATE,f,indent=4)
except:
if path != "":
QMessageBox.warning(self, u"Warning", u"保存失败")
def Load_Json_Fun(self,path=GUI_STATE_PATH,load=False):
if os.path.exists(path):
GUI_STATE = {}
with open(path,'r') as f:
GUI_STATE = json.load(f)
return True
else:
if load==True:
QMessageBox.warning(self, u"Warning", u"加载失败\n检查路径是否正确")
return False
def mousePressEvent(self,e):
for i,child in enumerate(self.Cam_Item_Widget.Item_Layout.children()):
if i != 0:
if child.geometry().contains(e.pos()):
child.setStyleSheet("Cam_Item_%s{{border:3px solid red}}" % i)
else:
child.setStyleSheet("Cam_Item_%s{{border:3px solid red}}"% i)
| [
"2595715768@qq.com"
] | 2595715768@qq.com |
c48b263b359fd78edb64adc043143431bdb69b80 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/e3ebbbfe7e234f848c86e6281082178b.py | 364fd3459a9c67d7731467dfdf0b50a1914242a2 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 210 | py |
def hey(string):
if not string.strip():
return 'Fine. Be that way!'
elif string.isupper():
return 'Whoa, chill out!'
elif string.endswith('?'):
return 'Sure.'
else:
return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
8a6ba3996b46a72a73ca370b427eba807287b0eb | e11b2493f55c60685c3ea76f900be73e6a454b2f | /high_peformance_python/matrix & iterator/list_and_set.py | deda5e0f90c5c616b600d6de3b6e00c725d130dd | [] | no_license | zpoint/Reading-Exercises-Notes | 29e566dd86d97eadb84d7bb6f8f640b85486557c | 31b38fe927232ba8e6f6a0e7ab9c58026eefcffb | refs/heads/master | 2021-06-04T08:12:42.777309 | 2021-04-19T02:12:22 | 2021-04-19T02:12:22 | 70,507,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | def list_unique_names(phonebook):
unique_names = []
for name, phonenumber in phonebook:
firstname, lastname = name.split(" ", 1)
for unique in unique_names:
if unique == firstname:
break
else:
unique_names.append(firstname)
return len(unique_names)
def set_unique_names(phonebook):
unique_names = set()
for name, phonenumber in phonebook:
first_name, last_name = name.split(" ", 1)
unique_names.add(first_name)
return len(unique_names)
phonebook = [
("Joe Doe", "555-555-5555"),
("Albert Einstein", "212-555-5555"),
("John Murphey", "202-555-5555"),
("Albert Rutherford", "647-555-5555"),
("Elaine Bodian", "301-555-5555")
]
for i in range(10000):
if (i % 2 == 0):
phonebook.append(("Jo" + chr(i) + " Doe", "555-555-5555"))
else:
phonebook.append(("Elaine"+ chr(i) +" Bodian", "301-555-5555"))
print ("Number of unique name from set method", set_unique_names(phonebook))
print ("Number of unique names from list method", list_unique_names(phonebook))
#In [21]: %timeit list_unique_names(phonebook)
#1 loop, best of 3: 2.3 s per loop
#In [22]: %timeit set_unique_names(phonebook)
#100 loops, best of 3: 9.44 ms per loop | [
"zp0int@qq.com"
] | zp0int@qq.com |
005dab74220c894d396199899ee60aaacaae6ac3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_squished.py | 706288b75fe0e4e6999c43fefc2d7e58c3ec6189 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.verbs._squish import _SQUISH
#calss header
class _SQUISHED(_SQUISH, ):
def __init__(self,):
_SQUISH.__init__(self)
self.name = "SQUISHED"
self.specie = 'verbs'
self.basic = "squish"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.