blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dad949a14e690002447d02f8e29d60c18500099b | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/PySide2/QtGui/QVector3D.py | b8973e32292f5f6d1dd2b4cfec809fcb2ed4014c | [] | no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,335 | py | # encoding: utf-8
# module PySide2.QtGui
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtGui.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import Shiboken as __Shiboken
class QVector3D(__Shiboken.Object):
    """Generated completion stub for ``PySide2.QtGui.QVector3D``.

    Produced by a stub generator for IDE autocompletion only: every method
    body is a placeholder (``pass``) and the real signatures are unknown;
    the actual implementation lives in the compiled QtGui extension module.
    """
    # no doc
    # -- vector math, predicates and component accessors (stub bodies) --
    def crossProduct(self, *args, **kwargs): # real signature unknown
        pass
    def distanceToLine(self, *args, **kwargs): # real signature unknown
        pass
    def distanceToPlane(self, *args, **kwargs): # real signature unknown
        pass
    def distanceToPoint(self, *args, **kwargs): # real signature unknown
        pass
    def dotProduct(self, *args, **kwargs): # real signature unknown
        pass
    def isNull(self, *args, **kwargs): # real signature unknown
        pass
    def length(self, *args, **kwargs): # real signature unknown
        pass
    def lengthSquared(self, *args, **kwargs): # real signature unknown
        pass
    def normal(self, *args, **kwargs): # real signature unknown
        pass
    def normalize(self, *args, **kwargs): # real signature unknown
        pass
    def normalized(self, *args, **kwargs): # real signature unknown
        pass
    def project(self, *args, **kwargs): # real signature unknown
        pass
    def setX(self, *args, **kwargs): # real signature unknown
        pass
    def setY(self, *args, **kwargs): # real signature unknown
        pass
    def setZ(self, *args, **kwargs): # real signature unknown
        pass
    def toPoint(self, *args, **kwargs): # real signature unknown
        pass
    def toPointF(self, *args, **kwargs): # real signature unknown
        pass
    def toTuple(self, *args, **kwargs): # real signature unknown
        pass
    def toVector2D(self, *args, **kwargs): # real signature unknown
        pass
    def toVector4D(self, *args, **kwargs): # real signature unknown
        pass
    def unproject(self, *args, **kwargs): # real signature unknown
        pass
    def x(self, *args, **kwargs): # real signature unknown
        pass
    def y(self, *args, **kwargs): # real signature unknown
        pass
    def z(self, *args, **kwargs): # real signature unknown
        pass
    # -- operator overloads; docstrings were restored from the C++ type's __doc__ --
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __copy__(self, *args, **kwargs): # real signature unknown
        pass
    def __div__(self, y): # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __iadd__(self, y): # real signature unknown; restored from __doc__
        """ x.__iadd__(y) <==> x+=y """
        pass
    def __idiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__idiv__(y) <==> x/=y """
        pass
    def __imul__(self, y): # real signature unknown; restored from __doc__
        """ x.__imul__(y) <==> x*=y """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    def __isub__(self, y): # real signature unknown; restored from __doc__
        """ x.__isub__(y) <==> x-=y """
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__lshift__(y) <==> x<<y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    def __mul__(self, y): # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass
    def __neg__(self): # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass
    def __radd__(self, y): # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass
    def __rdiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdiv__(y) <==> y/x """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rlshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rlshift__(y) <==> y<<x """
        pass
    def __rmul__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass
    def __rrshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rrshift__(y) <==> y>>x """
        pass
    def __rshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rshift__(y) <==> x>>y """
        pass
    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass
    def __rtruediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rtruediv__(y) <==> y/x """
        pass
    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass
    def __truediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__truediv__(y) <==> x/y """
        pass
| [
"noreply@github.com"
] | cundesi.noreply@github.com |
d06c3fe68020ef60919224da60cbd77b2a0d58c0 | 12b401d5be9f5a1e1f60eb607f1796771deae085 | /application_play_game.py | 78325623113f39f07959a06d1fdcf3e6d58bfc12 | [] | no_license | chandraprakashh/machine_learning_code | 805355125f66cd03005fbc6bb134aeebf8a46c6a | 64679785d0ac8e231fd0a2d5386519f7e93eea82 | refs/heads/master | 2020-07-20T23:21:00.157810 | 2020-01-13T10:28:36 | 2020-01-13T10:28:36 | 206,724,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 11:35:02 2019
@author: Administrator
"""
import tkinter

# Build the single main window of the demo application.
window = tkinter.Tk()
window.title("GUI")

def PrintOnClick():
    """Append another 'welcome' label to the window on each button press."""
    greeting = tkinter.Label(window, text="welcome")
    greeting.pack()

clicker = tkinter.Button(window, text="click me", command=PrintOnClick)
clicker.pack()
window.mainloop() | [
"noreply@github.com"
] | chandraprakashh.noreply@github.com |
2111ba7c1c2e5eef8696d286f56b85cb87a8ffd3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_fencers.py | 458ff6e91aa5b2b77af88010dcaf87cfc6f9bd2a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
# class header
class _FENCERS():
    """Word-form entry for "fencers" (plural form of "fencer")."""

    def __init__(self,):
        # Canonical (uppercase) name of the word this entry describes.
        self.name = "FENCERS"
        # Bug fix: the original read ``self.definitions = fencer`` -- a
        # reference to an undefined name, so constructing the object raised
        # NameError. The singular base form is stored as a string instead.
        self.definitions = 'fencer'
        self.parents = []
        self.childen = []  # NOTE(review): looks like a typo for `children`; kept for compatibility.
        self.properties = []
        self.jsondata = {}
        self.basic = ['fencer']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
920229c2be6c0e09a63d32d83e5be8973bb20dc8 | de4d88db6ea32d20020c169f734edd4b95c3092d | /aiotdlib/api/functions/process_push_notification.py | 339796d01d5927139d4d1b705af21fd64f6ce1e8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | thiagosm/aiotdlib | 5cc790a5645f7e4cc61bbd0791433ed182d69062 | 4528fcfca7c5c69b54a878ce6ce60e934a2dcc73 | refs/heads/main | 2023-08-15T05:16:28.436803 | 2021-10-18T20:41:27 | 2021-10-18T20:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class ProcessPushNotification(BaseObject):
    """
    Handles a push notification. Returns error with code 406 if the push notification is not supported and connection to the server is required to fetch new data. Can be called before authorization
    :param payload: JSON-encoded push notification payload with all fields sent by the server, and "google.sent_time" and "google.notification.sound" fields added
    :type payload: :class:`str`
    """
    # TDLib wire-format discriminator; serialized as the "@type" JSON field.
    ID: str = Field("processPushNotification", alias="@type")
    payload: str
    @staticmethod
    def read(q: dict) -> ProcessPushNotification:
        # Build the request from an already-parsed dict; pydantic's
        # ``construct`` skips validation, mirroring the generated API style.
        return ProcessPushNotification.construct(**q)
| [
"pylakey@protonmail.com"
] | pylakey@protonmail.com |
1f361b50f9c5c862d5ed7da0bf89240bf1400f42 | 3d6b4aca5ef90dd65a2b40cf11fd8f84088777ab | /zounds/datasets/phatdrumloops.py | 55d1cfcad1b68dd4f92b6a20d307d0f4bb7c855e | [
"MIT"
] | permissive | maozhiqiang/zounds | c3015f1bb58b835b5f8e9106518348442f86b0fc | df633399e7acbcbfbf5576f2692ab20d0501642e | refs/heads/master | 2020-04-19T22:41:42.540537 | 2019-01-31T01:13:45 | 2019-01-31T01:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | from zounds.soundfile import AudioMetaData
import requests
import re
import urlparse
class PhatDrumLoops(object):
    """
    An iterable of :class:`zounds.soundfile.AudioMetaData` instances, one
    per drum break listed at http://phatdrumloops.com/beats.php

    Args:
        attrs (dict): extra properties copied onto every emitted
            :class:`AudioMetaData`

    See Also:
        :class:`InternetArchive`
        :class:`FreeSoundSearch`
        :class:`zounds.soundfile.AudioMetaData`
    """

    def __init__(self, **attrs):
        super(PhatDrumLoops, self).__init__()
        # Every emitted AudioMetaData also records the page it came from.
        attrs.update(web_url='http://www.phatdrumloops.com/beats.php')
        self.attrs = attrs

    def __iter__(self):
        page = requests.get('http://phatdrumloops.com/beats.php')
        href_pattern = re.compile('href="(?P<uri>/audio/wav/[^\.]+\.wav)"')
        for match in href_pattern.finditer(page.content):
            relative_uri = match.groupdict()['uri']
            absolute_url = urlparse.urljoin(
                'http://phatdrumloops.com', relative_uri)
            # The request object is only constructed here, never sent;
            # consumers decide when and how to execute the download.
            audio_request = requests.Request(
                method='GET',
                url=absolute_url,
                headers={'Range': 'bytes=0-'})
            yield AudioMetaData(uri=audio_request, **self.attrs)
| [
"john.vinyard@gmail.com"
] | john.vinyard@gmail.com |
cbb8321e0fef6844c689fe05fcee4eaf4d7988e9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_051/ch120_2020_09_27_20_54_37_267942.py | 265ee47db4543afc45e1caee2a01fdff39a7ce64 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | import random as rd
# Interactive roulette game: keep betting until the bankroll is gone or the
# player bets 0. All user-facing prompts/messages are in Portuguese.
dinheiro=100
while dinheiro >0:
    numero=rd.randint(1, 36)  # winning number for this round
    # NOTE(review): unreachable - the while guard guarantees dinheiro > 0 here.
    if dinheiro == 0:
        break
    print ('voce tem', dinheiro, 'dinheiro')
    valor=int(input('qunato quer apostar? '))  # bet amount; 0 quits the game
    if valor == 0:
        break
    # 'n' = bet on an exact number (pays 35:1), 'p' = bet on parity (pays 1:1)
    aposta=input('quer apostar em um numero ou em uma paridade? ')
    if aposta == 'n':
        casa=int(input('escolha um numero de 1 a 36: '))
        if casa == numero:
            dinheiro+=valor*35
        else:
            dinheiro-=valor
    if aposta == 'p':
        paridade=input('par ou impar: ')  # 'p' = even; anything else = odd
        if paridade == 'p':
            if numero % 2 == 0:
                dinheiro += valor
            else:
                dinheiro-=valor
        else:
            if numero % 2 != 0:
                dinheiro+=valor
            else:
                dinheiro-=valor
# Final report after the loop ends.
if dinheiro == 0:
    print ('acabou seu dinheiro')
else:
print ('Muito bem voce terminou com', dinheiro, 'dinheiro') | [
"you@example.com"
] | you@example.com |
e4dd4ae92970dcf7438e5e3e8e1d87c2c669e718 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02267/s160876142.py | cc65b83611da412d51a5be85eff336a19d9f1eec | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | N = int(input())
# Read the first sequence (its length N was read just above), then the
# length and tokens of the second sequence.
L = input().split()
n = int(input())
l = input().split()
# The printed answer is the number of distinct values common to both
# sequences. A set intersection computes it in O(N + n) instead of the
# original O(N * n) nested scan; slicing preserves the original rule that
# only the first N (resp. n) tokens are considered.
dup = set(L[:N]) & set(l[:n])
print(len(dup))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ebf8ee08be5d85beef798cd93010cce8cdfcb4f7 | 2dc50ddfb0a431a34867c8955a972e67870a2755 | /migrations/versions/4445080944ee_hidden_hosts_management.py | d853ad3d00f27794e4a672e9281631081a45b99b | [
"BSD-3-Clause"
] | permissive | ziirish/burp-ui | d5aec06adb516eb26f7180f8e9305e12de89156c | 2b8c6e09a4174f2ae3545fa048f59c55c4ae7dba | refs/heads/master | 2023-07-19T23:05:57.646158 | 2023-07-07T18:21:34 | 2023-07-07T18:21:34 | 20,400,152 | 98 | 18 | BSD-3-Clause | 2023-05-02T00:31:27 | 2014-06-02T10:23:40 | Python | UTF-8 | Python | false | false | 875 | py | """hidden hosts management
Revision ID: 4445080944ee
Revises: 695dcbd29d4f
Create Date: 2018-10-03 11:47:20.028686
"""
# revision identifiers, used by Alembic.
revision = "4445080944ee"  # identifier of this migration
down_revision = "695dcbd29d4f"  # migration this one builds on
import sqlalchemy as sa
from alembic import op
def upgrade():
    """Apply the migration: create the ``hidden`` table.

    Each row records a (user, client, server) combination that should be
    hidden from that user's view.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "hidden",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("user", sa.String(length=256), nullable=False),
        sa.Column("client", sa.String(length=4096), nullable=True),
        sa.Column("server", sa.String(length=4096), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``hidden`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("hidden")
    # ### end Alembic commands ###
| [
"ziirish@ziirish.info"
] | ziirish@ziirish.info |
74ca2b4a8a82aad877acfaf39298809830bd83a9 | d3e31f6b8da5c1a7310b543bbf2adc76091b5571 | /Day29/upload_file/upload_file/settings.py | dc5d94a6a1bf90cd9d6113c34d79017577181cc2 | [] | no_license | pytutorial/py2103 | 224a5a7133dbe03fc4f798408694bf664be10613 | adbd9eb5a32eb1d28b747dcfbe90ab8a3470e5de | refs/heads/main | 2023-07-14T06:31:18.918778 | 2021-08-12T14:29:16 | 2021-08-12T14:29:16 | 355,163,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,339 | py | """
Django settings for upload_file project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-m&_n*uu56)_z%@jaq$1*z&yhs9ht56x!hk$lbehfee4c$j-ti9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is only acceptable while DEBUG is True (local development).
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'upload_file.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'upload_file.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# New
# NOTE(review): importing mid-file is unconventional (PEP 8 puts imports at
# the top); os is only needed for the os.path.join below.
import os
# Extra directories scanned by the staticfiles app in addition to each
# application's own static/ folder.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
] | [
"duongthanhtungvn01@gmail.com"
] | duongthanhtungvn01@gmail.com |
506ad115684ba8ed96c7300b40bb7467ba9114d6 | 2198abd6e37195dbb64b46effa11c6fad1de3b4e | /PyQuantum/Tools/CSV.py | 218dbc3802b453fa193aca8202b04c513040678a | [
"MIT"
] | permissive | deyh2020/PyQuantum | 179b501bea74be54ccce547e77212c7e1f3cd206 | 78b09987cbfecf549e67b919bb5cb2046b21ad44 | refs/heads/master | 2022-03-24T08:11:50.950566 | 2020-01-05T02:07:59 | 2020-01-05T02:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | import pandas as pd
import csv
def list_to_csv(lst, filename):
    """Write *lst* (a sequence of rows) to *filename* as bare CSV.

    Neither a header row nor an index column is emitted.
    """
    frame = pd.DataFrame(lst, columns=None)
    frame.to_csv(filename, header=False, index=None)
# def list_to_csv(lst, filename):
# df = pd.DataFrame(lst)
# df.to_csv(filename)
# def list_from_csv(filename):
# df = pd.read_csv(filename)
# lst = list(df.iloc[:, 1])
# return lst
def list_from_csv(filename):
    """Read *filename* as CSV and return its rows as lists of strings."""
    with open(filename, 'r') as csvfile:
        return [record for record in csv.reader(csvfile)]
| [
"alexfmsu@mail.ru"
] | alexfmsu@mail.ru |
98ba07395607b0818853a04048c8dc3186048939 | 4e2117a4381f65e7f2bb2b06da800f40dc98fa12 | /165_RealtimeStereo/test_tflite.py | c9d58604e945ac4c6775f9f5e1b7419b476080c3 | [
"GPL-3.0-only",
"AGPL-3.0-only",
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | PINTO0309/PINTO_model_zoo | 84f995247afbeda2543b5424d5e0a14a70b8d1f1 | ff08e6e8ab095d98e96fc4a136ad5cbccc75fcf9 | refs/heads/main | 2023-09-04T05:27:31.040946 | 2023-08-31T23:24:30 | 2023-08-31T23:24:30 | 227,367,327 | 2,849 | 520 | MIT | 2023-08-31T23:24:31 | 2019-12-11T13:02:40 | Python | UTF-8 | Python | false | false | 2,792 | py | from tensorflow.lite.python.interpreter import Interpreter
import cv2
import numpy as np
import time
class RealtimeStereo():
    """TFLite wrapper around a stereo-depth model.

    Loads the interpreter once; ``run(left, right)`` returns the raw
    disparity map produced by the network for a left/right image pair.
    """

    def __init__(self, model_path):
        # load_model() stores the interpreter and tensor metadata on self
        # (and returns None); self.model is kept for backward compatibility.
        self.model = self.load_model(model_path)

    def load_model(self, model_path):
        """Create the TFLite interpreter and cache its I/O tensor metadata."""
        self.interpreter = Interpreter(model_path, num_threads=4)
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        input_shape = self.input_details[0]['shape']
        # The input tensor is treated as NHWC below (index 1 = height and
        # index 2 = width feed the cv2.resize in preprocess()).
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        # Bug fix: channels is the last axis (index 3); the original used
        # index 2, which duplicated the width.
        self.channels = input_shape[3]

        self.output_details = self.interpreter.get_output_details()
        self.output_shape = self.output_details[0]['shape']

    def preprocess(self, image):
        """Convert a BGR frame into the model's normalized NHWC float32 input."""
        img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img_input = cv2.resize(
            img,
            (self.input_width, self.input_height)
        ).astype(np.float32)
        # ImageNet-style mean/std normalization. (The original comment
        # claimed a -1..1 range, which does not match this scaling.)
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        img_input = (img_input / 255.0 - mean) / std
        # img_input = img_input.transpose(2, 0, 1)  # NCHW variant, unused
        img_input = img_input[np.newaxis, :, :, :]
        return img_input.astype(np.float32)

    def run(self, left, right):
        """Run one stereo inference; returns the squeezed disparity array."""
        input_left = self.preprocess(left)
        input_right = self.preprocess(right)

        self.interpreter.set_tensor(self.input_details[0]['index'], input_left)
        self.interpreter.set_tensor(self.input_details[1]['index'], input_right)
        self.interpreter.invoke()

        disparity = self.interpreter.get_tensor(self.output_details[0]['index'])
        return np.squeeze(disparity)
if __name__ == '__main__':
    # Demo: run one stereo pair through the model and visualize the result.
    # model_path = 'saved_model/model_float32.tflite'
    model_path = 'saved_model/model_float16_quant.tflite'
    # model_path = 'saved_model/model_dynamic_range_quant.tflite'
    realtimeStereo = RealtimeStereo(model_path)
    # im0/im1 are the left/right views; expected in the working directory.
    img_left = cv2.imread('im0.png')
    img_right = cv2.imread('im1.png')
    start = time.time()
    disp = realtimeStereo.run(img_left, img_right)
    # Upsample the disparity map back to the original image resolution.
    disp = cv2.resize(
        disp,
        (img_left.shape[1], img_left.shape[0]),
        interpolation=cv2.INTER_LINEAR
    ).astype(np.float32)
    # Fixed-point view of the raw disparity for display.
    img = (disp*256).astype('uint16')
    cv2.imshow('disp', img)
    # Min-max normalize to 0..255 and colorize for a human-friendly preview.
    d_min = np.min(disp)
    d_max = np.max(disp)
    depth_map = (disp - d_min) / (d_max - d_min)
    depth_map = depth_map * 255.0
    depth_map = np.asarray(depth_map, dtype="uint8")
    depth_map = cv2.applyColorMap(depth_map, cv2.COLORMAP_JET)
    end = time.time()
    # NOTE(review): the timing includes resize/normalization, not just inference.
    eslapse = end - start
    print("depthmap : {}s".format(eslapse))
    cv2.imwrite('result.jpg', depth_map)
    cv2.imshow('output', depth_map)
    cv2.waitKey(0)
cv2.destroyAllWindows() | [
"rmsdh122@yahoo.co.jp"
] | rmsdh122@yahoo.co.jp |
24e1af6e4014ee573951e1bbb70250d99347fcd8 | e89f44632effe9ba82b940c7721cad19a32b8a94 | /text2shorthand/shorthand/svsd/a.py | bf377f0363fe3f97c48628ff5aeb67c2e7de1eef | [] | no_license | Wyess/text2shorthand | 3bcdb708f1d7eeb17f9ae3181c4dd70c65c8986e | 5ba361c716178fc3b7e68ab1ae724a57cf3a5d0b | refs/heads/master | 2020-05-17T14:52:11.369058 | 2019-08-20T12:50:00 | 2019-08-20T12:50:00 | 183,776,467 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | from ..svsd.char import SvsdChar
from text2shorthand.common.point import Point as P, PPoint as PP
import pyx
from pyx.metapost.path import (
beginknot,
knot,
endknot,
smoothknot,
tensioncurve,
controlcurve,
curve)
class CharA(SvsdChar):
    """Shorthand stroke definitions for the character 'a' (kana あ).

    Only the plain NE stroke and the NE→NE variant are implemented; the
    remaining ``path_*`` variants are stubs that currently return None.
    """
    def __init__(self, name='a', kana='あ',
                 model='NE10', head_type='NE', tail_type='NE'):
        super().__init__(name, kana, model, head_type, tail_type)
        self.head_ligature = {}
        self.tail_ligature = {'NE'}
    @classmethod
    def path_NE(cls, ta=None, **kwargs):
        # Straight line from the origin to the point PP(10, 30).
        return pyx.path.line(0, 0, *PP(10, 30))
    @classmethod
    def path_NEe(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEer(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEel(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEne(cls, ta=None, **kwargs):
        # Derives from the plain NE stroke via jog() (inherited; not defined here).
        return cls.jog(cls.path_NE())
    @classmethod
    def path_NEner(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEnel(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEs(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEsl(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEsr(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEse(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEser(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEsel(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEsw(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEswr(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NEswl(cls, ta=None, **kwargs):
        pass
| [
"diyhacker@mail.goo.ne.jp"
] | diyhacker@mail.goo.ne.jp |
c162fe99b26ec56068d61a2d04055ad5804cbeaf | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /vYYfFAAfjoc8crCqu_5.py | c815b8937d476932feaa5ae13066bc8fc49dac5e | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py |
def tree(h):
    """Return an ASCII triangle of height *h* as a list of strings.

    Row k (0-based) holds 2k+1 '#' characters centred in a field of
    width 2*h - 1, so every row has the same length.
    """
    width = 2 * h - 1
    return [('#' * hashes).center(width)
            for hashes in range(1, width + 1, 2)]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a4a300afe9003b66876d7c1ee2857fec4542e32e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/4/usersdata/145/3314/submittedfiles/swamee.py | 9de554dec9e9faab31dbced293cb3abc50b8bb40 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
# input -- pipe-flow parameters typed by the user.
# float() makes the script work on both Python 2 and 3 (the original relied
# on Python 2's eval-style input()).
f = float(input('digite um valor de f:'))
L = float(input('digite um valor de L:'))
Q = float(input('digite um valor de Q:'))
deltaH = float(input('digite um valor de deltaH:'))
V = float(input('digite um valor de v:'))
# processing
g = 9.81  # gravitational acceleration; was `9,81`, i.e. the tuple (9, 81) -- bug
e = 0.000002  # pipe roughness; was `0,000002`, another comma-for-decimal bug
D = (8*f*L*(Q**2)/((math.pi**2)*g*deltaH))**0.2
# Removed the stray trailing ')' that made this line a SyntaxError.
# NOTE(review): V is used as the kinematic viscosity in Re = 4Q/(pi*D*V) -- confirm units.
Rey = (4*Q)/(math.pi*D*V)
k = 0.25/(math.log10((e/(3.7*D))+(5.74/(Rey**0.9))))**2
print('D=%.4f'%D)
print('Rey=%.4f'%Rey)
print('k=%.4f'%k) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
1ee2b14448aace3b16487246053c468adc039ba6 | 78f1cc341cd6313d02b34d910eec4e9b2745506a | /p02_personal_summary/p13_lee_yung_seong/p03_week/p02_thursday/C5.11.py | 6bb587db4fe5e5cc69b337b6bfe6754cab7e0d67 | [] | no_license | python-cookbook/PythonStudy | a4855621d52eae77537bffb01aae7834a0656392 | cdca17e9734479c760bef188dcb0e183edf8564a | refs/heads/master | 2021-01-20T17:23:35.823875 | 2017-07-30T10:56:13 | 2017-07-30T10:56:13 | 90,873,920 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | #경로 다루기
# Problem
# You need to work with paths to find the base file name, the directory
# name, the absolute path, and so on.
## Solution
# Use the functions in the os.path module. Let's look at a few of its
# features through examples.
import os
path = '/users/beazley/Data/data.csv'
os.path.basename(path)# last component of the path
# directory name
os.path.dirname(path)
# joining path pieces
os.path.join('tmp','data',os.path.basename(path))
# expanding the user's home directory
path = '~/Data/data.csv'
os.path.expanduser(path)
# splitting off the file extension
os.path.splitext(path)
# Discussion
# When handling file names, use the os.path module instead of writing your
# own string-manipulation code; this is also partly a portability concern.
#os path 모듈은 unix와 윈도우의 차이점을 알고 자동으로 처리한다. | [
"acegauss@naver.com"
] | acegauss@naver.com |
aea80bf4ca2d9742e5bb0dc0e1f750e6e39a75b0 | 78da694dc955639c5a9f64e2d83acee4d13fd931 | /socialadmin/admin.py | bda4a33cb121ea5517b5a0e1bdd106a82d435037 | [] | no_license | toluwanicareer/kemibox | f255e73f71c824e780d528e47f37ec7ebca35f60 | 641808e70545826c536ed4062276b129414c2c04 | refs/heads/master | 2020-03-12T17:04:55.299158 | 2018-04-24T16:12:52 | 2018-04-24T16:12:52 | 130,729,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import KemiBox, Post
# Register your models here.
admin.site.register(KemiBox )  # expose KemiBox in the Django admin site
admin.site.register(Post) | [
"abiodun.toluwanii@gmail.com"
] | abiodun.toluwanii@gmail.com |
52acd72b04938eb85d82c661482aa54387aac381 | d65128e38be0243f279e0d72ef85e7d3c5e116ca | /base/site-packages/django/bin/daily_cleanup.py | 5a2ce210f403ce4de577568cf3de95ebcfb94d42 | [
"Apache-2.0"
] | permissive | ZxwZero/fastor | 19bfc568f9a68f1447c2e049428330ade02d451d | dd9e299e250362802032d1984801bed249e36d8d | refs/heads/master | 2021-06-26T06:40:38.555211 | 2021-06-09T02:05:38 | 2021-06-09T02:05:38 | 229,753,500 | 1 | 1 | Apache-2.0 | 2019-12-23T12:59:25 | 2019-12-23T12:59:24 | null | UTF-8 | Python | false | false | 441 | py | #!/usr/bin/env python
"""
Daily cleanup job.
Can be run as a cronjob to clean out old data from the database (only expired
sessions at the moment).
"""
import warnings
from django.core import management
if __name__ == "__main__":
warnings.warn(
"The `daily_cleanup` script has been deprecated "
"in favor of `django-admin.py clearsessions`.",
DeprecationWarning)
management.call_command('clearsessions')
| [
"edisonlz@163.com"
] | edisonlz@163.com |
ad9951dd21e5e5802f28c37bba8d655b7c3d1314 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/nallo/problemB.py | c7cee17f3977f567c41df66e67f27d3beef12abb | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,447 | py | """
se inizio con -
cerca il primo +
se trovo il +
cambia tutti i - in + fino al + trovato compreso
mosse++
se non trovo il +
sol = mosse + 1
se inizio con +
cerca il primo -
se trovo il -
cambia tutti i + in - fino al - trovato compreso
mosse++
se non trovo il meno
sol = mosse
"""
def solve(test_case):
    """Read one '+'/'-' string from stdin and print 'Case #<n>: <moves>',
    the number of prefix-flip moves needed to turn the string into all '+'.
    (Python 2: uses raw_input and print statements.)"""
    s = raw_input()
    moves = 0
    # print "Start: " + s
    while 1:
        if s[0]=='-':
            plus_index = s.find("+")
            if plus_index!=-1:
                # turn every '-' into '+' up to (and effectively including) the first '+'
                replacing_str = plus_index * "+"
                s = replacing_str + s[plus_index:]
                # print "Debug: " + s
                moves += 1
            else:
                # all '-': one final flip of the whole string finishes it
                print "Case #" + str(test_case) + ": " + str(moves+1)
                return
        else:
            minus_index = s.find("-")
            if minus_index!=-1:
                # turn every '+' into '-' up to the first '-' (one prefix flip)
                replacing_str = minus_index * "-"
                s = replacing_str + s[minus_index:]
                # print "Debug: " + s
                moves += 1
            else:
                # all '+': done
                print "Case #" + str(test_case) + ": " + str(moves)
                return
def main():
    """Read the number of test cases, then solve each one in order (Python 2)."""
    t = int(raw_input())
    for i in xrange(t):
        solve(i+1)
if __name__=="__main__":
    main()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
0eb8edd55910aa04bea4ace48cfb3159cb268fc7 | 26f6313772161851b3b28b32a4f8d255499b3974 | /Python/SequentialDigits.py | 426eff307728f7e423f3c99f70c5007a2d736e4c | [] | no_license | here0009/LeetCode | 693e634a3096d929e5c842c5c5b989fa388e0fcd | f96a2273c6831a8035e1adacfa452f73c599ae16 | refs/heads/master | 2023-06-30T19:07:23.645941 | 2021-07-31T03:38:51 | 2021-07-31T03:38:51 | 266,287,834 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | """
An integer has sequential digits if and only if each digit in the number is one more than the previous digit.
Return a sorted list of all the integers in the range [low, high] inclusive that have sequential digits.
Example 1:
Input: low = 100, high = 300
Output: [123,234]
Example 2:
Input: low = 1000, high = 13000
Output: [1234,2345,3456,4567,5678,6789,12345]
Constraints:
10 <= low <= high <= 10^9
"""
class Solution:
    def sequentialDigits(self, low: int, high: int) -> list:
        """Return, in ascending order, every integer in [low, high] whose
        digits are consecutive increasing digits (e.g. 123, 4567).

        Candidates are generated by digit count; for a fixed digit count
        the numbers grow with their starting digit, and longer numbers are
        always larger, so the result list is naturally sorted.

        Fixes over the original: removes the unused ``start_num`` and
        ``str_low``/``str_high`` temporaries, replaces the manual
        ``while``/``break`` scan with bounded ranges, and uses a chained
        comparison for the range test.
        """
        res = []
        # Candidate digit counts run from len(str(low)) to len(str(high)).
        for digits in range(len(str(low)), len(str(high)) + 1):
            # The starting digit can be at most 10 - digits so the last
            # digit (start + digits - 1) stays <= 9.
            for start in range(1, 11 - digits):
                num = int(''.join(str(d) for d in range(start, start + digits)))
                if low <= num <= high:
                    res.append(num)
        return res
# Demo: exercise the two LeetCode examples.
s = Solution()
low = 100
high = 300
print(s.sequentialDigits(low, high))  # expected [123, 234]
low = 1000
high = 13000
print(s.sequentialDigits(low, high))  # expected [1234, 2345, 3456, 4567, 5678, 6789, 12345]
| [
"here0009@163.com"
] | here0009@163.com |
424526fe4c6b2ac3f5a89f36e3f865f5c458509b | 9d2bafb07baf657c447d09a6bc5a6e551ba1806d | /ros2_ws/build/ros2srv/build/lib/ros2srv/api/__init__.py | c1fba609b68266f574397fa8a54e3448c92a64c2 | [] | no_license | weidafan/ros2_dds | f65c4352899a72e1ade662b4106e822d80a99403 | c0d9e6ff97cb7cc822fe25a62c0b1d56f7d12c59 | refs/heads/master | 2021-09-05T20:47:49.088161 | 2018-01-30T21:03:59 | 2018-01-30T21:03:59 | 119,592,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,818 | py | # Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python import get_resource
from ament_index_python import get_resources
from ament_index_python import has_resource
def get_all_service_types():
    """Map every package that registers .srv interfaces to its service names.

    Packages whose ``rosidl_interfaces`` resource contains no services are
    omitted from the result.
    """
    result = {}
    for pkg in get_resources('rosidl_interfaces'):
        names = get_service_types(pkg)
        if names:
            result[pkg] = names
    return result
def get_service_types(package_name):
    """Return the service (.srv) interface names registered for *package_name*.

    Raises LookupError when the package itself is unknown; returns an empty
    list when the package is known but registers no rosidl interfaces.
    """
    if not has_resource('packages', package_name):
        raise LookupError('Unknown package name')
    try:
        content, _ = get_resource('rosidl_interfaces', package_name)
    except LookupError:
        # Package exists but declares no interfaces at all.
        return []
    interface_names = content.splitlines()
    # TODO(dirk-thomas) this logic should come from a rosidl related package
    # Keep only '<name>.srv' entries, stripping the 4-character extension.
    return [n[:-4] for n in interface_names if n.endswith('.srv')]
def get_service_path(package_name, service_name):
    """Return the absolute path of ``<service_name>.srv`` within *package_name*.

    Raises LookupError when either the package or the service is unknown.
    """
    service_types = get_service_types(package_name)
    if service_name not in service_types:
        raise LookupError('Unknown service name')
    # has_resource returns the package's install prefix when it exists.
    prefix_path = has_resource('packages', package_name)
    # TODO(dirk-thomas) this logic should come from a rosidl related package
    return os.path.join(
        prefix_path, 'share', package_name, 'srv', service_name + '.srv')
def service_package_name_completer(**kwargs):
    """Callable returning a list of package names which contain services."""
    # NOTE(review): this actually returns a dict keys view, not a list;
    # argument-completion callers appear to only iterate it.
    return get_all_service_types().keys()
def service_type_completer(**kwargs):
    """Callable returning a list of service types."""
    # Produce 'package/service' identifiers for every registered service.
    types = []
    for pkg, names in get_all_service_types().items():
        for name in names:
            types.append('%s/%s' % (pkg, name))
    return types
class ServiceNameCompleter(object):
    """Callable returning a list of service names within a package."""
    def __init__(self, *, package_name_key=None):
        # Name of the parsed-args attribute that holds the chosen package.
        self.package_name_key = package_name_key
    def __call__(self, prefix, parsed_args, **kwargs):
        # Offer the service types of the package already given on the CLI.
        package_name = getattr(parsed_args, self.package_name_key)
        return get_service_types(package_name)
| [
"austin.tisdale.15@cnu.edu"
] | austin.tisdale.15@cnu.edu |
39490f513add0e0a5c8110a317483eff540be3e1 | e84e699767444315ac2096b3ece1659ba2873ae3 | /radio/templatetags/show_stars.py | bd6749ed39e9404bd4b450a2938de695e41ca7a1 | [
"BSD-3-Clause"
] | permissive | ftrain/django-ftrain | 1e6ac41211dba5e69eabf1a4a85c2aec0c048959 | af535fda8e113e9dcdac31216852e35a01d3b950 | refs/heads/master | 2021-01-21T01:46:53.957091 | 2009-12-28T15:31:26 | 2009-12-28T15:31:26 | 259,071 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | import math
from django.template import Library, Node, TemplateSyntaxError, VariableDoesNotExist, resolve_variable
from django.conf import settings
register = Library()
IMG_TEMPLATE = '<img src="%s" alt="%s"/>'
PATH_TO_WHOLE_STAR = IMG_TEMPLATE % (settings.MEDIA_URL + 'e/stars/star.png', "Whole Star")
PATH_TO_THREE_QUARTER_STAR = IMG_TEMPLATE % (settings.MEDIA_URL + 'e/stars/three-quarter.png', "3/4 Star")
PATH_TO_HALF_STAR = IMG_TEMPLATE % (settings.MEDIA_URL + 'e/stars/half.png', "1/2 Star")
PATH_TO_QUARTER_STAR = IMG_TEMPLATE % (settings.MEDIA_URL + 'e/stars/quarter.png', "1/4 Star")
PATH_TO_BLANK_STAR = IMG_TEMPLATE % (settings.MEDIA_URL + 'e/stars/blank.png', "Empty Star")
class ShowStarsNode(Node):
    """Render a star rating as a sequence of ``<img>`` tags.

    ``context_var`` names the template variable holding the numeric rating,
    ``total_stars`` is the number of star slots to draw, and ``round_to`` is
    'half', 'quarter', or anything else for whole-unit rounding (the default).
    """
    def __init__(self, context_var, total_stars, round_to):
        self.context_var = context_var
        self.total_stars = int(total_stars)
        self.round_to = round_to.lower()
    def render(self, context):
        try:
            stars = resolve_variable(self.context_var, context)
        except VariableDoesNotExist:
            # Unknown variable: render nothing rather than raising.
            return ''
        # Round the raw rating to the requested granularity.
        if self.round_to == "half":
            stars = round(stars*2)/2
        elif self.round_to == "quarter":
            stars = round(stars*4)/4
        else:
            stars = round(stars)
        # Split into whole stars (integer) and the fractional remainder.
        fraction, integer = math.modf(stars)
        integer = int(integer)
        output = []
        for whole_star in range(integer):
            output.append(PATH_TO_WHOLE_STAR)
        # Emit at most one partial-star image for the fractional part.
        if self.round_to == 'half' and fraction == .5:
            output.append(PATH_TO_HALF_STAR)
        elif self.round_to == 'quarter':
            if fraction == .25:
                output.append(PATH_TO_QUARTER_STAR)
            elif fraction == .5:
                output.append(PATH_TO_HALF_STAR)
            elif fraction == .75:
                output.append(PATH_TO_THREE_QUARTER_STAR)
        if fraction:
            # A partial star occupies one slot when padding with blanks.
            integer += 1
        blanks = int(self.total_stars - integer)
        for blank_star in range(blanks):
            output.append(PATH_TO_BLANK_STAR)
        return "".join(output)
""" show_stars context_var of 5 round to half """
def do_show_stars(parser, token):
    """Parse the ``show_stars`` template tag.

    Usage: ``{% show_stars rating of 5 round to half %}``
    Token layout: tag-name, context-var, 'of', total-stars, 'round', 'to',
    rounding unit ('half'/'quarter'/other).
    Raises TemplateSyntaxError on a malformed tag.
    """
    args = token.contents.split()
    if len(args) != 7:
        raise TemplateSyntaxError('%s tag requires exactly six arguments' % args[0])
    if args[2] != 'of':
        raise TemplateSyntaxError("second argument to '%s' tag must be 'of'" % args[0])
    if args[4] != 'round':
        raise TemplateSyntaxError("fourth argument to '%s' tag must be 'round'" % args[0])
    if args[5] != 'to':
        # BUG FIX: this message previously said "fourth argument"; args[5]
        # is the fifth argument after the tag name.
        raise TemplateSyntaxError("fifth argument to '%s' tag must be 'to'" % args[0])
    return ShowStarsNode(args[1], args[3], args[6])
register.tag('show_stars', do_show_stars)
| [
"ford@ftrain.com"
] | ford@ftrain.com |
a789a909f89e7fc0ec0c3ce4cbafd1e65fa6a22e | 113ef54e42a047e9e631b557012411ecfac72c47 | /siphon/web/apps/submissions/migrations/0006_auto_20160224_0639.py | 8a5b27e311ba3669f3c194106114013581a4ae54 | [
"MIT"
] | permissive | siphoncode/siphon-web | 77bd241d5f3912ee78155c2b71b75fb59e1b5e27 | c398427dc1b73f70b94cd2f60a13e4d26c71610e | refs/heads/master | 2021-01-19T21:48:15.253451 | 2016-08-04T16:53:18 | 2016-08-04T16:53:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-24 06:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submissions', '0005_auto_20160224_0629'),
]
operations = [
migrations.AlterField(
model_name='submission',
name='display_name',
field=models.CharField(max_length=32),
),
]
| [
"james.potter@gmail.com"
] | james.potter@gmail.com |
81876856328d10a73c63fd33479e2d04f4a2bc80 | 58cf4e5a576b2baf7755ae19d410bf8afc2f3709 | /leetcode-solutions/P1498.Number_of_subsequences_satisfy_sum.py | d1a47cb9eb60a1fc9beae07105be0146554ee857 | [] | no_license | srihariprasad-r/leet-code | 78284beac34a4d84dde9f8cd36503496b618fdf7 | fc4f1455bafd1496eb5469a509be8638b75155c1 | refs/heads/master | 2023-08-16T20:24:09.474931 | 2023-08-07T14:23:36 | 2023-08-07T14:23:36 | 231,920,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | class Solution(object):
def numSubseq(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
mod = 10**9+7
n = len(nums)
l = ans = 0
r = n - 1
while l <= r:
if nums[l] + nums[r] <= target:
ans += pow(2, r - l, mod)
l += 1
else:
r -= 1
return ans % mod
| [
"59530606+srihariprasad-r@users.noreply.github.com"
] | 59530606+srihariprasad-r@users.noreply.github.com |
9f95a27bc89138cd60a4c119c7e5cb81011fd2cc | 17b0f34996ed63d14fb3ae2b09e3de866740a27e | /website/settings.py | a0a6ce9dc331e64a2e3ffe1d08f441cb34c00567 | [
"MIT"
] | permissive | Alexmhack/django_blog | 6ca47d4d294c1886eaa19ee866395144f1d2bce7 | ffb08983ca0aff2fca15961c64168bbb8d55311b | refs/heads/master | 2020-03-28T01:55:32.939171 | 2018-09-13T13:05:55 | 2018-09-13T13:05:55 | 147,532,712 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,335 | py | """
Django settings for website project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1g61cdt=p!#bl-4r&8aooqx72aiang6+=)_3#bva)4yd0a@6^f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'posts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_cdn')
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'media_cdn')
| [
"alexmohack@gmail.com"
] | alexmohack@gmail.com |
35416f386997d9306cdc80e0b9d426b66dd86c93 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/common/Lib/distutils/tests/test_install.py | 992b281d3dd630b90577a87c6e7387582f4872a4 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 7,714 | py | # 2017.02.03 21:58:16 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/distutils/tests/test_install.py
"""Tests for distutils.command.install."""
import os
import sys
import unittest
import site
from test.test_support import captured_stdout, run_unittest
from distutils import sysconfig
from distutils.command.install import install
from distutils.command import install as install_module
from distutils.command.build_ext import build_ext
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import Distribution
from distutils.errors import DistutilsOptionError
from distutils.extension import Extension
from distutils.tests import support
def _make_ext_name(modname):
    """Return the platform file name for extension module *modname*.

    Appends '_d' when running under a Windows debug interpreter, then the
    shared-object suffix reported by distutils sysconfig ('SO').
    """
    if os.name == 'nt' and sys.executable.endswith('_d.exe'):
        modname += '_d'
    return modname + sysconfig.get_config_var('SO')
class InstallTestCase(support.TempdirManager, support.LoggingSilencer, unittest.TestCase):
    """Tests for the distutils ``install`` command (Python 2-era sources;
    note the py2 print statement inside the 'hello.py' fixture string)."""
    def test_home_installation_scheme(self):
        """--home must route every install_* directory under the home dir."""
        builddir = self.mkdtemp()
        destination = os.path.join(builddir, 'installation')
        dist = Distribution({'name': 'foopkg'})
        dist.script_name = os.path.join(builddir, 'setup.py')
        dist.command_obj['build'] = support.DummyCommand(build_base=builddir, build_lib=os.path.join(builddir, 'lib'))
        cmd = install(dist)
        cmd.home = destination
        cmd.ensure_finalized()
        self.assertEqual(cmd.install_base, destination)
        self.assertEqual(cmd.install_platbase, destination)
        def check_path(got, expected):
            # Compare normalized paths so separators don't matter.
            got = os.path.normpath(got)
            expected = os.path.normpath(expected)
            self.assertEqual(got, expected)
        libdir = os.path.join(destination, 'lib', 'python')
        check_path(cmd.install_lib, libdir)
        check_path(cmd.install_platlib, libdir)
        check_path(cmd.install_purelib, libdir)
        check_path(cmd.install_headers, os.path.join(destination, 'include', 'python', 'foopkg'))
        check_path(cmd.install_scripts, os.path.join(destination, 'bin'))
        check_path(cmd.install_data, destination)
    @unittest.skipIf(sys.version < '2.6', 'site.USER_SITE was introduced in 2.6')
    def test_user_site(self):
        """--user must create and target site.USER_BASE / site.USER_SITE."""
        # Redirect the user-site globals to a temp dir; restored in cleanup().
        self.old_user_base = site.USER_BASE
        self.old_user_site = site.USER_SITE
        self.tmpdir = self.mkdtemp()
        self.user_base = os.path.join(self.tmpdir, 'B')
        self.user_site = os.path.join(self.tmpdir, 'S')
        site.USER_BASE = self.user_base
        site.USER_SITE = self.user_site
        install_module.USER_BASE = self.user_base
        install_module.USER_SITE = self.user_site
        def _expanduser(path):
            return self.tmpdir
        self.old_expand = os.path.expanduser
        os.path.expanduser = _expanduser
        def cleanup():
            site.USER_BASE = self.old_user_base
            site.USER_SITE = self.old_user_site
            install_module.USER_BASE = self.old_user_base
            install_module.USER_SITE = self.old_user_site
            os.path.expanduser = self.old_expand
        self.addCleanup(cleanup)
        for key in ('nt_user', 'unix_user', 'os2_home'):
            self.assertIn(key, INSTALL_SCHEMES)
        dist = Distribution({'name': 'xx'})
        cmd = install(dist)
        options = [ name for name, short, lable in cmd.user_options ]
        self.assertIn('user', options)
        cmd.user = 1
        # Directories must be created by finalization, not before.
        self.assertFalse(os.path.exists(self.user_base))
        self.assertFalse(os.path.exists(self.user_site))
        cmd.ensure_finalized()
        self.assertTrue(os.path.exists(self.user_base))
        self.assertTrue(os.path.exists(self.user_site))
        self.assertIn('userbase', cmd.config_vars)
        self.assertIn('usersite', cmd.config_vars)
    def test_handle_extra_path(self):
        """extra_path parsing: 'path,dirs', single entry, None, and errors."""
        dist = Distribution({'name': 'xx',
         'extra_path': 'path,dirs'})
        cmd = install(dist)
        cmd.handle_extra_path()
        self.assertEqual(cmd.extra_path, ['path', 'dirs'])
        self.assertEqual(cmd.extra_dirs, 'dirs')
        self.assertEqual(cmd.path_file, 'path')
        cmd.extra_path = ['path']
        cmd.handle_extra_path()
        self.assertEqual(cmd.extra_path, ['path'])
        self.assertEqual(cmd.extra_dirs, 'path')
        self.assertEqual(cmd.path_file, 'path')
        dist.extra_path = cmd.extra_path = None
        cmd.handle_extra_path()
        self.assertEqual(cmd.extra_path, None)
        self.assertEqual(cmd.extra_dirs, '')
        self.assertEqual(cmd.path_file, None)
        # More than two comma-separated parts is an error.
        cmd.extra_path = 'path,dirs,again'
        self.assertRaises(DistutilsOptionError, cmd.handle_extra_path)
        return
    def test_finalize_options(self):
        """Mutually exclusive prefix/home/user combinations must raise."""
        dist = Distribution({'name': 'xx'})
        cmd = install(dist)
        cmd.prefix = 'prefix'
        cmd.install_base = 'base'
        self.assertRaises(DistutilsOptionError, cmd.finalize_options)
        cmd.install_base = None
        cmd.home = 'home'
        self.assertRaises(DistutilsOptionError, cmd.finalize_options)
        cmd.prefix = None
        cmd.user = 'user'
        self.assertRaises(DistutilsOptionError, cmd.finalize_options)
        return
    def test_record(self):
        """--record must list every installed file (pure-Python project)."""
        install_dir = self.mkdtemp()
        project_dir, dist = self.create_dist(py_modules=['hello'], scripts=['sayhi'])
        os.chdir(project_dir)
        self.write_file('hello.py', "def main(): print 'o hai'")
        self.write_file('sayhi', 'from hello import main; main()')
        cmd = install(dist)
        dist.command_obj['install'] = cmd
        cmd.root = install_dir
        cmd.record = os.path.join(project_dir, 'filelist')
        cmd.ensure_finalized()
        cmd.run()
        f = open(cmd.record)
        try:
            content = f.read()
        finally:
            f.close()
        found = [ os.path.basename(line) for line in content.splitlines() ]
        expected = ['hello.py',
         'hello.pyc',
         'sayhi',
         'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
        self.assertEqual(found, expected)
    def test_record_extensions(self):
        """--record must list built C extension modules as well."""
        install_dir = self.mkdtemp()
        project_dir, dist = self.create_dist(ext_modules=[Extension('xx', ['xxmodule.c'])])
        os.chdir(project_dir)
        support.copy_xxmodule_c(project_dir)
        buildextcmd = build_ext(dist)
        support.fixup_build_ext(buildextcmd)
        buildextcmd.ensure_finalized()
        cmd = install(dist)
        dist.command_obj['install'] = cmd
        dist.command_obj['build_ext'] = buildextcmd
        cmd.root = install_dir
        cmd.record = os.path.join(project_dir, 'filelist')
        cmd.ensure_finalized()
        cmd.run()
        f = open(cmd.record)
        try:
            content = f.read()
        finally:
            f.close()
        found = [ os.path.basename(line) for line in content.splitlines() ]
        expected = [_make_ext_name('xx'), 'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
        self.assertEqual(found, expected)
    def test_debug_mode(self):
        """install_module.DEBUG must produce extra log output."""
        old_logs_len = len(self.logs)
        install_module.DEBUG = True
        try:
            with captured_stdout():
                self.test_record()
        finally:
            install_module.DEBUG = False
        self.assertGreater(len(self.logs), old_logs_len)
def test_suite():
    """Return this module's tests as a suite (regrtest protocol)."""
    return unittest.makeSuite(InstallTestCase)
if __name__ == '__main__':
    run_unittest(test_suite())
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\common\Lib\distutils\tests\test_install.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:58:16 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
4a1b97ecf31fc70c5c028327969c8e09b80a2cae | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/3070/205003070.py | 3456808fc041576fb61f383fc7f6c51e56317478 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,791 | py | from bots.botsconfig import *
from records003070 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'MN',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'TRN', MIN: 0, MAX: 1},
{ID: 'NM1', MIN: 1, MAX: 5, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'REF', MIN: 0, MAX: 4},
]},
{ID: 'MNC', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'SOM', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 20},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'INT', MIN: 0, MAX: 2},
{ID: 'PCT', MIN: 0, MAX: 10},
{ID: 'AMT', MIN: 0, MAX: 10},
{ID: 'QTY', MIN: 0, MAX: 6},
{ID: 'YNQ', MIN: 0, MAX: 12},
{ID: 'III', MIN: 0, MAX: 12},
{ID: 'CDI', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LX', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'VDI', MIN: 0, MAX: 99999},
{ID: 'YNQ', MIN: 0, MAX: 4},
{ID: 'AMT', MIN: 0, MAX: 6},
{ID: 'PCT', MIN: 0, MAX: 6},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'III', MIN: 0, MAX: 12},
]},
]},
{ID: 'IN1', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'REF', MIN: 0, MAX: 15},
{ID: 'NX1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'NX2', MIN: 0, MAX: 10},
]},
]},
]},
{ID: 'CTT', MIN: 0, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
2111217e5d242a6101b463046e8af164ea271f71 | 4e879e994720100a9354895af2bb9be33b38a42b | /cConsolidation/c_OOP/cm_ClassMethods/110_ClassMethods_002_factoryMethods_example_002.py | 396e108d6539a3d77f23d3f9c9396635d68ffe85 | [] | no_license | pepitogrilho/learning_python | 80314ec97091238ed5cc3ed47422d2e6073a3280 | bbdc78a9a0513c13d991701859bcfe7a8e614a49 | refs/heads/master | 2023-04-09T15:07:08.866721 | 2023-04-02T18:45:47 | 2023-04-02T18:45:47 | 230,527,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # -*- coding: utf-8 -*-
"""
- Instance Methods receive the OBJECT as argument
- Class Methods receive the CLASS as argument
- Static Methods don't receive neither the OBJECT, nor the CLASS
Class Methods are called by a class, which is passed to the ...
.... cls parameter
A common use of these are factory
"""
class Vector2D:
    """A 2-D vector whose components are clamped to be non-negative."""
    def __init__(self, x, y):
        # Negative (or zero-or-less) components are replaced by 0.
        self.x = x if x > 0 else 0
        self.y = y if y > 0 else 0
    def area(self):
        """Rectangle area spanned by the two components."""
        return self.x * self.y
    @classmethod
    def newVector2D(cls, coord):
        """Factory: build a vector with both components equal to *coord*."""
        return cls(coord, coord)
print(v01.area()) | [
"pepitogrilho@gmail.com"
] | pepitogrilho@gmail.com |
a5b47c634aa82aa560d7c99745cdfd77c7fce511 | 400e10dfd9e21ae5092f5184753faa91a9df9277 | /python_structure/객체 미리연산.py | 1808df21a4e0c094ebbb3047219aac426601b9d2 | [] | no_license | MyaGya/Python_Practice | d4aff327a76010603f038bcf4491a14ea51de304 | 0391d1aa6d530f53715c968e5ea6a02cf745fde5 | refs/heads/master | 2023-06-14T07:48:19.998573 | 2021-07-12T05:50:52 | 2021-07-12T05:50:52 | 280,452,168 | 0 | 0 | null | 2021-06-23T13:37:56 | 2020-07-17T14:56:24 | Python | UTF-8 | Python | false | false | 1,205 | py | import time
start = time.time()
print("배열을 만드는 데 걸리는 시간 : ", end=" ")
data = [i for i in range(100000)]
end = time.time()
print(end - start)
start = time.time()
print("배열을 출력하는 데 걸리는 시간 : ", end=" ")
print(data)
end = time.time()
print(end - start)
# map 연산을 걸리는 시간
start = time.time()
print("map 연산 시간 : ", end=" ")
data2 = map(str,data)
end = time.time()
print(data2)
print(end - start)
# 그렇다면 객체 데이터 내부를 모두 바꾸어도 map 연산에는 문제가 없을까?
start = time.time()
print("리스트 값을 일부분 변경시키는 데 걸리는 시간 : ", end=" ")
for i in range(10000):
data[i] = "XXX"
end = time.time()
print(end - start)
start = time.time()
print("원본 데이터를 변경한 후 값을 출력하는 데 걸리는 시간 : ", end=" ")
print(list(data2))
end = time.time()
print(end - start)
# 이로써 원본 데이터가 바뀌면 map리스트가 가리키고 있는 명령어는 바뀐 명령어로 실행됨을 알 수 있다.
# 따라서 따로 원본 데이터의 객체를 저장하는 것이 아닌 포인터 방식으로 가리키고 있음을 알 수 있다.
| [
"38939015+MyaGya@users.noreply.github.com"
] | 38939015+MyaGya@users.noreply.github.com |
aca0869423c75d58481d19ce9e4a5eaed6b7a4dc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_envy.py | 39953a52249af41d6c5335f74ba7d1b427f27fad | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py |
#calss header
class _ENVY():
def __init__(self,):
self.name = "ENVY"
self.definitions = [u'to wish that you had something that another person has: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9e2cfe03c71b87d255becb0e3bfe46b09662220c | 5b5a49643c75aa43d5a876608383bc825ae1e147 | /tests/lists/p126_test.py | 9ab97710827b437465cca8b9f65d6457d8b608fb | [] | no_license | rscai/python99 | 281d00473c0dc977f58ba7511c5bcb6f38275771 | 3fa0cb7683ec8223259410fb6ea2967e3d0e6f61 | refs/heads/master | 2020-04-12T09:08:49.500799 | 2019-10-06T07:47:17 | 2019-10-06T07:47:17 | 162,393,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from python99.lists.p126 import combination
import itertools
def test_combination():
    """combination(l, k) must match itertools.combinations, as lists."""
    items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    size = 2
    expected = [list(pair) for pair in itertools.combinations(items, size)]
    assert combination(items, size) == expected
| [
"ray.s.cai@icloud.com"
] | ray.s.cai@icloud.com |
10dcd966e82a61e2e0650c4a7fd206471ed57224 | 90cea58e80309d2dff88f73f3a43ed5f943ff97d | /DotProduct.py | 16c5d89db0ce1644b33b46100a69c608f8ce680f | [] | no_license | SaiSujithReddy/CodePython | 0b65c82b0e71dba2bbd4c1aefec4e6cd6fd42341 | 4c05b7909092009afffa4536fd284060d20e462d | refs/heads/master | 2022-02-24T09:21:15.284745 | 2019-10-07T23:36:17 | 2019-10-07T23:36:17 | 106,611,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | #3 3 1 4 4 2 5 3 1 7 2 6 5 1
def dot_product(list_user_input):
    """Compute the dot product of two sparse vectors.

    Input layout: ``[n1, n2, i1, v1, i2, v2, ...]`` — the two declared pair
    counts followed by n1 (index, value) pairs for the first vector and n2
    pairs for the second, each sorted by index.

    Prints its progress (as before) and — fix over the original — also
    *returns* the result instead of discarding it.  Returns None on
    malformed input.
    """
    print("inside dot product")
    first_vector_length = list_user_input[0]
    second_vector_length = list_user_input[1]
    print("length of first vector is ", first_vector_length)
    if first_vector_length != second_vector_length:
        print("Incorrect input of vector lengths")
        return None
    first_vector = list_user_input[2:2 + first_vector_length * 2]
    second_vector = list_user_input[2 + first_vector_length * 2:]
    if len(first_vector) != len(second_vector):
        print("Incorrect input of vectors")
        return None
    print("elements of first vector", first_vector)
    print("elements of second vector", second_vector)
    result = 0
    x = y = 0
    # Two-pointer merge over the index-sorted (index, value) pairs:
    # advance the vector with the smaller index; multiply on a match.
    while x < len(first_vector) and y < len(second_vector):
        if first_vector[x] == second_vector[y]:
            result += first_vector[x + 1] * second_vector[y + 1]
            x += 2
            y += 2
        elif first_vector[x] < second_vector[y]:
            x += 2
        else:
            y += 2
    print(result)
    return result
try:
user_input = input('Enter your input: ')
print(user_input)
except ValueError:
print("Error in input")
list_user_input = list(user_input.replace(" ",""))
list_user_input_integer = [int(i) for i in list_user_input]
#find dot product
dot_product(list_user_input_integer) | [
"sai.marapareddy@gmail.com"
] | sai.marapareddy@gmail.com |
c96653a68b279bac906eb7d31a76f6cc76b3e85d | 184310f55b58e854dc3b6c58599ef99bc4c95739 | /hujian_api/API_service/Templight/Official_02.py | 8fc915dc8e1d6048088919cf706caaf6d96527ac | [] | no_license | tanjijun1/Python_API | c8585821a627c399fea1ab31bb024be6b82dd3ab | 3c4771875870ffe425d2d39fc28a50449b1752f2 | refs/heads/master | 2023-01-07T23:30:30.284433 | 2020-11-11T08:43:10 | 2020-11-11T08:43:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,546 | py | import pytest
import allure
import requests
import json
import time
from Templight import GetMysqlData
from Templight import GetMysqlDataSeller
from Common import Post
from Common import Get
from Common import Assert
from Common import Consts
class Official_01:
    """API smoke tests against the official-site endpoints at 172.16.2.101.

    Each method posts a request, asserts HTTP 200 and a success marker in
    the response text, then records 'True' in Consts.RESULT_LIST.
    NOTE(review): the file is Official_02.py but this class is Official_01;
    the ``__main__`` block below instantiates ``Official_02``, which raises
    NameError — confirm the intended class name.
    """
    # Official-site feedback message.
    # type -- feedback category string: feedback/advice/other
    def Off_01(self,userId):
        sessionX = requests.session()
        post_req = Post.Post()
        ass = Assert.Assertions()
        url = 'http://172.16.2.101:3000/v1/site/add_feedback'
        params = {'name':'胡健','contact':'13418483933','content':'吃饭了没?','userId':1,'type':'feedback','mobileModel':'CL001','appVersion':'0.0.1','mobileOSVersion':'10.0.2','deviceModel':'360','images':'https://zhiyundata.oss-cn-shenzhen.aliyuncs.com/zyplay/site_feedback/2018-10-24-11:53:05.jpg'}
        params['userId'] = userId
        res = post_req.post_model_b(sessionX, url, params)
        print(res)
        resCode = res['code']
        resText = res['text']
        assert ass.assert_code(resCode, 200)
        assert ass.assert_in_text(resText, '成功')
        Consts.RESULT_LIST.append('True')
    # Official-site subscription.
    # mobile / email: either one of the two may be supplied.
    def Off_02(self):
        sessionX = requests.session()
        post_req = Post.Post()
        ass = Assert.Assertions()
        url = 'http://172.16.2.101:3000/v1/site/add_followmail'
        params = {'mobile':'13418483933', 'nickname':'hj'}
        res = post_req.post_model_b(sessionX, url, params)
        print(res)
        resCode = res['code']
        resText = res['text']
        assert ass.assert_code(resCode, 200)
        assert ass.assert_in_text(resText, '成功')
        Consts.RESULT_LIST.append('True')
    # External-link click log.
    def Off_03(self,userId,linkId):
        sessionX = requests.session()
        post_req = Post.Post()
        ass = Assert.Assertions()
        url = 'http://172.16.2.101:3000/v1/site/add_link_click_log'
        params = { 'linkId':1, 'token':'12134567890121345678901213456789012'}
        # Fetch the real token for this user from the database fixture.
        data = GetMysqlData.GetMysqlData()
        tokenData = data.getTokenByUserId(userId)
        params['token'] = tokenData
        params['linkId'] = linkId
        res = post_req.post_model_b(sessionX, url, params)
        print(res)
        resCode = res['code']
        resText = res['text']
        assert ass.assert_code(resCode, 200)
        assert ass.assert_in_text(resText, '成功')
        Consts.RESULT_LIST.append('True')
if __name__ == '__main__':
    # BUG FIX: this module defines Official_01 (the file is merely *named*
    # Official_02.py); instantiating Official_02 raised NameError.
    a = Official_01()
    # BUG FIX: Off_03 requires userId and linkId; calling it with no
    # arguments raised TypeError.  Placeholder ids for a manual smoke run.
    a.Off_03(1, 1)
| [
"1065913054@qq.com"
] | 1065913054@qq.com |
d2ea2f4eb3e5aea99cea24baf453eca59c539211 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/AndrewNguyen/b2.py | f7a0b686bc0911c70127e931d1811ba557d1d1d4 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,119 | py | import sys
def isHappy(state):
    """Return True when *state* contains no '-' (all pancakes happy-side up)."""
    return '-' not in state
def generate(state):
    """Return the states produced by flipping each prefix of *state*.

    For every prefix length i in 1..len(state), the first i characters are
    inverted ('+' becomes '-', anything else becomes '+') and the remaining
    suffix is kept unchanged.
    """
    results = []
    for i in range(1, len(state) + 1):
        inverted = ''.join('-' if ch == '+' else '+' for ch in state[:i])
        results.append(inverted + state[i:])
    return results
if __name__ == "__main__":
filename = sys.argv[1]
f = open(filename, 'r')
lines = f.readlines()
T = int(lines[0])
idx = 0
for i in range(0,T):
origin_state = lines[i+1]
states = set([origin_state])
level = {origin_state:0}
queue = [origin_state]
while len(queue) > 0:
# print(states)
next_item = queue.pop()
# print("%3d: %s" % (idx, next_item))
idx = idx + 1
current_level = level[next_item]
if isHappy(next_item):
print("Case #%d: %d" % (i+1, current_level))
break
new_states = generate(next_item)
for state in new_states:
if state not in states:
states.add(state)
queue.insert(0, state)
level[state] = current_level + 1
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
d3c5463beb6e02ae8155b9bce09381283c948c6e | 4cc2ef8cd6a587da84a7ad0d7f15ed29c3f1cb9a | /pic/updateid.py | 24991c93c0f3a55e6d809fd0567e517804d5f37a | [] | no_license | eiselekd/backuphelpers | 71797823468cec36038d482ee81e606571f35289 | a9b92a6f3bd3984a517eb0c262f856e451302990 | refs/heads/master | 2020-03-23T08:48:29.977323 | 2018-11-19T20:33:13 | 2018-11-19T20:33:13 | 141,349,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | #!/usr/bin/python3
import argparse, os
from pprint import pprint
from sortpics.img import SortImage
from pyexiftool.exiftool import ExifTool
from pyexiftool.exiftool import fsencode
import tempfile, sys, hashlib
parser = argparse.ArgumentParser(description='sortpics')
parser.add_argument('--verbose', '-v', dest='verbose', action='count', default=0)
parser.add_argument('--update', '-u', dest='update', action='count', default=0)
parser.add_argument('file')
args = parser.parse_args()
f = args.file
def md5(img_path):
    """Return the hexadecimal MD5 digest of the file at ``img_path``.

    The file is read in 4 KiB chunks so large images never load fully
    into memory.
    """
    digest = hashlib.md5()
    with open(img_path, "rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
# Read the image's metadata and extract an existing ImageUniqueID if present.
with ExifTool() as et:
    exif_data = et.get_metadata(f)
    if (args.verbose):
        pprint(exif_data)
    m = ""
    # Prefer whichever ImageUniqueID tag is present and long enough to look
    # like a real identifier (> 16 characters); the last match wins.
    for j in [ 'EXIF:ImageUniqueID', 'MakerNotes:ImageUniqueID' ]:
        if j in exif_data:
            if (len(exif_data[j]) > 16):
                m = exif_data[j]
    if m == "":
        if (args.update):
            # No usable ID found: with --update, write the file's MD5 digest
            # into the EXIF ImageUniqueID tag via exiftool.
            m = md5(f)
            p = map(fsencode,["-EXIF:ImageUniqueID+=%s" %(m), f])
            et.execute(*p)
    # Emit the ID (possibly empty) without a trailing newline.
    sys.stdout.write(m)
| [
"eiselekd@gmail.com"
] | eiselekd@gmail.com |
8994d1eea949890530bfbdfe44ecda4a0e96bce8 | d51b4c766661af65b4ee6e7c30f8cb4bdd8603e3 | /python/algorithm/leetcode/403.py | 3684426cb3a77880be1c00f4f4e999c006acfbfb | [] | no_license | yanxurui/keepcoding | 3e988c76b123d55b32cf7cc35fbffb12c4ccb095 | d6b9f07e2d1437681fa77fee0687ea9b83cab135 | refs/heads/master | 2021-01-24T09:01:41.306597 | 2020-05-21T05:36:04 | 2020-05-21T05:36:04 | 93,400,267 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,715 | py | from typing import List
# https://leetcode.com/problems/frog-jump/discuss/88824/Very-easy-to-understand-JAVA-solution-with-explanations
class Solution:
    """Frog Jump (LeetCode 403): forward DP over (stone, jump-size) states."""

    def canCross(self, stones: List[int]) -> bool:
        """Return True if the frog can reach the last stone.

        For each stone we track the set of jump sizes that can land on it,
        then propagate sizes k-1, k, k+1 to reachable downstream stones.
        """
        jumps = {pos: set() for pos in stones}
        jumps[stones[0]].add(1)
        last = stones[-1]
        for pos in stones:
            for size in jumps[pos]:
                landing = pos + size
                if landing == last:
                    return True
                if landing in jumps:
                    for nxt in (size - 1, size, size + 1):
                        if nxt > 0:
                            jumps[landing].add(nxt)
        return False
# TLE: exhaustive DFS with binary search for the next stone — exponential in
# the worst case, kept here as a reference implementation.
class Solution2:
    def canCross(self, stones: List[int]) -> bool:
        """Entry point: start at index 0 with a pending jump of 1 unit."""
        return self.dfs(stones, 0, 1)
    def dfs(self, stones, i, k):
        # jump from index i by k units
        n = len(stones)
        if i == n-1:
            # arrive at the last stone
            return True
        # jump to j
        j = self.bs(stones, i+1, len(stones)-1, stones[i]+k)
        # j is always >= 1 when found, because the search starts at i+1.
        if j > 0:
            # Try the three allowed follow-up jump sizes: k-1, k, k+1.
            return self.dfs(stones, j, k-1) or self.dfs(stones, j, k) or self.dfs(stones, j, k+1)
        else:
            return False
    def bs(self, stones, l, h, t):
        # Binary search for value t within stones[l..h]; return index or -1.
        while l <= h:
            m = (l+h)//2
            if stones[m] == t:
                return m
            elif stones[m] < t:
                l = m+1
            else:
                h = m-1
        return -1
if __name__ == '__main__':
    # Run both sample cases through the DP solution via the local testfunc
    # helper; each entry is a (stones, expected_result) pair.
    from testfunc import test
    test_data = [
        (
            [0,1,3,5,6,8,12,17],
            True
        ),
        (
            [0,1,2,3,4,8,9,11],
            False
        )
    ]
    test(Solution().canCross, test_data)
| [
"617080352@qq.com"
] | 617080352@qq.com |
3c15bf29f03af4682e3d5978616547b28ff92205 | d7faf47825b6f8e5abf9a9587f1e7248c0eed1e2 | /python/ray/tests/test_multinode_failures.py | 61627c99c93063c84698e4d6581e3a96baa82812 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ggdupont/ray | 7d7c7f39a8f99a09199fab60897da9e48b8e2645 | 15391026c19f1cbbb8d412e46b01f7998e42f2b9 | refs/heads/master | 2023-03-12T06:30:11.428319 | 2021-12-07T05:34:27 | 2021-12-07T05:34:27 | 165,058,028 | 0 | 0 | Apache-2.0 | 2023-03-04T08:56:50 | 2019-01-10T12:41:09 | Python | UTF-8 | Python | false | false | 5,384 | py | import os
import signal
import sys
import time
import pytest
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster, cluster_not_supported
from ray._private.test_utils import RayTestTimeoutException, get_other_nodes
SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported")
@pytest.fixture(params=[(1, 4), (4, 4)])
def ray_start_workers_separate_multinode(request):
    """Fixture: start a local Ray cluster and yield (num_nodes, workers/node).

    Parametrized over (1 node, 4 workers) and (4 nodes, 4 workers).
    """
    num_nodes = request.param[0]
    num_initial_workers = request.param[1]
    # Start the Ray processes.
    cluster = Cluster()
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_initial_workers)
    ray.init(address=cluster.address)
    yield num_nodes, num_initial_workers
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_worker_failed(ray_start_workers_separate_multinode):
    """Kill worker processes mid-task; tasks must either complete or raise a
    worker-failure exception — never hang or return corrupted results."""
    num_nodes, num_initial_workers = (ray_start_workers_separate_multinode)
    @ray.remote
    def get_pids():
        time.sleep(0.25)
        return os.getpid()
    # Collect the PIDs of all workers by repeatedly scheduling tasks until
    # every worker has been observed at least once (bounded by a 60s timeout).
    start_time = time.time()
    pids = set()
    while len(pids) < num_nodes * num_initial_workers:
        new_pids = ray.get([
            get_pids.remote()
            for _ in range(2 * num_nodes * num_initial_workers)
        ])
        for pid in new_pids:
            pids.add(pid)
        if time.time() - start_time > 60:
            raise RayTestTimeoutException(
                "Timed out while waiting to get worker PIDs.")
    @ray.remote
    def f(x):
        time.sleep(0.5)
        return x
    # Submit more tasks than there are workers so that all workers and
    # cores are utilized.
    object_refs = [f.remote(i) for i in range(num_initial_workers * num_nodes)]
    object_refs += [f.remote(object_ref) for object_ref in object_refs]
    # Allow the tasks some time to begin executing.
    time.sleep(0.1)
    # Kill the workers as the tasks execute.
    for pid in pids:
        try:
            os.kill(pid, SIGKILL)
        except OSError:
            # The process may have already exited due to worker capping.
            pass
        time.sleep(0.1)
    # Make sure that we either get the object or we get an appropriate
    # exception.
    for object_ref in object_refs:
        try:
            ray.get(object_ref)
        except (ray.exceptions.RayTaskError,
                ray.exceptions.WorkerCrashedError):
            pass
def _test_component_failed(cluster, component_type):
    """Kill a component on all worker nodes and check workload succeeds."""
    # Submit many tasks with many dependencies.
    @ray.remote
    def f(x):
        # Sleep to make sure that tasks actually fail mid-execution.
        time.sleep(0.01)
        return x
    @ray.remote
    def g(*xs):
        # Sleep to make sure that tasks actually fail mid-execution. We
        # only use it for direct calls because the test already takes a
        # long time to run with the raylet codepath.
        time.sleep(0.01)
        return 1
    # Kill the component on all nodes except the head node as the tasks
    # execute. Do this in a loop while submitting tasks between each
    # component failure.
    time.sleep(0.1)
    worker_nodes = get_other_nodes(cluster)
    assert len(worker_nodes) > 0
    for node in worker_nodes:
        process = node.all_processes[component_type][0].process
        # Submit a round of tasks with many dependencies.
        x = 1
        for _ in range(1000):
            x = f.remote(x)
        xs = [g.remote(1)]
        for _ in range(100):
            xs.append(g.remote(*xs))
            xs.append(g.remote(1))
        # Kill a component on one of the nodes.
        # Graceful terminate (SIGTERM) first, then a hard kill after 1s.
        process.terminate()
        time.sleep(1)
        process.kill()
        process.wait()
        assert not process.poll() is None
        # Make sure that we can still get the objects after the
        # executing tasks died.
        ray.get(x)
        ray.get(xs)
def check_components_alive(cluster, component_type, check_component_alive):
    """Assert every worker node's component of the given type is alive,
    or — when check_component_alive is False — wait for it to terminate."""
    worker_nodes = get_other_nodes(cluster)
    assert len(worker_nodes) > 0
    processes = [
        node.all_processes[component_type][0].process for node in worker_nodes
    ]
    for process in processes:
        if check_component_alive:
            # poll() is None while the process is still running.
            assert process.poll() is None
            continue
        pid_label = component_type + " with PID " + str(process.pid)
        print("waiting for " + pid_label + "to terminate")
        process.wait()
        print("done waiting for " + pid_label + "to terminate")
        assert process.poll() is not None
@pytest.mark.parametrize(
    "ray_start_cluster",
    [{
        "num_cpus": 8,
        "num_nodes": 4,
        "_system_config": {
            # Raylet codepath is not stable with a shorter timeout.
            "num_heartbeats_timeout": 10
        },
    }],
    indirect=True)
def test_raylet_failed(ray_start_cluster):
    """Workload must still succeed after every worker node's raylet dies."""
    cluster = ray_start_cluster
    # Kill all raylets on worker nodes.
    _test_component_failed(cluster, ray_constants.PROCESS_TYPE_RAYLET)
if __name__ == "__main__":
    import pytest
    # Allow running this test module directly with `python <file>`.
    sys.exit(pytest.main(["-v", __file__]))
| [
"noreply@github.com"
] | ggdupont.noreply@github.com |
2f2a337de675580690d98717fd71b31a135f8197 | 209155f8a32bef23c9b02f5ec26a1f5935d319c0 | /tests/test_squeezedimd.py | 69974d322cf4bebb2a84a03558eefc6f67187780 | [
"Apache-2.0"
] | permissive | wentaozhu/MONAI | ec154ca360714df8fcd55aa63506465c7949e1ff | 55025e07144b113f156b232b196c3ef1a765e02b | refs/heads/master | 2022-11-09T10:28:50.672985 | 2020-06-25T23:09:26 | 2020-06-25T23:09:26 | 274,224,027 | 2 | 1 | Apache-2.0 | 2020-06-22T19:22:59 | 2020-06-22T19:22:58 | null | UTF-8 | Python | false | false | 2,518 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import SqueezeDimd
TEST_CASE_1 = [
{"keys": ["img", "seg"], "dim": None},
{"img": np.random.rand(1, 2, 1, 3), "seg": np.random.randint(0, 2, size=[1, 2, 1, 3])},
(2, 3),
]
TEST_CASE_2 = [
{"keys": ["img", "seg"], "dim": 2},
{"img": np.random.rand(1, 2, 1, 8, 16), "seg": np.random.randint(0, 2, size=[1, 2, 1, 8, 16])},
(1, 2, 8, 16),
]
TEST_CASE_3 = [
{"keys": ["img", "seg"], "dim": -1},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
(1, 1, 16, 8),
]
TEST_CASE_4 = [
{"keys": ["img", "seg"]},
{"img": np.random.rand(1, 2, 1, 3), "seg": np.random.randint(0, 2, size=[1, 2, 1, 3])},
(2, 1, 3),
]
TEST_CASE_4_PT = [
{"keys": ["img", "seg"], "dim": 0},
{"img": torch.rand(1, 2, 1, 3), "seg": torch.randint(0, 2, size=[1, 2, 1, 3])},
(2, 1, 3),
]
TEST_CASE_5 = [
{"keys": ["img", "seg"], "dim": -2},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
]
TEST_CASE_6 = [
{"keys": ["img", "seg"], "dim": 0.5},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
]
class TestSqueezeDim(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_4_PT])
def test_shape(self, input_param, test_data, expected_shape):
result = SqueezeDimd(**input_param)(test_data)
self.assertTupleEqual(result["img"].shape, expected_shape)
self.assertTupleEqual(result["seg"].shape, expected_shape)
@parameterized.expand([TEST_CASE_5, TEST_CASE_6])
def test_invalid_inputs(self, input_param, test_data):
with self.assertRaises(ValueError):
SqueezeDimd(**input_param)(test_data)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | wentaozhu.noreply@github.com |
decbb3699bc8cd3376d79937ce5ff90ed552d6e5 | 0e0ce88c886370df9af51855115c99dfc003e5da | /2020/08_Flask/16_MONGOdb_and_Login_user/app/__init__.py | 96dcb3ff132ec6bfd8b7aa4b0b1c6f118af143e8 | [] | no_license | miguelzeph/Python_Git | ed80db9a4f060836203df8cc2e42e003b0df6afd | 79d3b00236e7f4194d2a23fb016b43e9d09311e6 | refs/heads/master | 2021-07-08T18:43:45.855023 | 2021-04-01T14:12:23 | 2021-04-01T14:12:23 | 232,007,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # Constructor and Config
from flask import Flask
from flask_pymongo import PyMongo
# Flask application object; SECRET_KEY signs the session cookie.
app = Flask(__name__)
# SECURITY NOTE(review): hard-coded secret key — load from an environment
# variable before deploying.
app.config['SECRET_KEY'] = 'mysecretkey'
#################MONGODB######################
# MongoDB config (account registered at www.mlab.com).
# SECURITY NOTE(review): database credentials are hard-coded in the URI.
app.config['MONGO_URI'] = "mongodb+srv://useradmin:admin@cluster0.7rvyr.gcp.mongodb.net/MEU_DB?retryWrites=true&w=majority"
# PyMongo wrapper bound to this app, used by the views for DB access.
mongo = PyMongo(app)
##############################################
"miguel.junior.mat@hotmail.com"
] | miguel.junior.mat@hotmail.com |
1a91963a5355142fb88137dc0520af04d28e21be | c8fd92a30a9ff7f8b3678398ad137b0350b907b8 | /config.py | 173287a0d803000c04dc4a237fe89f9190437745 | [] | no_license | maximilianh/repeatBrowser | 2634d487aeeb8edfb9f48096650cf2bf96251a26 | 76d55908d567ee5eda89dab9145d5f5652065bc5 | refs/heads/master | 2021-01-01T20:00:54.441883 | 2020-03-09T11:57:53 | 2020-03-09T11:57:53 | 20,178,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | DB = "hg38"
#DB = "hg19"
TMPDIR = "/scratch/tmp"
# set to None to not use cluster
#CLUSTER=None
CLUSTER="ku"
TARGETPERC = 99 # percentile, for top50 consensus building
rmskTable = "rmskHmmer2020" # the rmsk table, usually it's rmsk, but you can specify a different one here
RMTORB = "data/rmToDfam.tab"
RBTORM = "data/rbToRm.tab"
| [
"max@soe.ucsc.edu"
] | max@soe.ucsc.edu |
3d805adb218362378b581ac72b37a9ac9781147d | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/d7f1d7204e4f46fc8875f4aeefac75e3.py | 36daf78d07daeb7267f1f3306eef351eb9fe3518 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 260 | py | def hey(words):
if len(words.strip())==0:
return 'Fine. Be that way!'
elif words.isupper():
return 'Whoa, chill out!'
punctuation=words[-1]
if punctuation=='?':
return 'Sure.'
else:
return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
83dc2ea11d5703b80b5e91b05c61699d3b671f2b | f28aebe70dc265e0e2ecf3a02c19f1d20a750a92 | /perfil/views.py | 3c2e2a1d680890c1c1419ac0cf747b0d8060df93 | [] | no_license | linikerunk/E-commerce-liniker | 7fc1750d53ad8ad1e2c28537158324caaaf7d710 | c91376da3d18498fdf956e34d884a8ccca2c0d33 | refs/heads/master | 2020-09-28T08:25:12.794289 | 2020-01-09T02:06:26 | 2020-01-09T02:06:26 | 226,731,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,674 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import ListView
from django.views import View
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
import copy
from . import models
from . import forms
# Create your views here.
class BasePerfil(View):
    """Shared setup for profile views: builds the user/profile forms and a
    pre-rendered response for the current request."""
    template_name = 'perfil/criar.html'
    def setup(self, *args, **kwargs):
        super().setup(*args, **kwargs)
        # Snapshot the session cart so login/registration flows can restore it.
        self.carrinho = copy.deepcopy(self.request.session.get('carrinho', {}))
        self.perfil = None
        if self.request.user.is_authenticated:
            self.perfil = models.Perfil.objects.filter(
                usuario=self.request.user).first()
            # Bind both forms to the logged-in user's existing data.
            self.contexto = {
                'userform':forms.UserForm(
                    data=self.request.POST or None,
                    usuario=self.request.user,
                    instance=self.request.user,
                ),
                'perfilform': forms.PerfilForm(
                    data=self.request.POST or None,
                    instance=self.perfil,
                ),
            }
        else:
            # Anonymous visitor: unbound forms for a new registration.
            self.contexto = {
                'userform':forms.UserForm(
                    data=self.request.POST or None
                ),
                'perfilform': forms.PerfilForm(
                    data=self.request.POST or None
                ),
            }
        self.userform = self.contexto['userform']
        self.perfilform = self.contexto['perfilform']
        if self.request.user.is_authenticated:
            self.template_name = 'perfil/atualizar.html'
        # NOTE(review): the template is rendered on every request, including
        # POSTs that end in a redirect — confirm this is intentional.
        self.renderizar = render(self.request, self.template_name, self.contexto)
    def get(self, *args, **kwargs):
        return self.renderizar
class Criar(BasePerfil):
    """Create or update the user account and its profile from the POSTed forms."""
    def post(self, *args, **kwargs):
        if not self.userform.is_valid() or not self.perfilform.is_valid():
            messages.error(
                self.request,
                'Existem erros no formulário de cadastro. Verifique se todos '
                'os campos foram preenchidos corretamentes'
            )
            return self.renderizar
        username = self.userform.cleaned_data.get('username')
        password = self.userform.cleaned_data.get('password')
        email = self.userform.cleaned_data.get('email')
        first_name = self.userform.cleaned_data.get('first_name')
        last_name = self.userform.cleaned_data.get('last_name')
        # Logged-in user: update the existing account.
        if self.request.user.is_authenticated:
            usuario = get_object_or_404(User, username=self.request.user.username)
            usuario.username = username
            # Only re-hash the password when a new one was supplied.
            if password:
                usuario.set_password(password)
            usuario.email = email
            usuario.first_name = first_name
            usuario.last_name = last_name
            usuario.save()
            if not self.perfil:
                self.perfilform.cleaned_data['usuario'] = usuario
                # NOTE(review): debug print left in — remove or log properly.
                print(self.perfilform.cleaned_data)
                perfil = models.Perfil(**self.perfilform.cleaned_data)
                perfil.save()
            else:
                perfil = self.perfilform.save(commit=False)
                perfil.usuario = usuario
                perfil.save()
        # Anonymous user: create a new account and profile.
        else:
            usuario = self.userform.save(commit=False)
            usuario.set_password(password)
            usuario.save()
            perfil = self.perfilform.save(commit=False)
            perfil.usuario = usuario
            perfil.save()
        # Re-authenticate (password change invalidates the session) and
        # restore the cart snapshot taken in BasePerfil.setup().
        if password:
            autentica = authenticate(self.request,
                                     username=usuario,
                                     password=password)
            if autentica:
                login(self.request, user=usuario)
        self.request.session['carrinho'] = self.carrinho
        self.request.session.save()
        messages.success(
            self.request,
            'Seu cadastro foi criado ou atualizado com sucesso.')
        messages.success(
            self.request,
            'Você fez login e pode concluir sua compra.')
        return redirect('produto:carrinho')
        # NOTE(review): unreachable — the redirect above always returns first.
        return self.renderizar
class Atualizar(View):
    # NOTE(review): stub view — updates appear to be handled by Criar via
    # BasePerfil's authenticated branch; confirm before wiring URLs here.
    def get(self, *args, **kwargs):
        return HttpResponse('Atualizar')
class Login(View):
    """Authenticate a user from POSTed username/password and redirect."""
    def post(self, *args, **kwargs):
        username = self.request.POST.get('username')
        password = self.request.POST.get('password')
        # Reject missing credentials before hitting the auth backend.
        if not username or not password:
            messages.error(
                self.request,
                'Usuário ou senha invalidos.'
            )
            return redirect('perfil:criar')
        usuario = authenticate(self.request, username=username, password=password)
        if not usuario:
            messages.error(
                self.request,
                'Usuário ou senha invalidos.'
            )
            return redirect('perfil:criar')
        login(self.request, user=usuario)
        messages.success(
            self.request,
            'Você fez login no sistema e pode concluir sua compra.'
        )
        return redirect('produto:carrinho')
class Logout(View):
    """Log the user out while preserving the session shopping cart."""
    def get(self, *args, **kwargs):
        # Django's logout() flushes the whole session, so snapshot the cart
        # first and restore it into the fresh session afterwards.
        saved_cart = copy.deepcopy(self.request.session.get('carrinho'))
        logout(self.request)
        self.request.session['carrinho'] = saved_cart
        self.request.session.save()
        return redirect('produto:lista')
| [
"linikerenem@gmail.com"
] | linikerenem@gmail.com |
0d8395c16985bc147b8ca2e583c5f5cace411878 | 7da288104b7ab242913f061dc39adb01b226db98 | /PathSupportSet/CandidatePredicatePathSupSet.py | ff351321e0825a22ebe4414b8da02b0bce7229ee | [] | no_license | cstmdzx/pyAnswer | fc1ba6a58b12a8478cf9e1cd79eb4ec4b46697ef | bb5c16510f25bb61d9618c4069c49fdb0672d417 | refs/heads/master | 2021-01-12T05:25:13.307037 | 2017-04-27T02:57:09 | 2017-04-27T02:57:09 | 77,926,338 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 3,377 | py | # coding=gbk
# CandidatePredicatePathForRep format:RelationPhraseId~Predicate[f/b]Predicate[f/b]…~Predicate[f/b]Predicate[f/b]…
# generate Predicate/Path support set and record predicate candidates at the same time
import MySQLdb
import re
conn = MySQLdb.connect(host='localhost', user='root', password='password', db='dbpedia')
cur = conn.cursor()
filePredicatePathSupSet = open('PredicatePathSupSet', 'w')
fileCandidatePredicatePathForRep = open('CandidatePredicatePathForRep')
filePredicateCandidates = open('PredicatePathCandidates', 'w')
linesCandidatePredicatePathForRep = fileCandidatePredicatePathForRep.readlines()
dictPredicatePathCandidates = dict()
for eachLine in linesCandidatePredicatePathForRep:
eachLine = eachLine.replace('\n', '')
words = eachLine.split('~')
idRep = words[0]
del words[0] # 去掉最开始的relation phrase id
listPath = list()
for eachPath in words: # 一条路径
if eachPath in dictPredicatePathCandidates:
dictPredicatePathCandidates[eachPath] += '~' + idRep
else:
dictPredicatePathCandidates[eachPath] = idRep
wordsPath = eachPath.split(']')
dictResEntityPair = dict() # 保存截止当前路径的EntityPairs
for eachPredicate in wordsPath:
wordsPredicate = eachPredicate.split('[') # 路径中的每一条谓语
predicate = wordsPredicate[0]
direction = wordsPredicate[1]
sqlPredicateSO = 'select SO from P_SO WHERE Predicate = \'' + predicate.__str__() + '\''
cur.execute(sqlPredicateSO)
listSO = cur.fetchall() # 找出所有的SO
dictCurEntityPair = dict() # 将找出的SO处理成dict,方便后面做连接
for eachElement in listSO:
wordsSO = eachElement.split('s')
wordSubject = wordsSO[0]
wordObject = wordsSO[1]
if direction.__str__() == 'f':
dictCurEntityPair[wordSubject] = wordObject
else:
dictCurEntityPair[wordObject] = wordSubject
if dictResEntityPair.__len__() == 1:
dictResEntityPair = dictCurEntityPair
else:
for eachKey in dictResEntityPair:
# 把当前的node,连接到下一次要做匹配连接的那个node上
if eachKey in dictCurEntityPair:
# 其实就相当于把dictResEntityPair的key换成下一个node
dictResEntityPair[dictCurEntityPair[eachKey]] = dictResEntityPair[eachKey]
# 写入结果
filePredicatePathSupSet.write(eachPath)
for eachKey in dictResEntityPair:
filePredicatePathSupSet.write('~')
# 一定是先写value进去,然后再写key,因为连接方式
filePredicatePathSupSet.write(dictResEntityPair[eachKey] + '\t' + eachKey)
filePredicatePathSupSet.write('\n')
# 保存candidate
for eachCandidate in dictPredicatePathCandidates:
filePredicateCandidates.write(eachCandidate + '~' + dictPredicatePathCandidates[eachCandidate] + '\n')
cur.close()
conn.close()
filePredicateCandidates.close()
filePredicatePathSupSet.close()
fileCandidatePredicatePathForRep.close()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
7e16f267e522ad119638dc37559b9520b949a962 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-dgc/huaweicloudsdkdgc/v1/model/resource_info.py | a14466abe6bbdbb7ed79a7a66a595ce0f83a8450 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,704 | py | # coding: utf-8
import re
import six
class ResourceInfo:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Auto-generated SDK model (huaweicloud). Attribute names listed here are
    # masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'name': 'str',
        'type': 'str',
        'location': 'str',
        'depend_files': 'list[str]',
        'desc': 'str',
        'directory': 'str'
    }
    # Maps Python attribute names to their JSON keys in the API definition.
    attribute_map = {
        'name': 'name',
        'type': 'type',
        'location': 'location',
        'depend_files': 'dependFiles',
        'desc': 'desc',
        'directory': 'directory'
    }
    def __init__(self, name=None, type=None, location=None, depend_files=None, desc=None, directory=None):
        """ResourceInfo - a model defined in huaweicloud sdk"""
        self._name = None
        self._type = None
        self._location = None
        self._depend_files = None
        self._desc = None
        self._directory = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided.
        if name is not None:
            self.name = name
        if type is not None:
            self.type = type
        if location is not None:
            self.location = location
        if depend_files is not None:
            self.depend_files = depend_files
        if desc is not None:
            self.desc = desc
        if directory is not None:
            self.directory = directory
    @property
    def name(self):
        """Gets the name of this ResourceInfo.

        :return: The name of this ResourceInfo.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this ResourceInfo.

        :param name: The name of this ResourceInfo.
        :type: str
        """
        self._name = name
    @property
    def type(self):
        """Gets the type of this ResourceInfo.

        :return: The type of this ResourceInfo.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this ResourceInfo.

        :param type: The type of this ResourceInfo.
        :type: str
        """
        self._type = type
    @property
    def location(self):
        """Gets the location of this ResourceInfo.

        OBS path where the resource file is stored.

        :return: The location of this ResourceInfo.
        :rtype: str
        """
        return self._location
    @location.setter
    def location(self, location):
        """Sets the location of this ResourceInfo.

        OBS path where the resource file is stored.

        :param location: The location of this ResourceInfo.
        :type: str
        """
        self._location = location
    @property
    def depend_files(self):
        """Gets the depend_files of this ResourceInfo.

        :return: The depend_files of this ResourceInfo.
        :rtype: list[str]
        """
        return self._depend_files
    @depend_files.setter
    def depend_files(self, depend_files):
        """Sets the depend_files of this ResourceInfo.

        :param depend_files: The depend_files of this ResourceInfo.
        :type: list[str]
        """
        self._depend_files = depend_files
    @property
    def desc(self):
        """Gets the desc of this ResourceInfo.

        :return: The desc of this ResourceInfo.
        :rtype: str
        """
        return self._desc
    @desc.setter
    def desc(self, desc):
        """Sets the desc of this ResourceInfo.

        :param desc: The desc of this ResourceInfo.
        :type: str
        """
        self._desc = desc
    @property
    def directory(self):
        """Gets the directory of this ResourceInfo.

        Directory where the resource is located.

        :return: The directory of this ResourceInfo.
        :rtype: str
        """
        return self._directory
    @directory.setter
    def directory(self, directory):
        """Sets the directory of this ResourceInfo.

        Directory where the resource is located.

        :param directory: The directory of this ResourceInfo.
        :type: str
        """
        self._directory = directory
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        # NOTE(review): relies on the third-party simplejson package —
        # confirm it is a declared dependency of this SDK.
        import simplejson as json
        return json.dumps(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResourceInfo):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
56b342083fe99216f195da0c3369e49ae969dc03 | 43ede7b8fb546c00804c0ef94501f6e48ba170d6 | /Cursos Python/Análise de dados do zero. (PYTHON & PANDAS)/04 - Pandas DataFrame.py | 0731ed8b3a8a6bae993c69a735c235213f50e39c | [] | no_license | bopopescu/Python-13 | db407d17252473e78e705e563cfee4dbd316c6b9 | c8bef500f2d3e4a63d850f96dfa219eff2ecebda | refs/heads/master | 2022-11-22T16:24:08.490879 | 2020-06-11T14:22:24 | 2020-06-11T14:22:24 | 281,830,055 | 0 | 0 | null | 2020-07-23T02:26:31 | 2020-07-23T02:26:30 | null | UTF-8 | Python | false | false | 298 | py | import pandas as pd
coluna = ['Frutas']
itens = ['Laranja', 'maça', 'abacate']
df = pd.DataFrame(itens, columns=coluna)
print(df, '\n'*2)
coluna = ['Frutas', 'Preco']
itens = [['Laranja', 10], ['Maça', 10], ['Abacate', 15]]
df = pd.DataFrame(itens, columns=coluna, dtype=float)
print(df) | [
"ofc.erickson@gmail.com"
] | ofc.erickson@gmail.com |
995822e51e2d34a880e4dc2ba08886ea425c6817 | 8b619e49c3b83778958ebdbe85b25c7182117ac0 | /settings.py | c821d3ed82cd0183cd3e3564514979c0846cf1da | [] | no_license | hellais/ooni.nu | c98417145304ce985c953b78d1a50f10154d5b60 | 763aff9a5a75e358c475cf1bf9fcdf6afc534266 | refs/heads/master | 2020-05-31T06:03:36.383462 | 2012-12-12T17:03:00 | 2012-12-12T17:03:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,735 | py | import os
here = lambda *x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
#Directories
LAYOUT_DIR = here('layout')
CONTENT_DIR = here('content')
MEDIA_DIR = here('media')
DEPLOY_DIR = here('deploy')
TMP_DIR = here('deploy_tmp')
BACKUPS_DIR = here('backups')
BACKUP = False
SITE_ROOT = "/"
SITE_WWW_URL = "http://www.ooni.nu"
SITE_NAME = "OONI"
SITE_AUTHOR = "Arturo Filasto'"
#URL Configuration
GENERATE_ABSOLUTE_FS_URLS = False
# Clean urls causes Hyde to generate urls without extensions. Examples:
# http://example.com/section/page.html becomes
# http://example.com/section/page/, and the listing for that section becomes
# http://example.com/section/
# The built-in CherryPy webserver is capable of serving pages with clean urls
# without any additional configuration, but Apache will need to use Mod_Rewrite
# to map the clean urls to the actual html files. The HtaccessGenerator site
# post processor is capable of automatically generating the necessary
# RewriteRules for use with Apache.
GENERATE_CLEAN_URLS = False
# A list of filenames (without extensions) that will be considered listing
# pages for their enclosing folders.
# LISTING_PAGE_NAMES = ['index']
LISTING_PAGE_NAMES = ['listing', 'index', 'default']
# Determines whether or not to append a trailing slash to generated urls when
# clean urls are enabled.
APPEND_SLASH = False
# {folder : extension : (processors)}
# The processors are run in the given order and are chained.
# Only a lone * is supported as an indicator for folders. Path
# should be specified. No wildcard card support yet.
# Starting under the media folder. For example, if you have media/css under
# your site root,you should specify just css. If you have media/css/ie you
# should specify css/ie for the folder name. css/* is not supported (yet).
# Extensions do not support wildcards.
MEDIA_PROCESSORS = {
'*':{
'.css':('hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.CSSmin',),
'.ccss':('hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.CleverCSS',
'hydeengine.media_processors.CSSmin',),
'.sass':('hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.SASS',
'hydeengine.media_processors.CSSmin',),
'.less':('hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.LessCSS',
'hydeengine.media_processors.CSSmin',),
'.styl':('hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.Stylus',
'hydeengine.media_processors.CSSmin',),
'.hss':(
'hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.HSS',
'hydeengine.media_processors.CSSmin',),
#'.js':(
# 'hydeengine.media_processors.TemplateProcessor',
# 'hydeengine.media_processors.JSmin',),
'.coffee':(
'hydeengine.media_processors.TemplateProcessor',
'hydeengine.media_processors.CoffeeScript',
'hydeengine.media_processors.JSmin',)
}
}
CONTENT_PROCESSORS = {
'prerendered/': {
'*.*' :
('hydeengine.content_processors.PassthroughProcessor',)
}
}
SITE_POST_PROCESSORS = {
# 'media/js': {
# 'hydeengine.site_post_processors.FolderFlattener' : {
# 'remove_processed_folders': True,
# 'pattern':"*.js"
# }
# }
}
CONTEXT = {
'GENERATE_CLEAN_URLS': GENERATE_CLEAN_URLS
}
FILTER = {
'include': (".htaccess",),
'exclude': (".*","*~")
}
#Processor Configuration
#
# Set this to the output of `which growlnotify`. If `which` returns empty,
# install growlnotify from the Extras package that comes with the Growl disk image.
#
GROWL = None
# path for YUICompressor, or None if you don't
# want to compress JS/CSS. Project homepage:
# http://developer.yahoo.com/yui/compressor/
#YUI_COMPRESSOR = "./lib/yuicompressor-2.4.2.jar"
YUI_COMPRESSOR = None
# path for Closure Compiler, or None if you don't
# want to compress JS/CSS. Project homepage:
# http://closure-compiler.googlecode.com/
#CLOSURE_COMPILER = "./lib/compiler.jar"
# Bug fix: this setting was previously only defined under the misspelled name
# CLOSURE_COMPRILER, so consumers looking for CLOSURE_COMPILER never saw it.
# Define the correct name and keep the misspelling as a compatibility alias.
CLOSURE_COMPILER = None
CLOSURE_COMPRILER = CLOSURE_COMPILER
# path for HSS, which is a preprocessor for CSS-like files (*.hss)
# project page at http://ncannasse.fr/projects/hss
#HSS_PATH = "./lib/hss-1.0-osx"
HSS_PATH = None # if you don't want to use HSS
#Django settings
TEMPLATE_DIRS = (LAYOUT_DIR, CONTENT_DIR, TMP_DIR, MEDIA_DIR)
INSTALLED_APPS = (
'hydeengine',
'django.contrib.webdesign',
)
| [
"art@fuffa.org"
] | art@fuffa.org |
b1c3e2d58b093839cd1bc3b86f002ea815f7bc28 | b3cd00de3608e5225758bb99b28d58320ce81a47 | /practice_20190516/set_practices.py | bb0c1042527f669ab59618349237b43efafce1f1 | [] | no_license | buyi823/learn_python | fc4ec7e35ec421842975956933cfb56371786d7a | b411d5277dba1764d421a267f0ba36da40d8c6e9 | refs/heads/master | 2022-05-13T01:05:35.961946 | 2022-05-09T15:20:52 | 2022-05-09T15:20:52 | 71,314,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/usr/bin/python3
basket={'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
print(basket)
i = 'orange' in basket
print(i)
a = set('abracadabra')
b = set('alacazam')
print(a)
print(a-b) # 在 a 中的字母,但不在 b 中
print(a | b) # 在 a 或 b 中的字母
print(a & b) # 在 a 和 b 中都有的字母
print(a ^ b) # 在 a 或 b 中的字母,但不同时在 a 和 b 中
a = {x for x in 'abracadabra' if x not in 'abc'}
print(a) | [
"309521086@qq.com"
] | 309521086@qq.com |
5203e3e2fd16915c35a794b6a84738bbe3de2171 | 135f293948b38aa8eaa6ac31dde79f8bc091fe5b | /Estruturas de repetição em Python/Exercicio12.py | ee4e9bcc87e70a6ac7801efa3e2c0c3ff253416f | [] | no_license | Kaiquenakao/Python | 70e42b5fee08a3b50c34913f2f5763f07e754c25 | 008eefce828522b5107917dc699fc3590c7ef52c | refs/heads/master | 2023-08-12T17:03:24.317996 | 2021-09-25T22:31:46 | 2021-09-25T22:31:46 | 279,083,005 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | """
12. Faça um programa que leia um número inteiro positivo N e imprima todos os números naturais
de 0 até N em ordem decrescente.
"""
try:
num = int(input('Insira um número positivo: '))
if num > 0:
for i in range(num, -1, -1):
print(i, end=' ')
else:
print('ERRO!!! O número não é positivo')
except ValueError:
print('ERRO!!!! Só pode ser digitados números inteiros positivos') | [
"noreply@github.com"
] | Kaiquenakao.noreply@github.com |
32ea6cd4e3873c9f26199e4214374be704126b4e | a26b214e60287af3c39e5ee40eae4d1869d596d0 | /train_imagenet.py | 6e46043cbac1543a40368e6cf5f624aad1ec7e09 | [] | no_license | beam2d/chainer-efficient-densenet | 17a326ba783d320dff310beb4f04a8873a9db6cf | 075b07450df9d030b943bca4f7d0fe52e838c55b | refs/heads/master | 2021-05-04T18:44:57.559524 | 2017-10-06T05:04:36 | 2017-10-06T05:10:39 | 105,968,804 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,103 | py | #!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images, scale them to 256x256 and convert them to RGB, and make
two lists of space-separated CSV whose first column is full path to image and
second column is zero-origin label (this format is same as that used by Caffe's
ImageDataLayer).
"""
from __future__ import print_function
import argparse
import random
import numpy as np
import chainer
from chainer import training
from chainer.training import extensions
import densenet
class PreprocessedDataset(chainer.dataset.DatasetMixin):
    """Dataset that crops, optionally flips, mean-subtracts and rescales images.

    Args:
        path (str): Path to the space-separated image/label list file.
        root (str): Root directory prepended to the image paths.
        mean (numpy.ndarray): Mean image in CHW layout; must be at least as
            large as the source images.
        crop_size (int): Side length of the square crop.
        random (bool): If True, crop a random region and flip horizontally
            at random (training mode); otherwise crop the center
            (evaluation mode).
    """
    def __init__(self, path, root, mean, crop_size, random=True):
        self.base = chainer.datasets.LabeledImageDataset(path, root)
        self.mean = mean.astype('f')
        self.crop_size = crop_size
        self.random = random
    def __len__(self):
        return len(self.base)
    def get_example(self, i):
        # It reads the i-th image/label pair and return a preprocessed image.
        # It applies following preprocesses:
        #     - Cropping (random or center rectangular)
        #     - Random flip
        #     - Scaling to [0, 1] value
        crop_size = self.crop_size
        image, label = self.base[i]
        _, h, w = image.shape
        if self.random:
            # random.randint has an INCLUSIVE upper bound, so ``h - crop_size``
            # allows every valid offset.  The previous ``h - crop_size - 1``
            # could never pick the bottom/right-most crop and raised
            # ValueError when the image side equals crop_size.
            top = random.randint(0, h - crop_size)
            left = random.randint(0, w - crop_size)
            if random.randint(0, 1):
                image = image[:, :, ::-1]  # horizontal flip (CHW layout)
        else:
            # Crop the center
            top = (h - crop_size) // 2
            left = (w - crop_size) // 2
        bottom = top + crop_size
        right = left + crop_size
        image = image[:, top:bottom, left:right]
        image -= self.mean[:, top:bottom, left:right]
        image *= (1.0 / 255.0)  # Scale to [0, 1]
        return image, label
def main():
    """Parse command-line options, build model, data and trainer, and run.

    Trains the DenseNet variant selected by ``--arch`` on image-label list
    files in Caffe ImageDataLayer format (see the module docstring).
    """
    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('train', help='Path to training image-label list file')
    parser.add_argument('val', help='Path to validation image-label list file')
    parser.add_argument('--arch', '-a', default='DenseNetBC161',
                        help='DenseNet architecture')
    parser.add_argument('--batchsize', '-B', type=int, default=32,
                        help='Learning minibatch size')
    parser.add_argument('--epoch', '-E', type=int, default=10,
                        help='Number of epochs to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU')
    parser.add_argument('--initmodel',
                        help='Initialize the model from given file')
    parser.add_argument('--loaderjob', '-j', type=int,
                        help='Number of parallel data loading processes')
    parser.add_argument('--mean', '-m', default='mean.npy',
                        help='Mean file (computed by compute_mean.py)')
    parser.add_argument('--resume', '-r', default='',
                        help='Initialize the trainer from given file')
    parser.add_argument('--out', '-o', default='result',
                        help='Output directory')
    parser.add_argument('--root', '-R', default='.',
                        help='Root directory path of image files')
    parser.add_argument('--val_batchsize', '-b', type=int, default=250,
                        help='Validation minibatch size')
    parser.add_argument('--test', action='store_true')
    parser.set_defaults(test=False)
    args = parser.parse_args()
    # Initialize the model to train
    model = chainer.links.Classifier(getattr(densenet, args.arch)())
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()  # Make the GPU current
        model.to_gpu()
    # Load the datasets and mean file
    mean = np.load(args.mean)
    train = PreprocessedDataset(args.train, args.root, mean, 224)
    val = PreprocessedDataset(args.val, args.root, mean, 224, False)
    # These iterators load the images with subprocesses running in parallel to
    # the training/validation.
    train_iter = chainer.iterators.MultiprocessIterator(
        train, args.batchsize, n_processes=args.loaderjob)
    val_iter = chainer.iterators.MultiprocessIterator(
        val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
    # Set up an optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)
    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
    # --test shortens both intervals so the full pipeline can be smoke-tested.
    val_interval = (10 if args.test else 100000), 'iteration'
    log_interval = (10 if args.test else 1000), 'iteration'
    trainer.extend(extensions.Evaluator(val_iter, model, device=args.gpu),
                   trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    # Be careful to pass the interval directly to LogReport
    # (it determines when to emit log rather than when to read observations)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'lr'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    if args.resume:
        # Resume the whole trainer state (optimizer, iterators, extensions).
        chainer.serializers.load_npz(args.resume, trainer)
    trainer.run()
# Entry point: run training when executed as a script.
if __name__ == '__main__':
    main()
| [
"beam.web@gmail.com"
] | beam.web@gmail.com |
81f06546a31f3a1d148091a703f3d6f664345263 | 42b30769e4c676014d3fd8753bc4b1bbcc2a3e3c | /eggs/bx_python-0.7.2-py2.6-linux-x86_64-ucs4.egg/EGG-INFO/scripts/bed_rand_intersect.py | 7f6ee25a6a4b19b8256296f2b0fe48ddc9262184 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | psnehal/MethylSig | 4e510685349b1712c92667d40e795fa798ee1702 | 5efad71e71ff2515feff2e49579c856ef9a1bbd8 | refs/heads/master | 2020-05-18T21:03:51.240410 | 2015-09-03T20:23:30 | 2015-09-03T20:23:30 | 26,826,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,927 | py | #!/afs/bx.psu.edu/project/pythons/py2.6-linux-x86_64-ucs4/bin/python2.6
"""
From a set of regions and two sets of intervals inside those regions
compute (for each region separately) the overlap between the two sets
of intervals and the overlap in `nsamples` random coverings of the
regions with intervals having the same lengths. Prints the z-score relative
to the mean and sample stdev of the random coverings.
Currently intervals must be in bed 3+ format.
TODO: There are a few versions of this floating around, including a
better/faster one using gap lists instead of bitsets. Need to track
that down and merge as necessary.
usage: %prog bounding_region_file intervals1 intervals2 nsamples
"""
from __future__ import division
import sys, random
import bisect
from bx_extras import stats
from Numeric import *
from bx.bitset import *
from bx.intervals.random_intervals import *
# Maximum number of attempts at randomly placing interval sets before
# giving up (used by throw_random below).
maxtries = 10
class MaxtriesException( Exception ):
    """Raised when random interval placement fails repeatedly."""
    pass
def bit_clone( bits ):
    """
    Return a copy of ``bits``: a new BitSet of the same size with the
    same bits set.
    """
    duplicate = BitSet( bits.size )
    duplicate.ior( bits )
    return duplicate
def throw_random( lengths, mask ):
"""
Try multiple times to run 'throw_random'
"""
saved = None
for i in range( maxtries ):
try:
return throw_random_bits( lengths, mask )
except MaxtriesException, e:
saved = e
continue
raise e
def as_bits( region_start, region_length, intervals ):
    """
    Convert a set of intervals overlapping a region of a chromosome into
    a bitset for just that region with the bits covered by the intervals
    set.
    """
    bits = BitSet( region_length )
    for chr, start, stop in intervals:
        # Shift to region-relative coordinates; run width is stop - start.
        bits.set_range( start - region_start, stop - start )
    return bits
def interval_lengths( bits ):
    """
    Get the length distribution of all contiguous runs of set bits from
    ``bits``, yielding one length per run in left-to-right order.
    """
    end = 0
    while 1:
        start = bits.next_set( end )
        # next_set returns bits.size when no further set bit exists.
        if start == bits.size: break
        end = bits.next_clear( start )
        yield end - start
def count_overlap( bits1, bits2 ):
    """
    Count the number of bits that are set in both ``bits1`` and ``bits2``.
    """
    overlap = BitSet( bits1.size )
    overlap.ior( bits1 )
    overlap.iand( bits2 )
    return overlap.count_range( 0, overlap.size )
def overlapping_in_bed( fname, r_chr, r_start, r_stop ):
    """
    Return the intervals of bed file ``fname`` that overlap the region
    (r_chr, r_start, r_stop), each clipped to the region boundaries.
    """
    overlaps = []
    for line in open( fname ):
        # Skip comment and track-definition header lines.
        if line.startswith( ( "#", "track" ) ):
            continue
        fields = line.split()
        chrom = fields[0]
        start = int( fields[1] )
        stop = int( fields[2] )
        if chrom == r_chr and start < r_stop and stop >= r_start:
            overlaps.append( ( chrom, max( start, r_start ), min( stop, r_stop ) ) )
    return overlaps
def main():
    """
    Usage: region_file mask_file nsamples intervals1 intervals2 [...]

    For every region, compare the observed overlap between intervals1 and
    each intervals2 file with the overlap of ``nsamples`` random placements,
    then print per-feature overlap fractions and summary statistics.
    """
    region_fname = sys.argv[1]
    mask_fname = sys.argv[2]
    nsamples = int( sys.argv[3] )
    intervals1_fname = sys.argv[4]
    intervals2_fnames = sys.argv[5:]
    nfeatures = len( intervals2_fnames )
    total_actual = zeros( nfeatures )
    # total_lengths1 = 0
    total_lengths2 = zeros( nfeatures )
    total_samples = zeros( ( nsamples, nfeatures ) )
    for line in open( region_fname ):
        # Load lengths for all intervals overlapping region
        fields = line.split()
        print >>sys.stderr, "Processing region:", fields[3]
        r_chr, r_start, r_stop = fields[0], int( fields[1] ), int( fields[2] )
        r_length = r_stop - r_start
        # Load the mask
        mask = overlapping_in_bed( mask_fname, r_chr, r_start, r_stop )
        bits_mask = as_bits( r_start, r_length, mask )
        bits_not_masked = bit_clone( bits_mask ); bits_not_masked.invert()
        # Load the first set
        intervals1 = overlapping_in_bed( intervals1_fname, r_chr, r_start, r_stop )
        bits1 = as_bits( r_start, r_length, intervals1 )
        # Intersect it with the mask
        bits1.iand( bits_not_masked )
        # Sanity checks
        assert count_overlap( bits1, bits_mask ) == 0
        # For each data set
        for featnum, intervals2_fname in enumerate( intervals2_fnames ):
            print >>sys.stderr, intervals2_fname
            intervals2 = overlapping_in_bed( intervals2_fname, r_chr, r_start, r_stop )
            bits2 = as_bits( r_start, r_length, intervals2 )
            bits2.iand( bits_not_masked )
            assert count_overlap( bits2, bits_mask ) == 0
            # Observed values
            actual_overlap = count_overlap( bits1, bits2 )
            total_actual[featnum] += actual_overlap
            # Sample
            lengths2 = list( interval_lengths( bits2 ) )
            total_lengths2[ featnum ] += sum( lengths2 )
            for i in range( nsamples ):
                # Build randomly covered bitmask for second set
                random2 = throw_random( lengths2, bits_mask )
                # Find intersection
                random2 &= bits1
                # Print amount intersecting
                total_samples[ i, featnum ] += random2.count_range( 0, random2.size )
                print >>sys.stderr, total_samples[ i, featnum ]
    fraction_overlap = total_samples / total_lengths2
    print "\t".join( intervals2_fnames )
    print "\t".join( map( str, total_actual/total_lengths2 ) )
    for row in fraction_overlap:
        print "\t".join( map( str, row ) )
    #print "total covered by first: %d, second: %d, overlap: %d" % ( total_lengths1, total_lengths2, total_actual )
    # NOTE(review): total_actual and total_samples are arrays; the "%d"
    # formatting below appears to rely on a single feature file -- confirm
    # behaviour for the multi-file case.
    print "observed overlap: %d, sample mean: %d, sample stdev: %d" % ( total_actual, stats.amean( total_samples ), stats.asamplestdev( total_samples ) )
    print "z-score:", ( total_actual - stats.amean( total_samples ) ) / stats.asamplestdev( total_samples )
    print "percentile:", sum( total_actual > total_samples ) / nsamples
# Entry point when run as a script.
if __name__ == "__main__":
    main()
| [
"snehal@lnx-306501.(none)"
] | snehal@lnx-306501.(none) |
935e668353281522257cae2c1d7cf7f920d7109b | ba182c0ef3f2d9be57454808e999972a44a17246 | /papis_zotero/__init__.py | cd4b219ea84afd857e3af21d24fe69025ec8f0c0 | [] | no_license | stuartthomas25/papis-zotero | 9bc1aea061a2a5653e3f8a2f0dca20ca6591f649 | 00911a6a94191715429649b36f99f32005db530a | refs/heads/master | 2023-01-20T14:53:53.738651 | 2020-12-03T01:27:27 | 2020-12-03T01:27:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | import click
import os
import logging
import http.server
import papis_zotero.server
@click.group('zotero')
@click.help_option('-h', '--help')
def main():
    """
    Zotero interface for papis
    """
    # Command group only; functionality lives in the ``serve`` and
    # ``import`` subcommands registered below.
    pass
@main.command('serve')
@click.help_option('-h', '--help')
@click.option(
    "--port",
    help="Port to listen to",
    default=papis_zotero.server.zotero_port,
    type=int
)
@click.option(
    "--address",
    help="Address to bind",
    default="localhost"
)
def serve(address, port):
    """Start a zotero-connector server"""
    # Removed the dead ``global logger`` declaration: no module-level
    # ``logger`` is ever defined or used in this module.
    server_address = (address, port)
    httpd = http.server.HTTPServer(
        server_address,
        papis_zotero.server.PapisRequestHandler
    )
    # Blocks serving Zotero connector requests until interrupted.
    httpd.serve_forever()
@main.command('import')
@click.help_option('-h', '--help')
@click.option(
    '-f', '--from-bibtex', 'from_bibtex',
    help='Import zotero library from a bibtex dump, the files fields in '
    'the bibtex files should point to valid paths',
    default=None,
    type=click.Path(exists=True)
)
@click.option(
    '-s', '--from-sql', 'from_sql',
    help='Path to the FOLDER where the "zotero.sqlite" file resides',
    default=None,
    type=click.Path(exists=True)
)
@click.option(
    '-o', '--outfolder',
    help='Folder to save the imported library',
    type=str,
    required=True
)
@click.option(
    '--link',
    help='Wether to link the pdf files or copy them',
    default=None
)
def do_importer(from_bibtex, from_sql, outfolder, link):
    """Import zotero libraries into papis libraries
    """
    # Local imports keep the heavy importer modules out of CLI startup.
    import papis_zotero.bibtex
    import papis_zotero.sql
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)
    # Exactly one source is used; --from-bibtex takes precedence if both
    # are given.  NOTE(review): ``link`` is only passed to the bibtex
    # importer, not the SQL one.
    if from_bibtex is not None:
        papis_zotero.bibtex.add_from_bibtex(
            from_bibtex, outfolder, link
        )
    elif from_sql is not None:
        papis_zotero.sql.add_from_sql(
            from_sql, outfolder
        )
# Entry point when run as a script.
if __name__ == "__main__":
    main()
| [
"aamsgallo@gmail.com"
] | aamsgallo@gmail.com |
e1a3e6b019db32d46d544ec8533a4e2420661ae1 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /DPGAnalysis/Skims/python/TPGSkims_cfg.py | 72a0da830b6a2f7874b2e7bdbf09b435556f9e7a | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 6,095 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("SKIM")
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source' ),
annotation = cms.untracked.string('TPG skim')
)
#
#
# This is for testing purposes.
#
#
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
# run 136066 lumi~500
'/store/data/Run2010A/MinimumBias/RECO/v1/000/136/066/18F6DB82-5566-DF11-B289-0030487CAF0E.root'),
secondaryFileNames = cms.untracked.vstring(
'/store/data/Run2010A/MinimumBias/RAW/v1/000/136/066/38D48BED-3C66-DF11-88A5-001D09F27003.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*", "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(500)
)
#------------------------------------------
# Load standard sequences.
#------------------------------------------
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR10_P_V6::All'
process.load("Configuration/StandardSequences/RawToDigi_Data_cff")
process.load("Configuration/StandardSequences/Reconstruction_cff")
process.load('Configuration/EventContent/EventContent_cff')
#drop collections created on the fly
process.FEVTEventContent.outputCommands.append("drop *_MEtoEDMConverter_*_*")
process.FEVTEventContent.outputCommands.append("drop *_*_*_SKIM")
#
# Load common sequences
#
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskAlgoTrigConfig_cff')
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
##################################TPG Skims############################################
# Each *_cff provides a filter sequence; only the uncommented cms.Path
# objects below actually run.  Higher-threshold variants are kept,
# commented out, for reference.
process.load('DPGAnalysis/Skims/singleMuonSkim_cff')
process.load('DPGAnalysis/Skims/singleElectronSkim_cff')
process.load('DPGAnalysis/Skims/muonTagProbeFilters_cff')
process.load('DPGAnalysis/Skims/electronTagProbeFilters_cff')
process.load('DPGAnalysis/Skims/singlePhotonSkim_cff')
process.load('DPGAnalysis/Skims/jetSkim_cff')
process.load('DPGAnalysis/Skims/METSkim_cff')
process.load('DPGAnalysis/Skims/singlePfTauSkim_cff')
#process.singleMuPt20SkimPath=cms.Path(process.singleMuPt20RecoQualitySeq)
#process.singleMuPt15SkimPath=cms.Path(process.singleMuPt15RecoQualitySeq)
#process.singleMuPt10SkimPath=cms.Path(process.singleMuPt10RecoQualitySeq)
process.singleMuPt5SkimPath=cms.Path(process.singleMuPt5RecoQualitySeq)
#process.singleElectronPt20SkimPath=cms.Path(process.singleElectronPt20RecoQualitySeq)
#process.singleElectronPt15SkimPath=cms.Path(process.singleElectronPt15RecoQualitySeq)
#process.singleElectronPt10SkimPath=cms.Path(process.singleElectronPt10RecoQualitySeq)
process.singleElectronPt5SkimPath=cms.Path(process.singleElectronPt5RecoQualitySeq)
#process.singlePhotonPt20SkimPath=cms.Path(process.singlePhotonPt20QualitySeq)
#process.singlePhotonPt15SkimPath=cms.Path(process.singlePhotonPt15QualitySeq)
#process.singlePhotonPt10SkimPath=cms.Path(process.singlePhotonPt10QualitySeq)
process.singlePhotonPt5SkimPath=cms.Path(process.singlePhotonPt5QualitySeq)
#process.muonZMMSkimPath=cms.Path(process.muonZMMRecoQualitySeq)
process.muonJPsiMMSkimPath=cms.Path(process.muonJPsiMMRecoQualitySeq)
#process.electronZEESkimPath=cms.Path(process.electronZEERecoQualitySeq)
process.jetSkimPath=cms.Path(process.jetRecoQualitySeq)
#process.METSkimPath=cms.Path(process.METQualitySeq)
process.singlePfTauPt15SkimPath=cms.Path(process.singlePfTauPt15QualitySeq)
# Output: an event is written if ANY of the listed (uncommented) skim
# paths accepted it.
process.outTPGSkim = cms.OutputModule("PoolOutputModule",
               outputCommands = process.FEVTHLTALLEventContent.outputCommands,
               fileName = cms.untracked.string("/tmp/azzi/TPGSkim.root"),
               dataset = cms.untracked.PSet(
                  dataTier = cms.untracked.string('USER'),
                  filterName = cms.untracked.string('TPGSkim')
               ),
               SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring(
                  #'singleMuPt20SkimPath',
                  #'singleMuPt15SkimPath',
                  #'singleMuPt10SkimPath',
                  'singleMuPt5SkimPath',
                  #'singleElectronPt20SkimPath',
                  #'singleElectronPt15SkimPath',
                  #'singleElectronPt10SkimPath',
                  'singleElectronPt5SkimPath',
                  #'singlePhotonPt20SkimPath',
                  #'singlePhotonPt15SkimPath',
                  #'singlePhotonPt10SkimPath',
                  'singlePhotonPt5SkimPath',
                  #'muonZMMSkimPath',
                  'muonJPsiMMSkimPath',
                  #'electronZEESkimPath',
                  'jetSkimPath',
                  #'METSkimPath',
                  'singlePfTauPt15SkimPath'))
)
###########################################################################
# Print a trigger/module summary at end of job and schedule the output.
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
)
process.outpath = cms.EndPath(process.outTPGSkim)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
4c3ff6795992f4880623aff1d7912e1e2af55406 | 95b6f547270557a99c435b785b907896f62e87d1 | /test_cluster_mocking_nested_ratio.py | 1369246b5107a36125ed7de9c6953ae44bb947a7 | [] | no_license | phizaz/seeding-strategy-ssl | 6b3b58c9b1f556f8cd42fea5e3dc20e623462a08 | 85655ce3297130b273d5f86075ee6bdf1f12be0a | refs/heads/master | 2021-01-10T06:18:26.618009 | 2016-04-02T14:50:08 | 2016-04-02T14:50:08 | 49,761,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from dataset import *
from pipe import Pipe
from wrapper import *
from badness import *
from ssltools import *
from sklearn.cluster import KMeans
# Load the yeast dataset and rescale its features -- TODO confirm target range.
dataset = get_yeast().rescale()
def seed_randomly(prob):
    # Seed labels uniformly at random with probability ``prob`` and return
    # the partially-labelled vector produced by the pipeline.
    pipe = Pipe() \
        .x(dataset.X) \
        .y(dataset.Y) \
        .y_seed(seeding_random(prob)) \
        .connect(stop())
    return pipe['y_seed']
def seed_some(prob, clusters_cnt):
    # Seed labels via the ``seeding_some`` strategy (probability ``prob``
    # over ``clusters_cnt`` clusters); currently unused but kept for
    # experimentation alongside seed_randomly/seed_cache.
    pipe = Pipe() \
        .x(dataset.X) \
        .y(dataset.Y) \
        .y_seed(seeding_some(prob, clusters_cnt)) \
        .connect(stop())
    return pipe['y_seed']
def seed_cache(file):
    """Load a previously stored seeding vector from ``seeding/<file>``."""
    cached = StorageCache('seeding/' + file)
    return np.array(cached.get())
# Over-cluster the data (3x the labelled cluster count), then score a
# random 10% seeding with the nested-ratio badness model.
kmeans = KMeans(dataset.cluster_cnt * 3)
kmeans.fit(dataset.X)
model = ClusterMockingNestedRatio(dataset.X, kmeans.labels_)
random_seed = seed_randomly(0.1)
badness = model.run(random_seed)
print('badness random:', badness)
#
# random_cache = seed_cache('yeast_prob-0.06.json')
# badness = model.run(random_cache)
# print('badness cache 0.06:', badness)
#
# random_cache = seed_cache('yeast_prob-0.05.json')
# badness = model.run(random_cache)
# print('badness cache 0.05:', badness) | [
"the.akita.ta@gmail.com"
] | the.akita.ta@gmail.com |
b2e081c7ce0957970be4524dd7569d94f5999f5b | 9e4fa1142a3656e97c637a247fd9e0a4b4192537 | /manage.py | 2f588eabb7d9bc15f00a4b0377a195b3614bfb9f | [] | no_license | LokeshKD/ckts-app-demo | 41bb51f5ab4c94344cf8a1f7d0a5b8ee970d5375 | 309209b87737099249f010c505e689efc287c8c3 | refs/heads/master | 2022-02-11T06:22:53.556163 | 2019-08-13T02:38:49 | 2019-08-13T02:38:49 | 198,107,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | import os
import unittest
import coverage
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from project import app, db
# App/CLI wiring: the settings module comes from the APP_SETTINGS
# environment variable; ``db`` gains migration commands via Flask-Migrate.
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
@manager.command
def test():
    """ Runs Tests without coverage"""
    suite = unittest.TestLoader().discover('.')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
@manager.command
def cov():
    """Runs the unit tests with coverage."""
    # Measure branch coverage for the application package only.  The local
    # name deliberately differs from the command name to avoid shadowing.
    tracker = coverage.coverage(branch=True, include='project/*')
    tracker.start()
    suite = unittest.TestLoader().discover('.')
    unittest.TextTestRunner(verbosity=2).run(suite)
    tracker.stop()
    tracker.save()
    print('Coverage Summary:')
    tracker.report()
    report_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'coverage')
    tracker.html_report(directory=report_dir)
    tracker.erase()
# Dispatch Flask-Script CLI commands (db, test, cov, ...).
if __name__ == '__main__':
    manager.run()
| [
"i.lokesh@gmail.com"
] | i.lokesh@gmail.com |
fc209154dfb2a5e90e53f5ea78fe360348b50cb5 | 4c28bbe099672d9402b4c2c75625fc450b7d1c99 | /misc/formatter.py | 09180ed9b437614ea97a03ea48f812b72e64672a | [
"MIT"
] | permissive | GINK03/lightgbm-feature-transform | e1edaf1d3dc541f816f844b280335b8d4d18fdfe | 18c0eaf64ef159d9c5cc041244b45bbea7314ca8 | refs/heads/master | 2021-07-22T15:46:17.221420 | 2017-11-07T15:10:40 | 2017-11-07T15:10:40 | 107,000,998 | 12 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | import re
import json
# Convert LightGBM's generated C++ predictor source into Python by
# line-level string surgery: braces become colons, type declarations are
# stripped, and C++-only lines are dropped entirely.
lines = []
for line in open('./gbdt_prediction_formatted.cpp'):
    line = line[:-1]  # strip the trailing newline
    line = line.replace(' {', ':')
    line = line.replace('} else', 'else')
    line = re.sub(r'^double', 'def', line)  # function definitions
    line = line.replace('const double *arr', 'arr')
    line = line.replace('const std::vector<uint32_t> cat_threshold = {};', '')
    line = line.replace('double ', '')
    line = line.replace('0.0f', '0.0')  # C float literals -> Python floats
    if 'const' in line:
        continue
    if 'namespace' in line:
        continue
    if len(line) >= 1:
        if line[-1] == '}':
            # Closing braces have no Python equivalent -- drop the line.
            ...
        else:
            lines.append( line )
text = '\n'.join(lines)
# Re-join expressions that the C++ formatter had split across lines.
text = re.sub(r'arr.*?\n.*?\[', 'arr[', text, flags=re.MULTILINE)
text = re.sub(r'=.*?\n\s{1,}?arr', '= arr', text, flags=re.MULTILINE)
text = re.sub(r'<=\s{1,}?\n\s{1,}?0', '<= 0', text, flags=re.MULTILINE)
text = re.sub(r'<=\s{1,}?\n\s{1,}?1', '<= 1', text, flags=re.MULTILINE)
print(text)
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
dcb44358e58a964fba44912fffb44036d3b53921 | 7db9f50e35b13bb2d5a7fcf564e841be1961b937 | /djangocms_teaser/migrations_django/0001_initial.py | 9e4b8c0bb3268b4bc0f680f15ca6b2fc2cab8eb4 | [] | no_license | tanderegg/djangocms-teaser | 3df391b42931b4cb5a2fd067d88da36e3defa20b | 29b09fbb921de052c9bf7a1e3bda8ed0a8b1fa56 | refs/heads/master | 2020-04-02T22:26:34.614574 | 2014-08-21T17:24:28 | 2014-08-21T17:24:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cms.models.pluginmodel
class Migration(migrations.Migration):
    # Initial migration: create the Teaser plugin model, extending
    # cms.CMSPlugin through the ``cmsplugin_ptr`` one-to-one parent link.
    dependencies = [
        ('cms', '__first__'),
    ]
    operations = [
        migrations.CreateModel(
            name='Teaser',
            fields=[
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('image', models.ImageField(upload_to=cms.models.pluginmodel.get_plugin_media_path, null=True, verbose_name='image', blank=True)),
                ('url', models.CharField(help_text='If present image will be clickable.', max_length=255, null=True, verbose_name='link', blank=True)),
                ('description', models.TextField(null=True, verbose_name='description', blank=True)),
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
                ('page_link', models.ForeignKey(blank=True, to='cms.Page', help_text='If present image will be clickable', null=True, verbose_name='page')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
| [
"i.spalletti@nephila.it"
] | i.spalletti@nephila.it |
f430ca748728f396149648eb89ce978d127ee811 | 26f8a8782a03693905a2d1eef69a5b9f37a07cce | /test/test_dictionary_component_response_ofint64_and_destiny_item_talent_grid_component.py | bbf9b4777cea26e0cf7a3694065e35cbc650d67d | [] | no_license | roscroft/openapi3-swagger | 60975db806095fe9eba6d9d800b96f2feee99a5b | d1c659c7f301dcfee97ab30ba9db0f2506f4e95d | refs/heads/master | 2021-06-27T13:20:53.767130 | 2017-08-31T17:09:40 | 2017-08-31T17:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # coding: utf-8
"""
Bungie.Net API
These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality.
OpenAPI spec version: 2.0.0
Contact: support@bungie.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.dictionary_component_response_ofint64_and_destiny_item_talent_grid_component import DictionaryComponentResponseOfint64AndDestinyItemTalentGridComponent
class TestDictionaryComponentResponseOfint64AndDestinyItemTalentGridComponent(unittest.TestCase):
    """ DictionaryComponentResponseOfint64AndDestinyItemTalentGridComponent unit test stubs """
    def setUp(self):
        # Generated stub: no fixtures required.
        pass
    def tearDown(self):
        pass
    def testDictionaryComponentResponseOfint64AndDestinyItemTalentGridComponent(self):
        """
        Test DictionaryComponentResponseOfint64AndDestinyItemTalentGridComponent
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.dictionary_component_response_ofint64_and_destiny_item_talent_grid_component.DictionaryComponentResponseOfint64AndDestinyItemTalentGridComponent()
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"adherrling@gmail.com"
] | adherrling@gmail.com |
a1d45b4a1306f1720ace5d28dab4653d0d2a6868 | bd02997a44218468b155eda45dd9dd592bb3d124 | /baekjoon_2108.py | cb9102297f1407ace276cbdd2e7a5b699dbacc1a | [] | no_license | rheehot/ProblemSolving_Python | 88b1eb303ab97624ae6c97e05393352695038d14 | 4d6dc6aea628f0e6e96530646c66216bf489427f | refs/heads/master | 2023-02-13T03:30:07.039231 | 2021-01-04T06:04:11 | 2021-01-04T06:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | '''
Problem Solving Baekjoon 2108
Author: Injun Son
Date: September 5, 2020
'''
from collections import deque
import sys
from itertools import combinations
import math
import math
input = sys.stdin.readline  # faster line reads; deliberately shadows the builtin
N = int(input())
arr = [int(input()) for _ in range(N)]
count_arr = [0]*8001  # frequency table for values in [-4000, 4000]
tmp_sum = 0
tmp_max = -1*sys.maxsize
tmp_min = sys.maxsize
for i in range(len(arr)):
    tmp_sum += arr[i]
    count_arr[arr[i]+4000]+=1  # shift so index 0 corresponds to value -4000
    if arr[i] > tmp_max:
        tmp_max = arr[i]
    if arr[i] < tmp_min:
        tmp_min = arr[i]
mode_list = []
for i in range(0, 8001):
    if count_arr[i]!=0:
        mode_list.append([i-4000, count_arr[i]])
# Sort by descending frequency, then ascending value.
mode_list = sorted(mode_list, key = lambda x: (-x[1], x[0]), )
arr.sort()
# print(arr)
# print(mode_list)
'''
첫째 줄에는 산술평균을 출력한다. 소수점 이하 첫째 자리에서 반올림한 값을 출력한다.
둘째 줄에는 중앙값을 출력한다.
셋째 줄에는 최빈값을 출력한다. 여러 개 있을 때에는 최빈값 중 두 번째로 작은 값을 출력한다.
넷째 줄에는 범위를 출력한다.
'''
# NOTE(review): Python's round() uses banker's rounding; the task asks for
# round-half-away-from-zero, so .5 averages may differ -- confirm vs judge.
print(round(tmp_sum/len(arr)))
print(arr[len(arr)//2])  # median; assumes N is odd -- TODO confirm
if len(mode_list)>=2 and mode_list[0][1]==mode_list[1][1]:
    # Tie for most frequent value: print the second smallest mode.
    print(mode_list[1][0])
else:
    print(mode_list[0][0])
print(tmp_max - tmp_min) | [
"ison@sfu.ca"
] | ison@sfu.ca |
636406ddd942396e9b6aedb4bdaaba02783aa51f | c0ad282ab743a315e2f252a627933cb168434c1d | /shapeworld/torch_util.py | fa6ad12567eb305a5acdeee76fecf0a4791ff2fd | [
"MIT"
] | permissive | AlexKuhnle/ShapeWorld | 6d1e16adc94e860abae99ade869f72575f573bc4 | e720bf46e57fc01326d04d639fa6133d9c12158f | refs/heads/master | 2021-07-09T00:02:33.808969 | 2021-04-19T11:10:52 | 2021-04-19T11:10:52 | 80,815,972 | 58 | 28 | MIT | 2021-04-19T11:10:53 | 2017-02-03T09:40:19 | Python | UTF-8 | Python | false | false | 2,806 | py | import sys
import numpy as np
import torch.utils.data
class ShapeWorldDataset(torch.utils.data.Dataset):
    """Torch ``Dataset`` adapter over a ShapeWorld generator.

    Examples are produced lazily from the wrapped ``dataset`` object, so
    items can only be read strictly sequentially; ``None`` is returned
    (and the iterator re-initialised) when a finite epoch is exhausted.
    """
    def __init__(self, dataset, mode=None, include_model=False, epoch=False, is_channels_first=True, preprocessing=None):
        super(ShapeWorldDataset, self).__init__()
        self.dataset = dataset
        self.mode = mode
        self.include_model = include_model
        self.epoch = epoch
        if self.epoch:
            # Epoch mode passes over the data once, so disable random
            # sampling on the wrapped dataset.
            self.dataset.random_sampling = False
        self.is_channels_first = is_channels_first
        self.preprocessing = dict() if preprocessing is None else preprocessing
        self.initialize_iterator()
        self.index = -1
    def initialize_iterator(self):
        # (Re-)create the generator over single instances (n=1).
        if self.epoch:
            self.iterator = self.dataset.epoch(n=1, mode=self.mode, include_model=self.include_model, alternatives=False)
        else:
            self.iterator = self.dataset.iterate(n=1, mode=self.mode, include_model=self.include_model, alternatives=False)
    def __getitem__(self, index):
        # Sequential access only: the generator cannot seek, so a shuffled
        # sampler would silently return wrong instances.
        self.index += 1
        assert index == self.index, 'random shuffling invalid: ' + str((index, self.index, self, self.mode))
        try:
            generated = next(self.iterator)
            for value_name, value in generated.items():
                if self.is_channels_first and (self.dataset.values[value_name] == 'world' or value_name.endswith('_features')):
                    # Unwrap the n=1 batch and convert HWC to CHW layout.
                    generated[value_name] = np.transpose(value[0], axes=(2, 0, 1))
                else:
                    generated[value_name] = value[0]
            for value_name, preprocessing in self.preprocessing.items():
                generated[value_name] = preprocessing(generated[value_name])
            return {value_name: value for value_name, value in generated.items()}
        except StopIteration:
            # End of a finite epoch: reset for the next pass and signal the
            # consumer (see ShapeWorldDataLoader) with None.
            self.initialize_iterator()
            self.index = -1
            return None
    def __len__(self):
        # True length is unknown (generator-backed); report "unbounded".
        return sys.maxsize
    def __add__(self, other):
        raise NotImplementedError
class ShapeWorldDataLoader(torch.utils.data.DataLoader):
    """DataLoader over a ShapeWorldDataset.

    The dataset yields ``None`` once its (finite) epoch iterator is
    exhausted; iteration stops at that point.  ``__len__`` is nominally
    unbounded because the dataset's true length is unknown in advance.
    """
    def __init__(self, dataset, batch_size=1, num_workers=0):
        assert isinstance(dataset, ShapeWorldDataset)
        super(ShapeWorldDataLoader, self).__init__(dataset=dataset, batch_size=batch_size, num_workers=num_workers)
    def __iter__(self):
        self.sample_iter = iter(self.batch_sampler)
        while True:
            try:
                indices = next(self.sample_iter)
            except StopIteration:
                # PEP 479 (Python 3.7+): a StopIteration escaping from a
                # generator becomes RuntimeError, so the previous bare
                # next() call crashed instead of ending iteration cleanly.
                return
            batch = list()
            exhausted = False
            for i in indices:
                instance = self.dataset[i]
                if instance is None:
                    # Dataset signals the end of its epoch.
                    exhausted = True
                    break
                batch.append(instance)
            if batch:
                # Only collate non-empty batches; the original could pass an
                # empty list to collate_fn when the epoch ended exactly on a
                # batch boundary (and hit an unbound ``instance`` for empty
                # index lists).
                yield self.collate_fn(batch)
            if exhausted:
                break
    def __len__(self):
        return sys.maxsize
| [
"aok25@cl.cam.ac.uk"
] | aok25@cl.cam.ac.uk |
9c55f255b57e0eaf9ce3639bd0dd51cd1eb40334 | deec7cdd0916936f71a194616060f2935291dba1 | /examples/05_logging/null_handler/connect_ssh.py | f46d7698d5180a2d54819386a56cf94bd7067e8d | [] | no_license | denjjack/advpyneng-examples-exercises | 938077e0e0d2265481746d894e970eb5688fc235 | 50871f0f228bb309f99ddee7ca56de25da0bfa1b | refs/heads/master | 2022-12-15T02:43:50.148470 | 2020-09-11T11:51:38 | 2020-09-11T11:51:38 | 294,704,149 | 1 | 0 | null | 2020-09-11T13:34:23 | 2020-09-11T13:34:22 | null | UTF-8 | Python | false | false | 194 | py | from base_ssh_class import BaseSSH
import logging
# Show all library debug output while exercising the SSH wrapper.
logging.basicConfig(level=logging.DEBUG)
# Smoke test: connect to a lab router and print its current clock.
r1 = BaseSSH("192.168.100.1", "cisco", "cisco")
print(r1.send_show_command("sh clock"))
r1.close()
| [
"nataliya.samoylenko@gmail.com"
] | nataliya.samoylenko@gmail.com |
09c1ec7ba50d187876c91d783b241ad7c7c92641 | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/conductor/tasks/base.py | 170e5ebdf37200c833e06619b72a68d7ef3d0ae3 | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 3,441 | py | begin_unit
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'abc'
newline|'\n'
name|'import'
name|'functools'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_utils'
name|'import'
name|'excutils'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|rollback_wrapper
name|'def'
name|'rollback_wrapper'
op|'('
name|'original'
op|')'
op|':'
newline|'\n'
indent|' '
op|'@'
name|'functools'
op|'.'
name|'wraps'
op|'('
name|'original'
op|')'
newline|'\n'
DECL|function|wrap
name|'def'
name|'wrap'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'original'
op|'('
name|'self'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'Exception'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'excutils'
op|'.'
name|'save_and_reraise_exception'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'rollback'
op|'('
op|')'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'return'
name|'wrap'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
op|'@'
name|'six'
op|'.'
name|'add_metaclass'
op|'('
name|'abc'
op|'.'
name|'ABCMeta'
op|')'
newline|'\n'
DECL|class|TaskBase
name|'class'
name|'TaskBase'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
nl|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
name|'context'
op|','
name|'instance'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'context'
op|'='
name|'context'
newline|'\n'
name|'self'
op|'.'
name|'instance'
op|'='
name|'instance'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'rollback_wrapper'
newline|'\n'
DECL|member|execute
name|'def'
name|'execute'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Run task\'s logic, written in _execute() method\n """'
newline|'\n'
name|'return'
name|'self'
op|'.'
name|'_execute'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'abc'
op|'.'
name|'abstractmethod'
newline|'\n'
DECL|member|_execute
name|'def'
name|'_execute'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Descendants should place task\'s logic here, while resource\n initialization should be performed over __init__\n """'
newline|'\n'
name|'pass'
newline|'\n'
nl|'\n'
DECL|member|rollback
dedent|''
name|'def'
name|'rollback'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Rollback failed task\n Descendants should implement this method to allow task user to\n rollback status to state before execute method was call\n """'
newline|'\n'
name|'pass'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| [
"dmg@uvic.ca"
] | dmg@uvic.ca |
1b84a43972de2a1222d45566cf6969e535176b4b | 0e0ce88c886370df9af51855115c99dfc003e5da | /2012/04_Sistema_de_Atomos/func.Curve/color_junto.py | 71bce10f2562d2ac7869dad6a0f355e2f20b019f | [] | no_license | miguelzeph/Python_Git | ed80db9a4f060836203df8cc2e42e003b0df6afd | 79d3b00236e7f4194d2a23fb016b43e9d09311e6 | refs/heads/master | 2021-07-08T18:43:45.855023 | 2021-04-01T14:12:23 | 2021-04-01T14:12:23 | 232,007,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from __future__ import division
from visual import *
# Build a 3D curve and extend it point by point; an appended point keeps
# the previous color unless a new one is given explicitly.
c= curve( pos=[(-1,0,0)], color=color.blue,radius=1)
c.append( pos=(0,0,0) ) # add a blue point (optional)
c.append( pos=(0,0,0), color=color.orange) # same point
c.append( pos=(1,0,0) ) # add orange point
"miguel.junior.mat@hotmail.com"
] | miguel.junior.mat@hotmail.com |
24593538760a1009040733cb6b22ec4ed9378e7c | 0308ce9bc6772a9c49b4f6ff4b73a9a31b9ad33e | /source/django-oscar/tests/settings.py | 9cbdfebebdc619e0c15f36a191516da7942aeb4b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | SpivEgin/devenv | 27a9211c68293b16578fe3959578be822da7e1ca | 8c89aa77a38d6a0e915d99e02afcc5ae46fc39f3 | refs/heads/master | 2023-01-20T21:00:20.978045 | 2017-05-19T03:55:36 | 2017-05-19T03:55:36 | 91,119,096 | 1 | 1 | null | 2023-01-12T06:51:14 | 2017-05-12T18:27:47 | Python | UTF-8 | Python | false | false | 4,252 | py | import os
from django import VERSION as DJANGO_VERSION
import oscar
from oscar.defaults import * # noqa
# Path helper
location = lambda x: os.path.join(os.path.dirname(os.path.realpath(__file__)), x)
ALLOWED_HOSTS = ['test', '.oscarcommerce.com']
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.postgresql_psycopg2'),
'NAME': os.environ.get('DATABASE_NAME', 'oscar'),
}
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'widget_tweaks',
# contains models we need for testing
'tests._site.model_tests_app',
'tests._site.myauth',
# Use a custom partner app to test overriding models. I can't
# find a way of doing this on a per-test basis, so I'm using a
# global change.
] + oscar.get_core_apps(['tests._site.apps.partner', 'tests._site.apps.customer'])
AUTH_USER_MODEL = 'myauth.User'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
location('_site/templates'),
oscar.OSCAR_MAIN_TEMPLATE_DIR,
],
'OPTIONS': {
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
],
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'oscar.apps.search.context_processors.search_form',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
]
}
}
]
if DJANGO_VERSION < (1, 10):
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
]
else:
MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
]
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
HAYSTACK_CONNECTIONS = {'default': {'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'}}
PASSWORD_HASHERS = ['django.contrib.auth.hashers.MD5PasswordHasher']
ROOT_URLCONF = 'tests._site.urls'
LOGIN_REDIRECT_URL = '/accounts/'
STATIC_URL = '/static/'
DEBUG = False
SITE_ID = 1
USE_TZ = 1
APPEND_SLASH = True
DDF_DEFAULT_DATA_FIXTURE = 'tests.dynamic_fixtures.OscarDynamicDataFixtureClass'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
LANGUAGE_CODE = 'en-gb'
# temporary workaround for issue in sorl-thumbnail in Python 3
# https://github.com/mariocesar/sorl-thumbnail/pull/254
# Bug fix: the original read `False,` -- the trailing comma made this a
# truthy one-element tuple, silently enabling thumbnail debug behaviour.
THUMBNAIL_DEBUG = False
# Minimal two-state order/line status pipelines for the test suite.
OSCAR_INITIAL_ORDER_STATUS = 'A'
OSCAR_ORDER_STATUS_PIPELINE = {'A': ('B',), 'B': ()}
OSCAR_INITIAL_LINE_STATUS = 'a'
OSCAR_LINE_STATUS_PIPELINE = {'a': ('b', ), 'b': ()}
SECRET_KEY = 'notverysecret'  # test-only value; never use in production
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| [
"spivegin@txtsme.com"
] | spivegin@txtsme.com |
da48712a86cf4e6e34fa836703e6b99e8b23ddda | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /x44ZRvQtJ6TyZQhwx_20.py | f70bec02e1d036caf0e6764bad719e56541eceeb | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py |
def is_pandigital(n):
holder = []
for num in str(n):
holder.append(num)
unique = list(map(int, list(dict.fromkeys(holder))))
values_to_check = list(range(0, 10))
result = all(elem in unique for elem in values_to_check)
return(result)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0b4fa5edf61e03ebd315420faa11e09cf3b22720 | 564b9c85088ad5f3dd5929fb617f04086967558c | /m_f_CWproj/m_f_app/views.py | 249c133416c8d61b51313d7ea33c903c39fbd650 | [] | no_license | cs-fullstack-2019-spring/django-modelform1-cw-Joshtg1104 | 044b760627f350d1657470b30d1bb5ece5aed203 | dd96648f9b5f019b1cbf6deb198f63af3dc29915 | refs/heads/master | 2020-04-25T18:08:01.539136 | 2019-03-01T19:46:32 | 2019-03-01T19:46:32 | 172,974,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | from django.http import HttpResponse
from django.shortcuts import render
from .forms import NewPostForm
# Create your views here.
# index function that applies the form "NewPostForm" to newPost.html
def index(request):
    """Render the new-post form; on a valid POST, save it and show the blog page.

    Bug fixes vs. the original: an *invalid* POST used to fall through to
    the success page anyway, silently discarding the form's validation
    errors -- it now re-renders the bound form so errors are displayed.
    A leftover debug print was also removed.
    """
    if request.method == "POST":
        new_post = NewPostForm(request.POST)
        if new_post.is_valid():
            new_post.save(commit=True)
            # Saved successfully: show the blog entry page.
            return render(request, "m_f_app/blogEntry.html")
    else:
        new_post = NewPostForm()
    context = {
        "posts": new_post
    }
    return render(request, "m_f_app/newPost.html", context)
| [
"joshtg1104@gmail.com"
] | joshtg1104@gmail.com |
c23eb8db332afec90a2ef03172fdfaffb27a4130 | 5cbd6ef31c94f671c76e49d4c8ea607df157ac02 | /tests/test_colors.py | 2e97602836ddf342aaabff90faf2d5f519541dd8 | [
"MIT"
] | permissive | brianbruggeman/kelte | 5e182370f1d6b88cd9e262c06c075926870181f6 | 6dd8a53da07697ffc87e62aa397be7b3b08f0aa0 | refs/heads/master | 2020-03-20T23:17:58.841258 | 2018-07-10T21:53:18 | 2018-07-24T15:55:48 | 137,839,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,950 | py | import pytest
@pytest.mark.parametrize(
    "data",
    [
        { # default
            "kwds": {},
            "expected": {
                "red": 0,
                "green": 0,
                "blue": 0,
                "alpha": 255,
                "hex": "00" * 3,
                "hexa": "00" * 3 + "ff",
            },
        },
        { # black
            "attrs": {"hexa": "000000ff"},
            "expected": {
                "red": 0,
                "green": 0,
                "blue": 0,
                "alpha": 255,
                "hex": "00" * 3,
                "hexa": "00" * 3 + "ff",
            },
        },
        { # white
            "kwds": {"r": 1.0, "g": 1.0, "b": 1.0, "a": 1.0},
            "expected": {
                "red": 255,
                "green": 255,
                "blue": 255,
                "alpha": 255,
                "hex": "ff" * 3,
                "hexa": "ff" * 4,
            },
        },
    ],
)
def test_color_data_class(data):
    """Color built from kwargs, or mutated via attributes, exposes the
    expected channel values / hex encodings and equals its tuple form."""
    from kelte.colors import Color
    kwds = data.get("kwds")
    attrs = data.get("attrs")
    expected = data["expected"]
    if kwds:
        c = Color(**kwds)
    else:
        c = Color()
    if attrs:
        for attr_name, attr_value in attrs.items():
            try:
                setattr(c, attr_name, attr_value)
            except AttributeError:
                # Fallback for read-only properties: poke the stored object
                # directly.  NOTE(review): assumes c.__dict__[attr_name]
                # carries a mutable `value` attribute -- confirm in Color.
                c.__dict__[attr_name].__setattr__("value", attr_value)
    for key, value in expected.items():
        assert getattr(c, key) == value
    assert c == tuple(c)
@pytest.mark.parametrize(
    "data",
    [
        { # default
            "name": "red",
            "args": (255, 0, "00", 1.0),
            "named_tdl_color": "red",
            "named_args": (255, 0, 0, 255),
        }
    ],
)
def test_tdl_color_integration(data):
    """Round-trip between kelte Color and tcod named colors via tdl_color."""
    import tcod as tdl
    from kelte.colors import Color
    # Get
    name = data["name"]
    args = data["args"]
    tdl_color = getattr(tdl, name)
    c = Color(*args)
    assert tdl_color == c.tdl_color
    # Set
    named_tdl_color = data["named_tdl_color"]
    named_args = data["named_args"]
    named_tdl_color = getattr(tdl, named_tdl_color)
    c.tdl_color = named_tdl_color
    assert c == Color(*named_args)
    assert c == named_tdl_color
@pytest.mark.parametrize(
    "data",
    [
        {"args": [None], "raises": TypeError, "expected": None}, # None
        {"kwds": {"value": 1.0}, "raises": None, "expected": 1.0}, # float
        {"kwds": {"value": 255}, "raises": None, "expected": 1.0}, # integer
        {"args": ["ff"], "raises": None, "expected": 1.0}, # string
        { # out of range integer
            "kwds": {"value": 256},
            "raises": ValueError,
            "expected": None,
        },
        { # out of range float
            "kwds": {"value": 1.1},
            "raises": ValueError,
            "expected": None,
        },
    ],
)
def test_convert(data):
    """_convert normalises int/float/hex-string channels to a 0..1 float,
    rejecting out-of-range and unsupported inputs."""
    from kelte.colors import _convert
    args = data.get("args", [])
    kwds = data.get("kwds", {})
    raises = data.get("raises")
    expected = data.get("expected")
    # Dispatch to _convert with whichever of args/kwds the case supplies.
    def run(*args, **kwds):
        if args and kwds:
            result = _convert(*args, **kwds)
        elif args:
            result = _convert(*args)
        elif kwds:
            result = _convert(**kwds)
        else:
            raise RuntimeError("No data")
        assert result == expected
    if not raises:
        run(*args, **kwds)
    else:
        with pytest.raises(raises):
            run(*args, **kwds)
def test_get_color():
    """Every named tcod color resolves to an equal kelte color by name."""
    import tcod as tdl
    from kelte.colors import get_color
    # Collect all module-level tdl.Color attributes as name -> color.
    tdl_colors = {
        d: getattr(tdl, d) for d in dir(tdl) if isinstance(getattr(tdl, d), tdl.Color)
    }
    for tdl_color_name, tdl_color in tdl_colors.items():
        kelte_color = get_color(tdl_color_name)
        assert kelte_color == tdl_color
if __name__ == "__main__":
    import sys
    # Allow running this test module directly; CLI args go to pytest.
    pytest.main(sys.argv)
| [
"Brian.M.Bruggeman@gmail.com"
] | Brian.M.Bruggeman@gmail.com |
dd2ad3e05f47b691c6e78059e89ea5774dccd80d | 6559138a129678027dc82c059330cfaa14d61897 | /src/0.py | 8065041b974a6cf118ce1091864cc2ab446a4da4 | [
"CC0-1.0"
] | permissive | ytyaru/Python.curses.20210607091706 | 7c9b449920ba426a5a5bd7d739efc6f071007b38 | 50a38593cbe71da4d8c667a3afa8be0b36ee8f02 | refs/heads/master | 2023-05-31T13:19:40.736629 | 2021-06-09T04:05:11 | 2021-06-09T04:05:11 | 374,500,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | #!/usr/bin/env python3
# coding: utf8
# Set the terminal's color pairs and display every available color index.
import os, curses
def main(stdscr):
    """Paint every available color index on the terminal, then wait for a key."""
    # Fail fast on terminals without (mutable) color support.
    if not curses.has_colors(): raise Exception('このターミナルは色を表示できません。')
    if not curses.can_change_color(): raise Exception('このターミナルは色を変更できません。')
    curses.use_default_colors()
    # Pair i draws foreground color i-1 on the default background (-1).
    for i in range(1, curses.COLORS):
        curses.init_pair(i, i-1, -1)
    curses.init_pair(1, 0, 15) # curses.COLOR_BLACK, curses.COLOR_WHITE
    try:
        for i in range(1, curses.COLORS):
            stdscr.addstr(str(i).rjust(3), curses.A_REVERSE | curses.color_pair(i))
    except curses.error:
        # Bug fix: the original caught `curses.ERR`, which is an int
        # constant, not an exception type -- matching it would itself raise
        # TypeError.  addstr raises curses.error when the screen is full.
        pass
    stdscr.refresh()
    stdscr.getkey()
if __name__ == "__main__":
    # Force a 256-color terminfo entry before curses initialises.
    os.environ['TERM'] = 'xterm-256color'
    curses.wrapper(main)
| [
"yttry0@gmail.com"
] | yttry0@gmail.com |
ad040adb2f5a1b4fa3eb0e8111e40362616bd91a | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Trigger/TrigSteer/TrigSteering/share/pureSteering_menu_with_multi_seeding.py | 29a92c02af4ab242863feb1cbb6a7965ae498cdf | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | from TriggerMenu.TriggerConfigLVL1 import TriggerConfigLVL1
# --- LVL1 configuration: thresholds and menu items ------------------------
from TriggerMenu.l1.Lvl1Flags import Lvl1Flags
from TriggerMenu.l1.Lvl1MenuItems import LVL1MenuItem
from TriggerMenu.l1.TriggerTypeDef import TT
from TriggerMenu.l1.Lvl1Condition import ThrCondition, Lvl1InternalTrigger
from TriggerMenu.l1.Logic import Logic
# L1 necessary
Lvl1Flags.thresholds = ['EM3','EM25i']
Lvl1Flags.items = ['L1_EM3','L1_EM25i']
l1menu = TriggerConfigLVL1(outputFile = 'l1menu.xml', menuName = 'menutest', topoMenu = None)
LVL1MenuItem.l1configForRegistration = l1menu
# Every item additionally requires both bunch groups.
bgrp = Logic(Lvl1InternalTrigger('BGRP0')) & Logic(Lvl1InternalTrigger('BGRP1'))
thr = l1menu.registerThr('EM3','EM').addThrValue(3)
LVL1MenuItem('L1_EM3').setLogic( ThrCondition(thr) & bgrp ).setTriggerType( TT.calo )
thr = l1menu.registerThr('EM25i','EM').addThrValue(25)
LVL1MenuItem('L1_EM25i').setLogic( ThrCondition(thr) & bgrp ).setTriggerType( TT.calo )
# --- HLT configuration: sequences and chains ------------------------------
from TriggerMenu.menu.TriggerPythonConfig import TriggerPythonConfig
from TriggerMenu.menu.HLTObjects import HLTChain
menu = TriggerPythonConfig('pureSteering_menu.xml')
from TrigSteeringTest.TrigSteeringTestConf import PESA__dummyAlgo, PESA__dummyHypo, PESA__newDummyAlgo2To1, PESA__newDummyAlgoAllTEs, PESA__dummyAlgoHLTErrorCode, PESA__dummyMonitoredFex, PESA__dummyAlgoForCalibration
Egamma = PESA__dummyAlgo('Egamma_L2') # create configurable instance
Egamma2 = PESA__dummyAlgo('Egamma2_L2') # create another configurable instance
EgammaAdv3 = PESA__dummyAlgo("EgammaAdv3")
EgammaAdv_L2 = PESA__dummyAlgo("EgammaAdv_L2")
menu.addSequence("EM15i" , [ Egamma, Egamma2, EgammaAdv3 ] , "em15i" ) # use both here, mixing with old-fashioned way
menu.addSequence("em15i" , EgammaAdv_L2, "em15i'" ) # create configurable on the fly
# chains
# passage based on EM3
chain = HLTChain( chain_name="L2_Ored", chain_counter="100", lower_chain_name="L1_MU6,L1_EM25i", level="HLT", prescale="1", pass_through="0")
chain.addStreamTag('electrons', prescale='1', obeyLB="0")
chain.addGroup("electrons")
menu.addHLTChain(chain)
chain = HLTChain( chain_name="L2_e25i", chain_counter="101", lower_chain_name="L1_EM25i", level="HLT", prescale="1", pass_through="0")
chain.addStreamTag('electrons', prescale='1', obeyLB="0")
chain.addGroup("electrons")
menu.addHLTChain(chain)
chain = HLTChain( chain_name="L2_mu6", chain_counter="102", lower_chain_name="L1_MU6", level="HLT", prescale="1", pass_through="0")
chain.addStreamTag('electrons', prescale='1', obeyLB="0")
chain.addGroup("electrons")
menu.addHLTChain(chain)
# Write the XML menu files and a dot graph of the algorithm sequences.
menu.writeConfigFiles();
menu.dot(algs=True)
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
a149115e46739b848bda41b1dcf90d87e52bf605 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_13371.py | 322e85169c6dc9863b7ef4ed1e782d6d7ded750a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # How to implemente oauth process in a different window and then reload caller window
window.parent.location.reload();
window.close();
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
9f71501b969443ac8f8eb704fe52c12ef0ec0c03 | 6e9c9128054da7eea28a4627381df28f95416ee5 | /finance_ml/labeling/trend.py | 5f114e57c71fe6fb5efa361de0ecb43606c8b615 | [
"MIT"
] | permissive | BTCTON/finance_ml | c5a4ad2486608ad19c92c04c70fe513be135c236 | a585be2d04db5a749eb6b39b7336e5aeb30d6327 | refs/heads/master | 2021-12-23T07:53:13.791609 | 2021-10-15T01:47:41 | 2021-10-15T01:47:41 | 158,898,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,671 | py | import numpy as np
import pandas as pd
import multiprocessing as mp
import statsmodels.api as sm
from ..multiprocessing import mp_pandas_obj
def t_val_linreg(close):
    """Return the t-statistic of the slope from an OLS fit of *close* on time."""
    n_obs = close.shape[0]
    design = np.ones((n_obs, 2))
    design[:, 1] = np.arange(n_obs)
    fit = sm.OLS(close, design).fit()
    return fit.tvalues[1]
def _get_bins_from_trend(molecule, close, min_step, max_step, step):
    """Label each timestamp in *molecule* by the sign of its strongest trend.

    For every start date, OLS trend lines are fitted over horizons
    ``range(min_step, max_step + 1, step)``; the horizon whose slope
    t-statistic has the largest absolute value wins.

    Returns a DataFrame indexed like *molecule* with columns:
    t1 (window end), t_val (winning t-stat), bin (its sign).
    """
    out = pd.DataFrame(index=molecule, columns=['t1', 't_val','bin'])
    hrzns = list(range(min_step, max_step + 1, step))
    for dt0 in molecule:
        iloc0 = close.index.get_loc(dt0)
        # Skip start dates whose longest horizon would run past the series.
        if iloc0 + max(hrzns) > close.shape[0]:
            continue
        df0 = pd.Series()
        for hrzn in hrzns:
            dt1 = close.index[iloc0 + hrzn - 1]
            df1 = close.loc[dt0:dt1]
            df0.loc[dt1] = t_val_linreg(df1.values)
        # Get maximum tstats point
        # (inf/NaN t-stats are neutralised to 0 before taking the argmax)
        dt1 = df0.replace([-np.inf, np.inf, np.nan], 0).abs().idxmax()
        # NOTE(review): t1 is recorded as the *last* scanned horizon end
        # (df0.index[-1]) rather than the argmax dt1 -- confirm intended.
        out.loc[dt0, ['t1', 't_val', 'bin']] = df0.index[-1], df0[dt1], np.sign(df0[dt1])
    out['t1'] = pd.to_datetime(out['t1'])
    out['bin'] = pd.to_numeric(out['bin'], downcast='signed')
    return out.dropna(subset=['bin'])
def get_bins_from_trend(close, max_step, min_step=3, step=1, num_threads=None):
    """Parallel driver for _get_bins_from_trend over every date in close.index."""
    workers = mp.cpu_count() if num_threads is None else num_threads
    return mp_pandas_obj(func=_get_bins_from_trend,
                         pd_obj=('molecule', close.index),
                         num_threads=workers,
                         close=close,
                         max_step=max_step,
                         min_step=min_step,
                         step=step)
| [
"f.j.akimoto@gmail.com"
] | f.j.akimoto@gmail.com |
796050d7fcc74771bea6bbb5c4dcb549237f4767 | 88f7a8c58de6003722ec45a0ad752023470a2121 | /snippets/logger.py | fdbfbc110e6c46ea24078264cf0f075ce6dd0b5b | [
"MIT"
] | permissive | boada/planckClusters | 5f90fcdddb13040a6ba50ef8d24eac390ae6e476 | 731be562e9e91e4468f5a90d8f161a4e9593fcc8 | refs/heads/master | 2021-01-13T14:30:19.473845 | 2019-12-18T21:19:43 | 2019-12-18T21:19:43 | 74,033,781 | 3 | 0 | null | 2017-09-05T15:09:18 | 2016-11-17T14:18:33 | Python | UTF-8 | Python | false | false | 1,452 | py | """
These functions can be used for logging information.
.. Warning:: logger is not multiprocessing safe.
:author: Sami-Matias Niemi
:contact: smn2@mssl.ucl.ac.uk
:version: 0.3
"""
from __future__ import print_function
from builtins import object
import logging
import logging.handlers
def setUpLogger(log_filename, loggername='logger'):
    """
    Sets up a logger that writes to *log_filename*.

    :param log_filename: name of the file to save the log.
    :param loggername: name of the logger
    :return: logger instance
    """
    logger = logging.getLogger(loggername)
    logger.setLevel(logging.DEBUG)
    # Bug fix: only attach a handler the first time this logger name is
    # requested.  The original added a new RotatingFileHandler on every
    # call, so repeated calls duplicated each log record.
    if not logger.handlers:
        handler = logging.handlers.RotatingFileHandler(log_filename)
        formatter = logging.Formatter(
            '%(asctime)s - %(module)s - %(funcName)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
class SimpleLogger(object):
    """
    A simple class to create a log file or print the information on screen.

    Fix vs. the original: the opened file handle was never closed (it
    leaked); a close() method and context-manager support are added.
    """

    def __init__(self, filename, verbose=False):
        # Opened in text mode, truncating any existing file.
        self.file = open(filename, 'w')
        self.verbose = verbose

    def write(self, text):
        """
        Writes text either to file or screen.
        """
        print(text, file=self.file)
        if self.verbose: print(text)

    def close(self):
        """Close the underlying log file."""
        self.file.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False
| [
"stevenboada@gmail.com"
] | stevenboada@gmail.com |
56bbe72cc69782b3a1eb894bcbf4c83145739ce8 | 19907d6f47f7af804b0ddee642d98edd170d7f58 | /tests/test_param.py | 943303a993ebac47c916b34093468acd6d0c3503 | [
"MIT"
] | permissive | GuancongLuo/roslibpy | 3560080271d9df841a051abab402479fdcc895f2 | 92b97d6daa78d30384e3a347d46be51c6e7fbd01 | refs/heads/main | 2023-07-13T02:06:10.846579 | 2021-08-13T11:48:22 | 2021-08-13T11:48:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from roslibpy import Param
from roslibpy import Ros
def test_param_manipulation():
    """Round-trip a ROS parameter: unset -> set -> read back -> delete."""
    client = Ros('127.0.0.1', 9090)
    client.run()
    parameter = Param(client, 'test_param')
    assert parameter.get() is None
    parameter.set('test_value')
    assert parameter.get() == 'test_value'
    parameter.delete()
    assert parameter.get() is None
    client.close()
| [
"casas@arch.ethz.ch"
] | casas@arch.ethz.ch |
09b864579ac3497d0486423317416ded477a0db9 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_9331.py | 552551a0597c6aa2699bf45fd4d1323e76fb108c | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,332 | py | import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}

# One entry per COG-complex tag: (marker-set name, xyz position, rgb color).
# Every marker shares the same radius.  This table-driven loop replaces 21
# copy-pasted five-line blocks with identical behaviour: create the set on
# first use, then always place the marker.
_RADIUS = 21.9005
_MARKERS = [
    ("Cog1_Anch", (565, 306, 449), (0, 0, 1)),
    ("Cog2_GFPN", (962, 365, 656), (1, 0.5, 0)),
    ("Cog2_GFPC", (508, 155, 114), (1, 0.5, 0)),
    ("Cog2_Anch", (684, 442, 968), (1, 0.5, 0)),
    ("Cog3_GFPN", (865, 924, 616), (1, 0.87, 0)),
    ("Cog3_GFPC", (975, 275, 494), (1, 0.87, 0)),
    ("Cog3_Anch", (73, 361, 504), (1, 0.87, 0)),
    ("Cog4_GFPN", (980, 209, 831), (0.97, 0.51, 0.75)),
    ("Cog4_GFPC", (337, 707, 802), (0.97, 0.51, 0.75)),
    ("Cog4_Anch", (925, 828, 933), (0.97, 0.51, 0.75)),
    ("Cog5_GFPN", (633, 959, 252), (0.39, 0.31, 0.14)),
    ("Cog5_GFPC", (382, 631, 801), (0.39, 0.31, 0.14)),
    ("Cog5_Anch", (701, 484, 387), (0.39, 0.31, 0.14)),
    ("Cog6_GFPN", (288, 985, 78), (0.6, 0.31, 0.64)),
    ("Cog6_GFPC", (732, 724, 98), (0.6, 0.31, 0.64)),
    ("Cog6_Anch", (383, 183, 918), (0.6, 0.31, 0.64)),
    ("Cog7_GFPN", (749, 218, 45), (0.89, 0.1, 0.1)),
    ("Cog7_GFPC", (534, 194, 633), (0.89, 0.1, 0.1)),
    ("Cog7_Anch", (768, 306, 997), (0.89, 0.1, 0.1)),
    ("Cog8_GFPC", (925, 416, 300), (0.3, 0.69, 0.29)),
    ("Cog8_Anch", (464, 22, 225), (0.3, 0.69, 0.29)),
]
for name, pos, color in _MARKERS:
    if name not in marker_sets:
        marker_sets[name] = new_marker_set(name)
    s = marker_sets[name]
    mark = s.place_marker(pos, color, _RADIUS)
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
6e30cc3df48b3e879295e8c3e9842e10d1b8cbbe | 67a7c314fc99d9cd7a677fcb6bc2b6dfa20a9cff | /feeds/admin.py | 1468de4b629f86c39aa9a3bf5e76bab219607b47 | [] | no_license | Xodarap/Eipi | 7ebbb9fd861fdb411c1e273ea5d2a088aa579930 | d30997a737912e38316c198531f7cb9c5693c313 | refs/heads/master | 2016-09-11T06:28:01.333832 | 2011-05-03T15:35:20 | 2011-05-03T15:35:20 | 1,367,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from eipi2.feeds.models import Story, Source
from django.contrib import admin
# Story is intentionally left unregistered (kept for reference); only
# Source is editable through the admin site.
#admin.site.register(Story)
admin.site.register(Source)
| [
"eipi@mybox.(none)"
] | eipi@mybox.(none) |
9a1fd6a0cc906e207694663ee5bd730c1dabfb9a | 4ae3b27a1d782ae43bc786c841cafb3ace212d55 | /Test_Slen/Pytest_proj/01/unit_proj1/unittest_code/test_calc.py | ad41eb73c03723bfdbcc20d36f7d612633eab79e | [] | no_license | bopopescu/Py_projects | c9084efa5aa02fd9ff6ed8ac5c7872fedcf53e32 | a2fe4f198e3ca4026cf2e3e429ac09707d5a19de | refs/heads/master | 2022-09-29T20:50:57.354678 | 2020-04-28T05:23:14 | 2020-04-28T05:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,838 | py | # Run all tests in the project
# (01) C:\Users\jsun\Documents\Py_projects\Pytest_proj\01\unit_proj1>python -m unittest discover
#
# run certain test files
# (01) C:\Users\jsun\Documents\Py_projects\Pytest_proj\01\unit_proj1>python -m unittest unittest_code.test_calc
#
# run a module in a file
# (01) C:\Users\jsun\Documents\Py_projects\Pytest_proj\01\unit_proj1>python -m unittest unittest_code.test_calc.CalculatorTest
import unittest
from dev_code.calc import Calculator
# Shared fixtures: two operands and a common assertion failure message.
NUMBER_1 = 3.0
NUMBER_2 = 2.0
FAILURE = 'incorrect value'
class CalculatorTest(unittest.TestCase):
    """Unit tests for Calculator arithmetic and its last_answer tracking."""
    def setUp(self):
        # Fresh calculator per test so last_answer starts at its default.
        self.calc = Calculator()
    def test_last_answer_init(self):
        value = self.calc.last_answer
        self.assertEqual(value, 0.0, FAILURE)
    def test_add(self):
        value = self.calc.add(NUMBER_1, NUMBER_2)
        self.assertEqual(value, 5.0, FAILURE)
        self.assertEqual(value, self.calc.last_answer, FAILURE)
    def test_subtract(self):
        value = self.calc.subtract(NUMBER_1, NUMBER_2)
        self.assertEqual(value, 1.0, FAILURE)
        self.assertEqual(value, self.calc.last_answer, FAILURE)
    def test_subtract_negative(self):
        value = self.calc.subtract(NUMBER_2, NUMBER_1)
        self.assertEqual(value, -1.0, FAILURE)
        self.assertEqual(value, self.calc.last_answer, FAILURE)
    def test_multiply(self):
        value = self.calc.multiply(NUMBER_1, NUMBER_2)
        self.assertEqual(value, 6.0, FAILURE)
        self.assertEqual(value, self.calc.last_answer, FAILURE)
    def test_divide(self):
        value = self.calc.divide(NUMBER_1, NUMBER_2)
        self.assertEqual(value, 1.5, FAILURE)
        self.assertEqual(value, self.calc.last_answer, FAILURE)
    def test_divide_by_zero(self):
        # divide must propagate ZeroDivisionError rather than return a value.
        self.assertRaises(ZeroDivisionError, self.calc.divide, NUMBER_1, 0)
"sunusd@yahoo.com"
] | sunusd@yahoo.com |
f464a13b1fcfdec9f6c272a83db482c04e00a7ea | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/darcymason_pydicom/pydicom-master/pydicom/examples/write_new.py | fa26c8a48166bc0bbc812d9e5803e25e98916302 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,901 | py | # write_new.py
"""Simple example of writing a DICOM file from scratch using pydicom.
This example does not produce a DICOM standards compliant file as written,
you will have to change UIDs to valid values and add all required DICOM data
elements
"""
# Copyright (c) 2010-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
from __future__ import print_function
import sys
import datetime
import os.path
import pydicom
from pydicom.dataset import Dataset, FileDataset
import pydicom.uid
if __name__ == "__main__":
print("---------------------------- ")
print("write_new.py example program")
print("----------------------------")
print("Demonstration of writing a DICOM file using pydicom")
print("NOTE: this is only a demo. Writing a DICOM standards compliant file")
print("would require official UIDs, and checking the DICOM standard to ensure")
print("that all required data elements were present.")
print()
if sys.platform.lower().startswith("win"):
filename = r"c:\temp\test.dcm"
filename2 = r"c:\temp\test-explBig.dcm"
else:
homedir = os.path.expanduser("~")
filename = os.path.join(homedir, "test.dcm")
filename2 = os.path.join(homedir, "test-explBig.dcm")
print("Setting file meta information...")
# Populate required values for file meta information
file_meta = Dataset()
file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2' # CT Image Storage
file_meta.MediaStorageSOPInstanceUID = "1.2.3" # !! Need valid UID here for real work
file_meta.ImplementationClassUID = "1.2.3.4" # !!! Need valid UIDs here
print("Setting dataset values...")
# Create the FileDataset instance (initially no data elements, but file_meta supplied)
ds = FileDataset(filename, {}, file_meta=file_meta, preamble=b"\0" * 128)
# Add the data elements -- not trying to set all required here. Check DICOM standard
ds.PatientName = "Test^Firstname"
ds.PatientID = "123456"
# Set the transfer syntax
ds.is_little_endian = True
ds.is_implicit_VR = True
# Set creation date/time
dt = datetime.datetime.now()
ds.ContentDate = dt.strftime('%Y%m%d')
timeStr = dt.strftime('%H%M%S.%f') # long format with micro seconds
ds.ContentTime = timeStr
print("Writing test file", filename)
ds.save_as(filename)
print("File saved.")
# Write as a different transfer syntax
ds.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRBigEndian # XXX shouldn't need this but pydicom 0.9.5 bug not recognizing transfer syntax
ds.is_little_endian = False
ds.is_implicit_VR = False
print("Writing test file as Big Endian Explicit VR", filename2)
ds.save_as(filename2)
| [
"659338505@qq.com"
] | 659338505@qq.com |
8e0824c850a042e87ea7c8ce05119a0b59d37eca | dd35833bead7de2f2ca7affd985ac0d345b2ab6e | /apps/trade/adminx.py | ab5b4d915f633a0a7dbb263fc9dd28104e07e87c | [] | no_license | lhsheild/MxShop | df14c11aa7457f304194ff099a35869d83f0d9a7 | 811be4dad55284e737c80ebd4d00c079837393f2 | refs/heads/master | 2020-05-27T16:23:00.578686 | 2019-09-10T08:09:01 | 2019-09-10T08:09:01 | 188,130,934 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | # trade/adminx.py
__author__ = 'derek'
import xadmin
from .models import ShoppingCart, OrderInfo, OrderGoods
class ShoppingCartAdmin(object):
list_display = ["user", "goods", "nums", ]
class OrderInfoAdmin(object):
list_display = ["user", "order_sn", "trade_no", "pay_status", "post_script", "order_mount",
"order_mount", "pay_time", "add_time"]
class OrderGoodsInline(object):
model = OrderGoods
exclude = ['add_time', ]
extra = 1
style = 'tab'
inlines = [OrderGoodsInline, ]
xadmin.site.register(ShoppingCart, ShoppingCartAdmin)
xadmin.site.register(OrderInfo, OrderInfoAdmin)
| [
"lhsheild@yahoo.com"
] | lhsheild@yahoo.com |
a1424d110e77f4bb77673e4f6a542829529d41b1 | de40d3fa8d8af0030556d27d6833f6a1a0e7700c | /baekjoon/3036py/a.py | 981dc22c90b5ec6aadaa481201e84db3e998b531 | [] | no_license | NeoMindStd/CodingLife | cd6a627209c0353f4855f09fd5dfef8da4bbfef6 | bcb6c3752f472e6a4f3b8f158d02bc3599dfcda3 | refs/heads/master | 2022-12-24T10:42:45.390085 | 2022-12-11T16:27:16 | 2022-12-11T16:27:16 | 191,797,634 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
input()
rings = list(map(int, input().split()))
for i in range(1, len(rings)):
g = gcd(rings[0], rings[i])
print("%d/%d" %(rings[0]//g, rings[i]//g))
| [
"dwj1996@naver.com"
] | dwj1996@naver.com |
3fb17157eabb72cf0582411fe8606ee38f2b47bc | 0d4ec25fb2819de88a801452f176500ccc269724 | /missing_words.py | f9dc6f23dfc2433a00eec544316e21f47b3862a2 | [] | no_license | zopepy/leetcode | 7f4213764a6a079f58402892bd0ede0514e06fcf | 3bfee704adb1d94efc8e531b732cf06c4f8aef0f | refs/heads/master | 2022-01-09T16:13:09.399620 | 2019-05-29T20:00:11 | 2019-05-29T20:00:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | def missingWords(s, t):
s = s.split()
t = t.split()
sl = len(s)
tl = len(t)
i = 0
j = 0
missing = []
while i<sl or j<tl:
if i<sl and j<tl and s[i] == t[j]:
i+=1
j+=1
else:
missing.append(s[i])
i+=1
return missing
s = "I am using hackerrank to improve programming"
t = "am hackerrank to improve"
print(missingWords(s, t)) | [
"rohithiitj@gmail.com"
] | rohithiitj@gmail.com |
6cf8cd691b13e24ce9df52d46259cef29fdf0c2d | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/keras/optimizer_v2/learning_rate_schedule.py | 44b502c0fd85e896c1bb8b0cb0755f152482e8d8 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d64207cd938934798e16ea7cab234dc04101b6193775075e683965bd07f43ce7
size 38611
| [
"business030301@gmail.com"
] | business030301@gmail.com |
58d7bb7fcdd3c6a57d6f09ecd8bf6afd942d5ddc | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Data Scientist with Python - Career Track /22. Machine Learning with the Experts: School Budgets/02. Creating a simple first model/04. Writing out your results to a csv for submission.py | 1ecf2dabc884d95e4128cdc5264f21ac5eb946f2 | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,935 | py | '''
Writing out your results to a csv for submission
At last, you're ready to submit some predictions for scoring. In this exercise, you'll write your predictions to a .csv using the .to_csv() method on a pandas DataFrame. Then you'll evaluate your performance according to the LogLoss metric discussed earlier!
You'll need to make sure your submission obeys the correct format.
To do this, you'll use your predictions values to create a new DataFrame, prediction_df.
Interpreting LogLoss & Beating the Benchmark:
When interpreting your log loss score, keep in mind that the score will change based on the number of samples tested. To get a sense of how this very basic model performs, compare your score to the DrivenData benchmark model performance: 2.0455, which merely submitted uniform probabilities for each class.
Remember, the lower the log loss the better. Is your model's log loss lower than 2.0455?
Instructions
100 XP
Create the prediction_df DataFrame by specifying the following arguments to the provided parameters pd.DataFrame():
pd.get_dummies(df[LABELS]).columns.
holdout.index.
predictions.
Save prediction_df to a csv file called 'predictions.csv' using the .to_csv() method.
Submit the predictions for scoring by using the score_submission() function with pred_path set to 'predictions.csv'.
'''
SOLUTION
# Generate predictions: predictions
predictions = clf.predict_proba(holdout[NUMERIC_COLUMNS].fillna(-1000))
# Format predictions in DataFrame: prediction_df
prediction_df = pd.DataFrame(columns=pd.get_dummies(df[LABELS]).columns,
index=holdout.index,
data=predictions)
# Save prediction_df to csv
prediction_df.to_csv('predictions.csv')
# Submit the predictions for scoring: score
score = score_submission(pred_path='predictions.csv')
# Print score
print('Your model, trained with numeric data only, yields logloss score: {}'.format(score)) | [
"didimilikina8@gmail.com"
] | didimilikina8@gmail.com |
7901103214afbd0b66f90ed496afaa03613081e1 | 1f2b85cc95a1f65df81c0dec8bfb8d7e5652aa88 | /schema/sdss5db/catalogdb/sdss_qso/dr16/sdss_dr16_qso_load.py | aa63b00775fb326e5c4ad9f70ad4140c5bfbffa5 | [
"BSD-3-Clause"
] | permissive | sdss/sdssdb | 2f22a90c51e1281c154f6c518a1ce8951f84a5ce | f7d3ef3ccfecd87b50ce05cf6af5a564679f15f5 | refs/heads/main | 2023-09-01T14:51:05.374886 | 2023-08-30T20:23:10 | 2023-08-30T20:23:10 | 136,257,152 | 9 | 3 | BSD-3-Clause | 2023-09-14T16:31:47 | 2018-06-06T01:55:39 | Python | UTF-8 | Python | false | false | 912 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2020-07-17
# @Filename: sdss_dr16_qso_load.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import os
import astropy.table
from sdssdb.peewee.sdss5db import database
from sdssdb.utils.ingest import to_csv
assert database.connected
def main():
database.become_admin()
file_ = os.environ['CATALOGDB_DIR'] + '/sdss_qso/dr16q/DR16Q_v4.fits'
data = astropy.table.Table.read(file_)
data.meta = {}
data.rename_columns(data.colnames, list(map(lambda x: x.lower(), data.colnames)))
to_csv(data, file_ + '.csv', header=False, delimiter=',')
del data
cursor = database.cursor()
fileobj = open(file_ + '.csv')
cursor.copy_from(fileobj, 'catalogdb.sdss_dr16_qso', sep=',')
database.commit()
if __name__ == '__main__':
main()
| [
"gallegoj@uw.edu"
] | gallegoj@uw.edu |
458e5843502120422f19ee16994dd5abe8dbc752 | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L80/80-79_MD_NVT_rerun/set_7.py | 7458323d9005088ca69f07063dec0e7ea8f6b48f | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L80/MD_NVT_rerun/ti_one-step/80_79/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_7.in'
temp_pbs = filesdir + 'temp_7.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_7.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_7.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
7ace0aa7551fd6e52de05ba8a71f111252c72041 | 6ea5b91e7f14114bea0aadfd59178997c59cd75d | /mysite/settings.py | 515a48f15dedf8c785836a2b9b94ed1f90ba2cf3 | [
"MIT"
] | permissive | sharif-42/django-elasticsearch | 68fe15333f4a2ed69c66b73bdeb27a0a1a28a115 | 999ee3382ef2ddcfcc62bd7ba330323000f3f1bf | refs/heads/main | 2023-06-18T04:04:54.162123 | 2021-07-18T03:58:47 | 2021-07-18T03:58:47 | 386,169,552 | 0 | 0 | MIT | 2021-07-18T03:58:48 | 2021-07-15T05:03:33 | Python | UTF-8 | Python | false | false | 3,525 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#qh_-(j9#^^$li$o*o0qt85ek3^1#_e4#^n93q!ef-16_*_k#a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'rest_framework',
'django_elasticsearch_dsl',
]
LOCAL_APPS = [
'library',
'search',
]
INSTALLED_APPS = INSTALLED_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Elastic Search
ELASTICSEARCH_DSL = {
'default': {
'hosts': '127.0.0.1:9200'
},
} | [
"sharifulcsehstu@gmail.com"
] | sharifulcsehstu@gmail.com |
22d4d44bb76d79af871bf58881fbafb98270f5cf | 9390033cecf4247f5edc6551c39e9de39c04b036 | /python/django/new_project/new_project/settings.py | 337949d998a67723f8026da2805507ec234a3a9f | [] | no_license | JennaDalgety/coding-dojo | 4940bb853a9fcd706bc30f1615ff824fc7f7788d | d2647257a5e93652978c911d65e2a6ff97653392 | refs/heads/master | 2020-02-26T14:59:02.324328 | 2019-03-20T23:13:38 | 2019-03-20T23:13:38 | 67,653,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,130 | py | """
Django settings for new_project project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@ps_ai9jh@lz9+x9h)et$&@xscl(-!)u&*-h1e!jj(c1z8fu-c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.new_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'new_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'new_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"jdalgety@gmail.com"
] | jdalgety@gmail.com |
24d12eba01fefcbfab00c21bc1fade185faa78e6 | 6c8c6e5d952cd821dbf6661e71896a473494a8eb | /blog/admin.py | 1e6fb4d2db410c56734a47133101fd69c3095635 | [] | no_license | Lethons/mywebsite | fe2d8fae75f1228e309a847071a7d4182f7b005d | 3a26c764ae0fa4bb628b3ed3fad3f04784e78153 | refs/heads/master | 2020-03-21T12:59:25.712867 | 2018-06-29T09:04:24 | 2018-06-29T09:04:24 | 138,582,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | from django.contrib import admin
from .models import Tag, Blog
# Register your models here.
@admin.register(Blog)
class BlogAdmin(admin.ModelAdmin):
list_display = ('title', 'author', 'tag', 'publish_time', 'update_time')
fields = ('title', 'content', 'publish_time', 'author', 'tag')
list_per_page = 20
list_filter = ('tag',)
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ('tag',)
list_per_page = 20
| [
"lethons@163.com"
] | lethons@163.com |
cc487a546b0b4540e9263ae5b052b35e62beae3d | 84a70e27982094ac78d34642674f4e873d571b23 | /src/apps/productos/migrations/0015_auto_20190812_2008.py | bfdc2ab0a43d78dca4ff19b2e75fce2e18fa6372 | [] | no_license | valenciacamilo12/Pagina-Web-Farmacia---Django- | 2b9eef0b0f5011247274a528d63fdaa0ed557fad | 7c8a9ac78bd2cf48706a2ef76cb32ac0824094ea | refs/heads/master | 2020-07-02T09:38:31.978196 | 2019-08-16T03:07:58 | 2019-08-16T03:07:58 | 201,487,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-08-13 01:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productos', '0014_auto_20190812_1939'),
]
operations = [
migrations.AlterField(
model_name='producto',
name='producto_imagen',
field=models.ImageField(default='productos/product_01.png', upload_to='media/productos'),
),
]
| [
"camilo3341215@gmail.com"
] | camilo3341215@gmail.com |
7a32049f420bba3e44a3f942d34827eddd50153c | 68561c61c2a784f1765fae8b07e90d7ec53f1edd | /maestral/__init__.py | 7a979d2e1fb6e272c6317970de4a5baf55e2f40a | [
"MIT"
] | permissive | ailiao/maestral-dropbox | 68213e219a21f0d76c80be839f73d65120cf99f5 | f7043ef23062d6cc85d61701cb129bdc8f09f065 | refs/heads/master | 2021-04-07T17:14:26.936449 | 2020-03-20T00:31:22 | 2020-03-20T00:31:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # -*- coding: utf-8 -*-
"""
@author: Sam Schott (ss2151@cam.ac.uk)
(c) Sam Schott; This work is licensed under a Creative Commons
Attribution-NonCommercial-NoDerivs 2.0 UK: England & Wales License.
The following APIs should remain stable for front ends:
* maestral.main.Maestral
* maestral.oauth
* maestral.constants
* maestral.daemon
* maestral.errors
* maestral.config.main
* maestral.utils.appdirs
* maestral.utils.backend
* maestral.utils.autostart
"""
__version__ = "0.6.2"
__author__ = "Sam Schott"
__url__ = "https://github.com/SamSchott/maestral"
| [
"ss2151@cam.ac.uk"
] | ss2151@cam.ac.uk |
0dfdbb64ae87c90a2faea1081212a757f355b52a | 593132164b6992b8b047cdf43a9d04c59fe607ee | /py_simple/14.直方图/14.4.3绘制图像直方图.py | ea06b99e0fe02ba77074c538cc3d9a4ee1bcd1d6 | [
"Apache-2.0"
] | permissive | ztfmars/OpenCV_Python_Tutorial | b69b98e5e6b4e31c4f699fdbbaac36db68b8098b | 2c9cf57f469dfec2aca8356f59f7473e5a506988 | refs/heads/main | 2023-02-14T22:40:38.456751 | 2021-01-17T02:18:21 | 2021-01-17T02:18:21 | 329,609,724 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 4 15:14:29 2018
"""
import cv2
import matplotlib.pyplot as plt
o=cv2.imread(r"../image/boatGray.bmp")
histb = cv2.calcHist([o],[0],None,[256],[0,255])
plt.plot(histb,color='r')
plt.show() | [
"ztfmars@163.com"
] | ztfmars@163.com |
fc0b447a2456e32eab54d3280fb6ec002374a543 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/21/usersdata/132/7646/submittedfiles/exercicio24.py | 6f1494ca275e078bdc33c5054fba35ef34694292 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
salario=float(input('digite o salario:')
b=salario
imposto=0
if b>3000:
imposto=imposto+((b-1000)*0.35)
b=3000
if base>1000:
imposto=imposto + ((base-1000)*0.2)
print(' salario: %.2f'%(salario, imposto)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
0830399447aa0f121363d3b62bf09941e4d667bb | 6496d7ab505ab1df594d373c0c58206d580302f6 | /exercicio2.py | 63ad04b2a7e4b5da032aad729336883f6191a972 | [] | no_license | alicesilva/P1-Python-Problemas | 7050f323a073ff51c51cd94d451bddfac71bf30b | eade521a3c7f740bb901a6714385d91356a37835 | refs/heads/master | 2020-03-09T21:04:32.211493 | 2018-08-25T19:50:53 | 2018-08-25T19:50:53 | 129,000,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | #coding: utf-8
numero1 = int(raw_input())
numero2 = int(raw_input())
numero3 = int(raw_input())
if numero1 <= numero2 and numero1 <= numero3 and numero2 < numero3:
print numero1
print numero2
print numero3
elif numero1<numero2 and numero2<numero3 and numero3<numero2:
print numero1
print numero3
print numero2
elif numero1 > numero2 and numero2 < numero3 and numero1 < numero3:
print numero2
print numero1
print numero3
elif numero1 > numero2 and numero2<numero3 and numero3<numero1:
print numero2
print numero3
print numero1
elif numero3<numero1 and numero3<numero2 and numero1<numero2:
print numero3
print numero1
print numero2
else:
print numero3
print numero2
print numero1
print
print numero1
print numero2
print numero3
| [
"alice.silva@ccc.ufcg.edu.br"
] | alice.silva@ccc.ufcg.edu.br |
3820696ada3ab15a036ddeea0e29c7a9fbd88bae | 83646bb54129da9fae70ec05e943093cc221ad85 | /Interface.py | 56afa59ebd7c4cccc2d1aa017097fa1fc2fb57fe | [] | no_license | Rybak-a87/password_store_gui | 88b6464fbbe5643c63c0a00caee02f937f4007bc | 2a2c771e34b5fda01b2d404b713cc00fa0b35816 | refs/heads/main | 2023-02-06T06:44:10.178644 | 2020-12-25T12:32:10 | 2020-12-25T12:32:10 | 318,624,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,632 | py | import tkinter
from BackendJson import BackendJson
from Cipher import Cipher
class Interface:
def __init__(self, window_name):
self.backend = BackendJson("data")
self.cipher = Cipher()
self.root = tkinter.Tk()
self.root.title(window_name)
# self.root.iconbitmap("./ico/icon.ico")
# размер и положение окна (середира экрана)
wx = self.root.winfo_screenwidth()
wy = self.root.winfo_screenheight()
x = int(wx/2 - 600/2)
y = int(wy/2 - 400/2)
self.root.geometry(f"600x400+{x}+{y}")
self.root.resizable(False, False)
# вызов начального фрейма
self.frame_home()
# создание фреймa
def create_frames(self):
return tkinter.Frame(self.root, width=600, height=400)
# главный экран
def frame_home(self):
f_home = self.create_frames()
f_home.place(relx=0.5, rely=0.5, anchor="center")
self.__author(f_home)
welcome_text = "Добро пожаловать!\nДанная программа поможет вам хранить\nи управлять Вашими паролями"
tkinter.Label(f_home, font="Arial 15", text=welcome_text).place(relx=0.5, rely=0.1, anchor="n")
# кнопки
buttons = ["Добавить", "Удалить", "Показать"]
x = 80
y = 300
for btn in buttons:
pressing = lambda but=btn: self.button_logics(but)
tkinter.Button(f_home, text=btn, font="Arial 15", command=pressing,
).place(x=x, y=y, width=110, height=50)
x += 160
# ввод ключа шифрования
tkinter.Label(f_home, font="Arial 10", text="Ключ шифрования",
).place(relx=0.05, rely=0.45, anchor="w")
self.scrypt_key = tkinter.Entry(f_home, font="Arial 10")
self.scrypt_key.place(relx=0.05, rely=0.5, anchor="w", width=480)
tkinter.Button(f_home, font="Arial 7", text="Добавить ключ",
command=lambda but="add_key": self.button_logics(but),
).place(relx=0.855, rely=0.5, anchor="w")
self.use_scrypt_key = tkinter.Label(f_home, font="Arial 7",
text="Используестя ключ по умолчанию")
self.use_scrypt_key.place(relx=0.5, rely=0.53, anchor="n")
# раздел добавления
def frame_add(self):
f_add = self.create_frames()
f_add.place(relx=0.5, rely=0.5, anchor="center")
self.button_back(f_add)
self.__author(f_add)
self.descriptions = {"Ресурс": None, "Ваш логин": None, "Ваш пароль": None}
y = 10
for description in self.descriptions.keys():
tkinter.Label(f_add, font="Arial 15", text=description).place(relx=0.5, y=y, anchor="n")
self.descriptions[description] = tkinter.Entry(f_add, font="Arial 15", width=30,
)
self.descriptions[description].place(relx=0.5, y=y + 30, anchor="n")
y += 100
tkinter.Button(f_add, command=lambda but="add_data": self.button_logics(but),
text="Сохранить", font="Arial 16", width=20,).place(relx=0.5, rely=0.8, anchor="n")
# раздел удаления
def frame_del(self):
f_del = self.create_frames()
f_del.place(relx=0.5, rely=0.5, anchor="center")
self.button_back(f_del)
self.__author(f_del)
self.temp_f_frame = f_del
self.del_list = tkinter.Listbox(f_del, font="Arial 10", selectmode=tkinter.MULTIPLE, bd=1)
tkinter.Button(f_del, command=lambda but="del_data": self.button_logics(but),
font="Arial 15", text="Удалить").place(relx=0.5, rely=0.85, anchor="n")
# работа со списком
self.data = self.backend.read_file()
for atr in self.data:
self.del_list.insert(tkinter.END, f"{self.cipher.decipher(atr)}")
self.del_list.place(relx=0.5, y=3, anchor="n", width=444, height=330)
# раздел просмотра
def frame_view(self):
f_view = self.create_frames()
f_view.place(relx=0.5, rely=0.5, anchor="center")
self.button_back(f_view)
self.__author(f_view)
self.info = tkinter.Text(f_view, font="Arial 10")
# работа со списком
data = self.backend.read_file()
for k, v in data.items():
out = f"{4*' '}{self.cipher.decipher(k)}\n" \
f"\tLogin: {self.cipher.decipher(v['login'])}\n" \
f"\tPassword: {self.cipher.decipher(v['password'])}\n" \
f"{110*'-'}"
self.info.insert(tkinter.END, out)
self.info.place(relx=0.5, y=3, anchor="n", width=446, height=380)
# логика нажатий на кнопки
def button_logics(self, pressing):
if pressing == "Добавить":
self.frame_add()
elif pressing == "Удалить":
self.frame_del()
elif pressing == "Показать":
self.frame_view()
elif pressing == "add_data":
self.add()
elif pressing == "del_data":
self.dell()
elif pressing == "add_key":
self.add_scrypt_key()
# Кнопка добавить
def add(self):
resource = self.cipher.encipher(self.descriptions["Ресурс"].get())
login = self.cipher.encipher(self.descriptions["Ваш логин"].get())
password = self.cipher.encipher(self.descriptions["Ваш пароль"].get())
if resource and login and password:
# очистить поля
self.descriptions["Ресурс"].delete(0, tkinter.END)
self.descriptions["Ваш логин"].delete(0, tkinter.END)
self.descriptions["Ваш пароль"].delete(0, tkinter.END)
# добавление данных в память
self.backend.add_to_file(resource, login, password)
# кнопка удалить
def dell(self):
if self.data:
need_del = [self.cipher.encipher(i) for i in self.del_list.selection_get().split("\n")]
self.backend.del_from_file(need_del)
# перезапуск фрейма
self.temp_f_frame.destroy()
self.frame_del()
# кнопка добавить клюс шифрования
def add_scrypt_key(self):
key = self.scrypt_key.get()
if key:
self.cipher.scrypt_key_set(key.lower())
self.use_scrypt_key["text"] = f"Используется ключ: {key}"
else:
self.cipher.scrypt_key_default()
self.use_scrypt_key["text"] = "Используестя ключ по умолчанию"
# кнопка "назад"
def button_back(self, frame):
tkinter.Button(frame, text="< Назад", font="Arial 8", command=lambda: frame.destroy(),
).place(x=3, y=3, anchor="nw")
# запуск окна
def start(self):
self.root.mainloop()
# автор "водяной знак"
def __author(self, root):
my_name = "Programm by Rybak A."
tkinter.Label(root, font="Tahoma 7", text=my_name, fg="Blue"
).place(relx=1, rely=1, anchor="se")
| [
"rybak.a87@gmail.com"
] | rybak.a87@gmail.com |
6d61f46d1ce445b4126a332ddd0961ed016516be | e3e7e069b1b66c5d63319a23a2201a38171df23f | /dataUpdate/util/ts_util.py | 590eb20c1be54ba8a6958b00f1cae028f8c95ba1 | [] | no_license | webclinic017/backtrader_learning | b6ba1199ac31b5bbad0ce7abe7e4fca912cb9c8a | dfa5202a99af96273d49a4e92412c3b0d3c724a7 | refs/heads/main | 2023-08-18T17:44:20.751035 | 2021-10-09T08:43:29 | 2021-10-09T08:43:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | import tushare as ts
token = 'e554b98ac431b1146b1db6bcb89dde62837fd87fb4c31aca54ff249f'
pro = ts.pro_api(token=token)
def getStockBasicInfo():
'''
获取stock的基础信息
:return:
'''
data = pro.query('stock_basic', exchange='', list_status='L',
fields='ts_code,symbol,name,area,industry,market,list_date')
return data
def nameConvert(ts_name):
    """Convert a tushare code such as ``000001.SZ`` to ``sz.000001``.

    The market suffix is lowercased and moved in front of the numeric code.
    """
    code, market = ts_name.split('.')
    return market.lower() + '.' + code
"wuzifan0817@gmail.com"
] | wuzifan0817@gmail.com |
3a204339dd77109d1f6901ef336b30dc4e400fea | 8fa003141ed3a01e5aa55369d63c332f43110c5b | /test/fx2trt/converters/acc_op/test_min.py | 2f19a72cf9753885e1e7a766851f211477aa7e28 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | EshikaShah/pytorch | aec1e648fbcc32f2af5b59500e3986df3cfa4d9e | 8734eed829b0a14b819b97982222e3f494db01d1 | refs/heads/master | 2023-08-18T19:37:54.722565 | 2021-10-18T18:04:03 | 2021-10-18T18:04:03 | 414,952,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,426 | py | import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
from caffe2.torch.fb.fx2trt.tests.test_utils import AccTestCase
from parameterized import parameterized
class TestMinConverter(AccTestCase):
    """fx2trt converter tests covering the three torch.min lowerings:
    dim-reduce, full-reduce, and the elementwise Tensor.min(other) method."""

    @parameterized.expand(
        [
            ("dim0_keepdim", 0, True, torch.randn(2, 2, 3)),
            ("dim1_keepdim", 1, True, torch.randn(2, 2, 3)),
            ("dim2_keepdim", 2, True, torch.randn(2, 2, 3)),
            ("dim3_keepdim", 3, True, torch.randn(2, 2, 3, 3)),
            ("dim2_no_keepdim", 2, False, torch.randn(2, 2, 3)),
            ("dim1_no_keepdim", 1, False, torch.randn(2, 2, 3)),
            ("dim0_no_keepdim", 0, False, torch.randn(2, 2, 3)),
        ]
    )
    def test_min_dim_reduce(self, test_name, dim, keepdim, input):
        """torch.min(x, dim, keepdim) should lower to acc_ops.min_dim_reduce."""

        class MinDimReduce(torch.nn.Module):
            def __init__(self, dim, keepdim):
                super().__init__()
                self.dim = dim
                self.keepdim = keepdim

            def forward(self, x):
                return torch.min(x, self.dim, self.keepdim)

        inputs = [input]
        self.run_test(
            MinDimReduce(dim, keepdim),
            inputs,
            expected_ops={acc_ops.min_dim_reduce},
            # Implicit batch mode cannot reduce over the batch dimension (dim 0).
            test_implicit_batch_dim=(dim != 0),
        )

    @parameterized.expand(
        [
            ("no_dim_no_keepdim"),
        ]
    )
    def test_min_full_reduce(
        self,
        test_name,
    ):
        """torch.min(x) with no dim should lower to acc_ops.min_full_reduce."""

        class MinFullReduce(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return torch.min(x)

        inputs = [torch.randn(3, 2, 3, 3)]
        self.run_test(
            MinFullReduce(),
            inputs,
            expected_ops={acc_ops.min_full_reduce},
            # We can't do a full reduce over the batch dimension
            test_implicit_batch_dim=False,
        )

    @parameterized.expand(
        [
            # Fixed: this list previously contained the same case name twice,
            # which made parameterized generate two identical test methods.
            ("min_method_no_dim_no_keepdim"),
        ]
    )
    def test_min_method(self, test_name):
        """input.min(other) should lower to the elementwise acc_ops.minimum."""

        class MinMethod(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, input, other):
                return input.min(other)

        inputs = [torch.randn(3, 4), torch.randn(3, 4)]
        self.run_test(MinMethod(), inputs, expected_ops={acc_ops.minimum})
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
4adb2fcf27d222cf31a55b5486f751060deec10d | 54a26bf56aebd604d4dece733f08d7d30cd27f89 | /zdemo/aclassview/views.py | 071bb0719b01a7d382ba98131db3b8a1ebd7040c | [
"MIT"
] | permissive | zzZaida/django_27 | b78f5ae8bccfa11074221ba32241878d703aa535 | bbbba8be9547fb815c68e94fadb7e8b6eebf75c9 | refs/heads/master | 2020-07-03T19:47:25.037195 | 2019-08-13T12:11:29 | 2019-08-13T12:11:29 | 202,030,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | from django.http import HttpResponse
from django.shortcuts import render
from django.utils.decorators import method_decorator # utils --> 工具类
from django.views import View
# 定义装饰器
def my_decorator(func):
    """Decorator that prints the request method before delegating to the view.

    Uses functools.wraps so the wrapper keeps the wrapped view's __name__ and
    docstring — required by utilities that look the view up by name, such as
    django's method_decorator(..., name=...).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(request, **kwargs):
        print('添加装饰器-----', request.method)
        return func(request, **kwargs)
    return wrapper
# @method_decorator(my_decorator,name='get')
@method_decorator(my_decorator, name='dispatch')  # wraps every verb routed through dispatch()
class IndexView(View):
    """Class-based registration view.

    my_decorator is applied to dispatch, so both GET and POST requests are
    logged before being handled.
    """

    def get(self, request):
        # GET: return the registration-page placeholder.
        return HttpResponse('method_decorator------register.html')

    def post(self, request):
        # POST: registration-logic placeholder.
        return HttpResponse('method_decorator-------这里实现注册逻辑')
def index(request):
    """Function-based registration view: GET renders the form, anything else registers."""
    if request.method != 'GET':
        # Non-GET (i.e. POST) is treated as the registration submission.
        return HttpResponse('这里实现注册逻辑')
    # GET: render the registration page.
    return render(request, 'register.html')
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
90f8b5f0863837b4f24ed7543348060ac40bbcc6 | d842a95213e48e30139b9a8227fb7e757f834784 | /gcloud/google-cloud-sdk/.install/.backup/lib/surface/dataproc/clusters/create.py | ebf2e974a6b41b416363de240513ced274b6ed35 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/JobSniperRails | f37a15edb89f54916cc272884b36dcd83cdc868a | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | refs/heads/master | 2022-11-22T18:12:37.972441 | 2019-09-20T22:43:14 | 2019-09-20T22:43:14 | 282,293,504 | 0 | 0 | MIT | 2020-07-24T18:47:35 | 2020-07-24T18:47:34 | null | UTF-8 | Python | false | false | 4,982 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create cluster command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.dataproc import compute_helpers
from googlecloudsdk.api_lib.dataproc import constants
from googlecloudsdk.api_lib.dataproc import dataproc as dp
from googlecloudsdk.api_lib.dataproc import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.dataproc import clusters
from googlecloudsdk.command_lib.kms import resource_args as kms_resource_args
from googlecloudsdk.command_lib.util.args import labels_util
def _CommonArgs(parser, beta=False):
    """Register flags common to all tracks."""
    base.ASYNC_FLAG.AddToParser(parser)
    parser.add_argument('name', help='The name of this cluster.')
    clusters.ArgsForClusterRef(parser, beta, include_ttl_config=True)
    # Expose the generic KMS key flags under gce-pd-specific spellings.
    pd_kms_overrides = {
        'kms-key': '--gce-pd-kms-key',
        'kms-keyring': '--gce-pd-kms-key-keyring',
        'kms-location': '--gce-pd-kms-key-location',
        'kms-project': '--gce-pd-kms-key-project',
    }
    kms_resource_args.AddKmsKeyResourceArg(
        parser, 'cluster', flag_overrides=pd_kms_overrides)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
    """Create a cluster."""

    # GA track: beta-only flags are not registered (see _CommonArgs).
    BETA = False

    detailed_help = {
        'EXAMPLES': """\
          To create a cluster, run:

            $ {command} my_cluster
          """
    }

    @staticmethod
    def Args(parser):
        # Register the flags shared by all release tracks.
        _CommonArgs(parser, beta=False)

    @staticmethod
    def ValidateArgs(args):
        # Reject flag combinations inconsistent with --single-node.
        if args.single_node:
            # --num-workers and --num-preemptible-workers must be None (unspecified)
            # or 0
            if args.num_workers:
                raise exceptions.ConflictingArgumentsException(
                    '--single-node', '--num-workers')
            if args.num_preemptible_workers:
                raise exceptions.ConflictingArgumentsException(
                    '--single-node', '--num-preemptible-workers')
        # The legacy zero-workers property is superseded by --single-node.
        if constants.ALLOW_ZERO_WORKERS_PROPERTY in args.properties:
            raise exceptions.InvalidArgumentException(
                '--properties',
                'Instead of %s, use gcloud beta dataproc clusters create '
                '--single-node to deploy single node clusters' %
                constants.ALLOW_ZERO_WORKERS_PROPERTY)

    def Run(self, args):
        # Validate flags, build the Cluster proto from args, then submit the
        # create request (optionally asynchronous).
        self.ValidateArgs(args)
        dataproc = dp.Dataproc(self.ReleaseTrack())
        cluster_ref = util.ParseCluster(args.name, dataproc)
        compute_resources = compute_helpers.GetComputeResources(
            self.ReleaseTrack(), args.name)
        cluster_config = clusters.GetClusterConfig(args, dataproc,
                                                   cluster_ref.projectId,
                                                   compute_resources, self.BETA,
                                                   include_ttl_config=True)
        cluster = dataproc.messages.Cluster(
            config=cluster_config,
            clusterName=cluster_ref.clusterName,
            projectId=cluster_ref.projectId)
        self.ConfigureCluster(dataproc.messages, args, cluster)
        # NOTE(review): `args.async` is a syntax error on Python 3.7+, where
        # `async` became a reserved keyword; this file predates that change.
        return clusters.CreateCluster(dataproc, cluster, args.async, args.timeout)

    @staticmethod
    def ConfigureCluster(messages, args, cluster):
        """Performs any additional configuration of the cluster."""
        # Labels come from the --labels flag, parsed into the proto map type.
        cluster.labels = labels_util.ParseCreateArgs(args,
                                                     messages.Cluster.LabelsValue)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class CreateBeta(Create):
"""Create a cluster."""
BETA = True
@staticmethod
def Args(parser):
_CommonArgs(parser, beta=True)
clusters.BetaArgsForClusterRef(parser)
@staticmethod
def ValidateArgs(args):
super(CreateBeta, CreateBeta).ValidateArgs(args)
if args.master_accelerator and 'type' not in args.master_accelerator:
raise exceptions.InvalidArgumentException(
'--master-accelerator', 'accelerator type must be specified. '
'e.g. --master-accelerator type=nvidia-tesla-k80,count=2')
if args.worker_accelerator and 'type' not in args.worker_accelerator:
raise exceptions.InvalidArgumentException(
'--worker-accelerator', 'accelerator type must be specified. '
'e.g. --worker-accelerator type=nvidia-tesla-k80,count=2')
| [
"luizfper@gmail.com"
] | luizfper@gmail.com |
a3b5797db823677eca00cbbb4314cf2ab0379c65 | 3b9bf497cd29cea9c24462e0411fa8adbfa6ba60 | /leetcode/Problems/468--Validate-IP-Address-Medium.py | a283e3d9f2615a578f4a10ccff3742d93dffa0fb | [] | no_license | niteesh2268/coding-prepation | 918823cb7f4965bec096ec476c639a06a9dd9692 | 19be0766f6b9c298fb32754f66416f79567843c1 | refs/heads/master | 2023-01-02T05:30:59.662890 | 2020-10-17T13:12:34 | 2020-10-17T13:12:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | class Solution:
def validateIPV4(self, IP):
    """Return True iff IP is a valid dotted-quad IPv4 address.

    Rules: exactly four dot-separated parts, each 1-3 ASCII digits,
    value 0-255, and no leading zeros (except the single digit "0").
    """
    parts = IP.split('.')
    if len(parts) != 4:
        return False
    for part in parts:
        if not 1 <= len(part) <= 3:
            return False
        # Check ASCII digits explicitly: str.isdigit() also accepts
        # non-ASCII unicode digits (e.g. '١'), which must not validate.
        if any(ch not in '0123456789' for ch in part):
            return False
        if int(part) > 255:
            return False
        if part[0] == '0' and len(part) > 1:
            return False
    return True
def validateIPV6(self, IP):
    """Return True iff IP is a valid fully-expanded (8-group) IPv6 address.

    Each colon-separated group must be 1-4 hexadecimal digits; the
    abbreviated '::' form is intentionally rejected (per LeetCode 468).
    """
    groups = IP.split(':')
    if len(groups) != 8:
        return False
    # Set membership is O(1) per character, vs. the O(n) list scan before.
    hex_digits = set('0123456789abcdefABCDEF')
    for group in groups:
        if not 1 <= len(group) <= 4:
            return False
        if any(ch not in hex_digits for ch in group):
            return False
    return True
def validIPAddress(self, IP: str) -> str:
    """Classify IP as 'IPv4', 'IPv6', or 'Neither'.

    The presence of '.' routes to the IPv4 validator, otherwise ':' routes
    to the IPv6 validator; anything else is 'Neither'.
    """
    if '.' in IP:
        return 'IPv4' if self.validateIPV4(IP) else 'Neither'
    if ':' in IP:
        return 'IPv6' if self.validateIPV6(IP) else 'Neither'
    return 'Neither'
| [
"akulajayaprakash@gmail.com"
] | akulajayaprakash@gmail.com |
5dc820764db1b26f0d3a0a84d332b95f306ea84b | 4c83b4d7aca6bbcd15b922ad7314440fea7c9a70 | /2020-07-27_modo_horario_cp_onda1_10d/bkp/script_modo_horario_2020-03-05.py | f79f6c5aa1e5becacdcfb011cd1bd0d48794ccbc | [] | no_license | poloplanejamento/odmatrix-joinville | 63b60a85055700698cdb590c181e7c8a4d5c7361 | be7ce0814fb9dad2d289cd836dde51baa9c0850d | refs/heads/main | 2023-01-23T11:43:45.451126 | 2020-12-10T23:17:58 | 2020-12-10T23:17:58 | 320,402,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,337 | py | #!/usr/bin/env python3
# Bibliotecas
from http.client import HTTPSConnection
from base64 import b64encode
import json
import csv
import pandas as pd
# Variables
projectID = "40"  # Project ID, as shown in the web frontend
c = HTTPSConnection("api.odmatrix.app")
# NOTE(review): hard-coded API credential committed to source — consider
# loading it from an environment variable instead.
userAndPass = b64encode(b"fe6b53f0280443d5bd40d5d30694f356").decode("ascii")
headers = { 'Authorization' : 'Basic %s' % userAndPass }
finall_list = []
# Date format: YYYY-MM-DD. Up to three dates per array and the script seems to run fine.
# Desired dates: 12/11, 13/11, 19/11, 20/11, 21/11, 03/03, 04/03, 05/03, 11/03 and 12/03
for date in ["2020-03-05"] :
    for ftriptype in ["microtrip","bus","private_transport"] :
        # One request per one-hour departure slot of the day.
        for ftimeorigin in ["0000_0059","0100_0159","0200_0259","0300_0359","0400_0459","0500_0559","0600_0659","0700_0759","0800_0859","0900_0959","1000_1059","1100_1159","1200_1259","1300_1359","1400_1459","1500_1559","1600_1659","1700_1759","1800_1859","1900_1959","2000_2059","2100_2159","2200_2259","2300_2359"] :
            print(ftimeorigin)
            request = "/generatematrix?format=json&project={}&date={}&ftriptype={}&ftimeorigin={}&fchk_XXX=true".format(projectID, date, ftriptype, ftimeorigin)
            c.request('GET', request, headers=headers)
            res = c.getresponse()
            data = res.read()
            matrix = json.loads(data)
            # Flatten the OD matrix: one dict per non-zero origin/destination cell.
            for i, column in enumerate(matrix['ColumnLabels']):
                for j, row in enumerate(matrix['RowLabels']):
                    value = matrix['Data'][j][i]
                    if value == 0:
                        continue
                    full_row = {}
                    full_row['ProjectID'] = projectID
                    full_row['Date'] = date
                    full_row['TimeOrigin'] = ftimeorigin
                    full_row['Origin'] = row
                    full_row['Destination'] = column
                    full_row['Modo'] = ftriptype
                    full_row['Trips'] = value
                    finall_list.append(full_row)
                    print(full_row)
#print(finall_list)
data = pd.DataFrame(finall_list)
# Pivot to wide format: one column per departure-hour slot.
final_data = pd.pivot_table(data, index=['ProjectID', 'Date', 'Origin', 'Destination', 'Modo'], columns='TimeOrigin', values='Trips')
final_data.to_csv("OD_por_modo_horario_fchk_XXX_2020-03-05.csv")
| [
"caiocco@gmail.com"
] | caiocco@gmail.com |
bb2ea7c18040e64a3b1053c49a768e69116d0f47 | 625f2f86f2b2e07cb35204d9b3232427bf462a09 | /HIRun2018PbPb/L1T/L1Ntuple_HIRun2015_HIMinimumBias2_egBypassFGBit1ShapeBit1/l1Ntuple_RAW2DIGI.py | 2a05fe0379f2e8a5c4507ee92d3ee8daa127a5b5 | [] | no_license | ttrk/production | abb84c423a076fd9966276b7ed4350936c755e0b | f8a64c9c38de215802799365f0f7a99e1ee78276 | refs/heads/master | 2023-02-08T23:48:56.355141 | 2023-01-26T08:46:22 | 2023-01-26T08:46:22 | 52,877,406 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,716 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: l1Ntuple -s RAW2DIGI --era=Run2_2017 --customise=L1Trigger/Configuration/customiseReEmul.L1TEventSetupForHF1x1TPs --customise=L1Trigger/Configuration/customiseReEmul.L1TReEmulFromRAW2015 --customise=L1Trigger/L1TNtuples/customiseL1Ntuple.L1NtupleEMU --customise=L1Trigger/Configuration/customiseUtils.L1TTurnOffUnpackStage2GtGmtAndCalo --customise=FWCore/ParameterSet/MassReplace.massReplaceInputTag --conditions=auto:run1_data -n 100 --data --no_exec --no_output --filein=root://xrootd.cmsaf.mit.edu//store/hidata/HIRun2015/HIMinimumBias2/RAW/v1/000/263/261/00000/0029C4B7-149B-E511-BEEE-02163E01431C.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
# CMSSW configuration: re-emulate the Stage-2 L1 trigger from 2015 HI RAW
# data and produce emulator L1 ntuples (auto-generated by cmsDriver, with
# hand-added customisations at the bottom).
process = cms.Process('RAW2DIGI',eras.Run2_2017)

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration.StandardSequences.RawToDigi_Data_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)

# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('root://xrootd.cmsaf.mit.edu//store/hidata/HIRun2015/HIMinimumBias2/RAW/v1/000/263/261/00000/0029C4B7-149B-E511-BEEE-02163E01431C.root'),
    secondaryFileNames = cms.untracked.vstring()
)

process.options = cms.untracked.PSet(

)

# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('l1Ntuple nevts:100'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)

# Output definition

# Additional output definition

# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run1_data', '')

# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.endjob_step = cms.EndPath(process.endOfProcess)

# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.endjob_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)

# customisation of the process.

# Automatic addition of the customisation function from L1Trigger.Configuration.customiseReEmul
from L1Trigger.Configuration.customiseReEmul import L1TEventSetupForHF1x1TPs,L1TReEmulFromRAW2015

#call to customisation function L1TEventSetupForHF1x1TPs imported from L1Trigger.Configuration.customiseReEmul
process = L1TEventSetupForHF1x1TPs(process)

#call to customisation function L1TReEmulFromRAW2015 imported from L1Trigger.Configuration.customiseReEmul
process = L1TReEmulFromRAW2015(process)

# Automatic addition of the customisation function from L1Trigger.L1TNtuples.customiseL1Ntuple
from L1Trigger.L1TNtuples.customiseL1Ntuple import L1NtupleEMU

#call to customisation function L1NtupleEMU imported from L1Trigger.L1TNtuples.customiseL1Ntuple
process = L1NtupleEMU(process)

# Automatic addition of the customisation function from L1Trigger.Configuration.customiseUtils
from L1Trigger.Configuration.customiseUtils import L1TTurnOffUnpackStage2GtGmtAndCalo

#call to customisation function L1TTurnOffUnpackStage2GtGmtAndCalo imported from L1Trigger.Configuration.customiseUtils
process = L1TTurnOffUnpackStage2GtGmtAndCalo(process)

# Automatic addition of the customisation function from FWCore.ParameterSet.MassReplace
from FWCore.ParameterSet.MassReplace import massReplaceInputTag

#call to customisation function massReplaceInputTag imported from FWCore.ParameterSet.MassReplace
process = massReplaceInputTag(process)

# End of customisation functions

# Customisation from command line

# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion

# Hand-added customisations (not generated by cmsDriver): point the Layer-1
# emulator at the unpacked HCAL digis and set the EG bypass bits.
process.simCaloStage2Layer1Digis.hcalToken = cms.InputTag('hcalDigis')
process.caloStage2Params.egBypassFGBit = cms.uint32(1)
process.caloStage2Params.egBypassShapeBit = cms.uint32(1)
| [
"tatark@mit.edu"
] | tatark@mit.edu |
177ec5ab1a07b52261f471748deaed236f5d9924 | 99e88bd6c2bb50e38f5bb68f0d5242def0442f7f | /tests/test_struct.py | a378da250be0b66d31d8f26b5b0d02c745c21343 | [] | no_license | vahtras/vb | ea1bb59a8e6125203d9498f2808a7bf8e6ad5916 | 425402e619aab7d69d7f5d3971439d532d36de0b | refs/heads/master | 2021-01-15T15:42:46.158363 | 2016-10-13T09:06:22 | 2016-10-13T09:06:22 | 47,419,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | import unittest
from util.full import init
from findifftool.core import clgrad, DELTA
from . import vb
from vb.core import *
class StructTest(unittest.TestCase):
    """Unit tests for vb.core Structure/Nod behaviour (overlap, gradients)."""

    def setUp(self):
        # Minimal 2-orbital model: AO overlap S and MO coefficients C are
        # class attributes shared by all Nod instances.
        Nod.S = init([[1.0, 0.1], [0.1, 1.0]])
        Nod.C = init([[0.7, 0.7], [0.7, -0.7]])
        self.alpha0 = Nod([0], [])
        self.alpha1 = Nod([1], [])
        self.beta0 = Nod([], [0])
        self.beta1 = Nod([], [1])
        self.ab00 = Nod([0], [0])

    def tearDown(self):
        pass

    def test_structure_coefficients_consistent(self):
        # A Structure must receive one coefficient per Nod.
        with self.assertRaises(StructError):
            struct = Structure([Nod([0], [0])], [])

    def test_structure_output(self):
        struct_a = Structure([self.alpha0], [1.0])
        self.assertEqual(str(struct_a), "0.963143 (0|)")

    def test_structure_ms(self):
        # Mixing determinants with different M_s is rejected.
        with self.assertRaises(StructError):
            struct = Structure([self.alpha0, self.beta0], [1, 1])

    def test_normalized(self):
        ab = Structure([self.ab00], [1.0])
        self.assertAlmostEqual(ab*ab, 1.0)

    def test_keep_unnormalized(self):
        ab = Structure([self.ab00], [1.0], normalize=False)
        self.assertAlmostEqual(ab*ab, 1.162084)

    def test_norm_gradient(self):
        # Analytic overlap gradient vs. finite differences (clgrad, step DELTA).
        ab = Structure([self.ab00], [1.0])
        #assert False
        num_diff = clgrad(ab, 'overlap', 'C')()
        ana_diff = ab.overlap_gradient()
        np.testing.assert_allclose(ana_diff, num_diff, rtol=DELTA, atol=DELTA)

    def test_struct_mo_propagated(self):
        # Assigning C on the Structure must rebind the same object on its Nods.
        ab = Structure([self.ab00], [1.0])
        ab.C = init([1,2,3,4])
        self.assertEqual(id(ab.C), id(ab.nods[0].C))


if __name__ == "__main__":
    unittest.main()
| [
"vahtras@kth.se"
] | vahtras@kth.se |
0bfa8bd26f28131255750f7fceb64d05ccfe39e6 | 360c777a2b77be466b1cf7c8fd74d6fd04f56b55 | /migrations/versions/1e5cd35569af_.py | eabfd29c2469753fa35c275467e477d5e0946760 | [
"MIT"
] | permissive | hreeder/nexus-auth | 790a3b2623ddf443138a4b0f0af1380dbc4db8ae | 8d51aef01647e32ba4a284f02de73a2caad7cf49 | refs/heads/master | 2021-01-10T10:08:37.190558 | 2016-02-29T12:27:21 | 2016-02-29T12:27:21 | 52,789,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | """empty message
Revision ID: 1e5cd35569af
Revises: 51d27a60b822
Create Date: 2014-06-24 22:26:10.421081
"""
# revision identifiers, used by Alembic.
revision = '1e5cd35569af'        # this migration's id
down_revision = '51d27a60b822'   # parent migration this one applies on top of
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the character_keys association table (character <-> api_key)."""
    op.create_table(
        'character_keys',
        sa.Column('character_id', sa.Integer(), nullable=True),
        sa.Column('key_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['character_id'], ['character.id'], ),
        sa.ForeignKeyConstraint(['key_id'], ['api_key.id'], ),
    )
def downgrade():
    """Reverse upgrade(): drop the character_keys association table."""
    op.drop_table('character_keys')
| [
"harry@harryreeder.co.uk"
] | harry@harryreeder.co.uk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.