blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
58be4518bcc02c88e3fe519fe8cc98faaf47f62f | 75fa11b13ddab8fd987428376f5d9c42dff0ba44 | /smoke-test/tests/test_stateful_ingestion.py | a10cf13a08029da9a728f7d6532ae0e04b1eefc9 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | RyanHolstien/datahub | 163d0ff6b4636919ed223ee63a27cba6db2d0156 | 8cf299aeb43fa95afb22fefbc7728117c727f0b3 | refs/heads/master | 2023-09-04T10:59:12.931758 | 2023-08-21T18:33:10 | 2023-08-21T18:33:10 | 246,685,891 | 0 | 0 | Apache-2.0 | 2021-02-16T23:48:05 | 2020-03-11T21:43:58 | TypeScript | UTF-8 | Python | false | false | 5,402 | py | from typing import Any, Dict, Optional, cast
from datahub.ingestion.api.committable import StatefulCommittable
from datahub.ingestion.run.pipeline import Pipeline
from datahub.ingestion.source.sql.mysql import MySQLConfig, MySQLSource
from datahub.ingestion.source.state.checkpoint import Checkpoint
from datahub.ingestion.source.state.entity_removal_state import GenericCheckpointState
from datahub.ingestion.source.state.stale_entity_removal_handler import StaleEntityRemovalHandler
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from tests.utils import (
get_gms_url,
get_mysql_password,
get_mysql_url,
get_mysql_username,
)
def test_stateful_ingestion(wait_for_healthchecks):
    """Smoke test for stateful ingestion (stale-metadata removal).

    Runs the MySQL ingestion pipeline twice against a live DataHub
    instance, dropping one test table between the runs, and asserts that
    the difference between the two checkpoint states is exactly the URN
    of the dropped table.
    """
    def create_mysql_engine(mysql_source_config_dict: Dict[str, Any]) -> Any:
        # Build a SQLAlchemy engine from the same config dict the source uses.
        mysql_config = MySQLConfig.parse_obj(mysql_source_config_dict)
        url = mysql_config.get_sql_alchemy_url()
        return create_engine(url)

    def create_table(engine: Any, name: str, defn: str) -> None:
        create_table_query = text(f"CREATE TABLE IF NOT EXISTS {name}{defn};")
        engine.execute(create_table_query)

    def drop_table(engine: Any, table_name: str) -> None:
        drop_table_query = text(f"DROP TABLE {table_name};")
        engine.execute(drop_table_query)

    def run_and_get_pipeline(pipeline_config_dict: Dict[str, Any]) -> Pipeline:
        # Run the pipeline and fail the test immediately on any pipeline error.
        pipeline = Pipeline.create(pipeline_config_dict)
        pipeline.run()
        pipeline.raise_from_status()
        return pipeline

    def validate_all_providers_have_committed_successfully(pipeline: Pipeline) -> None:
        # Exactly one stateful committable (the DataHub state provider) is expected.
        provider_count: int = 0
        for name, provider in pipeline.ctx.get_committables():
            provider_count += 1
            assert isinstance(provider, StatefulCommittable)
            stateful_committable = cast(StatefulCommittable, provider)
            assert stateful_committable.has_successfully_committed()
            assert stateful_committable.state_to_commit
        assert provider_count == 1

    def get_current_checkpoint_from_pipeline(
        pipeline: Pipeline,
    ) -> Optional[Checkpoint[GenericCheckpointState]]:
        # TODO: Refactor to use the helper method in the metadata-ingestion tests, instead of copying it here.
        mysql_source = cast(MySQLSource, pipeline.source)
        return mysql_source.state_provider.get_current_checkpoint(
            StaleEntityRemovalHandler.compute_job_id(
                getattr(mysql_source, "platform", "default")
            )
        )

    # Source config: stateful ingestion enabled, state stored in DataHub itself.
    source_config_dict: Dict[str, Any] = {
        "host_port": get_mysql_url(),
        "username": get_mysql_username(),
        "password": get_mysql_password(),
        "database": "datahub",
        "stateful_ingestion": {
            "enabled": True,
            "remove_stale_metadata": True,
            "fail_safe_threshold": 100.0,
            "state_provider": {
                "type": "datahub",
                "config": {"datahub_api": {"server": get_gms_url()}},
            },
        },
    }

    pipeline_config_dict: Dict[str, Any] = {
        "source": {
            "type": "mysql",
            "config": source_config_dict,
        },
        "sink": {
            "type": "datahub-rest",
            "config": {"server": get_gms_url()},
        },
        # pipeline_name is the key under which checkpoints are stored, so it
        # must stay constant across both runs.
        "pipeline_name": "mysql_stateful_ingestion_smoke_test_pipeline",
        "reporting": [
            {
                "type": "datahub",
            }
        ],
    }

    # 1. Setup the SQL engine
    mysql_engine = create_mysql_engine(source_config_dict)

    # 2. Create test tables for first run of the pipeline.
    table_prefix = "stateful_ingestion_test"
    table_defs = {
        f"{table_prefix}_t1": "(id INT, name VARCHAR(10))",
        f"{table_prefix}_t2": "(id INT)",
    }
    table_names = sorted(table_defs.keys())
    for table_name, defn in table_defs.items():
        create_table(mysql_engine, table_name, defn)

    # 3. Do the first run of the pipeline and get the default job's checkpoint.
    pipeline_run1 = run_and_get_pipeline(pipeline_config_dict)
    checkpoint1 = get_current_checkpoint_from_pipeline(pipeline_run1)
    assert checkpoint1
    assert checkpoint1.state

    # 4. Drop table t1 created during step 2 + rerun the pipeline and get the checkpoint state.
    drop_table(mysql_engine, table_names[0])
    pipeline_run2 = run_and_get_pipeline(pipeline_config_dict)
    checkpoint2 = get_current_checkpoint_from_pipeline(pipeline_run2)
    assert checkpoint2
    assert checkpoint2.state

    # 5. Perform all assertions on the states: only t1's URN may differ.
    state1 = checkpoint1.state
    state2 = checkpoint2.state
    difference_urns = list(
        state1.get_urns_not_in(type="*", other_checkpoint_state=state2)
    )
    assert len(difference_urns) == 1
    assert (
        difference_urns[0]
        == "urn:li:dataset:(urn:li:dataPlatform:mysql,datahub.stateful_ingestion_test_t1,PROD)"
    )

    # 6. Cleanup table t2 as well to prevent other tests that rely on data in the smoke-test world.
    drop_table(mysql_engine, table_names[1])

    # 7. Validate that all providers have committed successfully.
    # NOTE: The following validation asserts for presence of state as well
    # and validates reporting.
    validate_all_providers_have_committed_successfully(pipeline_run1)
    validate_all_providers_have_committed_successfully(pipeline_run2)
| [
"noreply@github.com"
] | RyanHolstien.noreply@github.com |
ffda0a09de33d284ab2df4cc442161c7af34907f | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/leetcode/leetCode/LinkedList/160_IntersectionOfTwoLinkedLists.py | 02ca4ec4485da1426b1045081c588a002f6a40d7 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 557 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """LeetCode 160: find the node where two singly linked lists intersect."""

    def getIntersectionNode(self, headA, headB):
        """Return the intersection node of the two lists, or None.

        Two cursors walk both lists; when a cursor falls off the end of
        its list it restarts at the head of the *other* list.  Both then
        travel lenA + lenB steps, so they meet at the shared node (or at
        None when the lists never intersect).
        """
        walker_a, walker_b = headA, headB
        while walker_a != walker_b:
            walker_a = walker_a.next if walker_a else headB
            walker_b = walker_b.next if walker_b else headA
        return walker_a
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
d4035a0d109421eeaa4a7aa57eb5f56252559836 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/118/usersdata/37/25629/submittedfiles/questao2.py | 8a87584abfbf2f20ff1af27fd15c677482690be2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | tom1=1
# NOTE(review): student exercise script kept as submitted; it does NOT run:
# - `input()` returns a string but is used as a range bound (needs int(...))
# - `m` is used before being defined, and `.shape(...)` is called like a
#   function (presumably a numpy array was intended — TODO confirm)
# - the line "m=m.append(input(...)" is missing its closing parenthesis
# - the `if m[i,j]!=0` below is missing the trailing ':'
# - `l=l.append(...)` rebinds l to None (list.append returns None)
# - `print l` is Python-2 syntax
tom2=2
tom3=3
tom4=4
tom5=5
tom6=6
tom7=7
tom8=8
tom9=9
t0m10=10
tom0=0
l=[]
n=input('Digite n: ')
for i in range(0,n,1):
    for i in range(0,m.shape(0),1):
        for j in range(0,m.shape(1),1):
            m=m.append(input('Digite 0 ou -1: ')
            if m[i,j]!=0
                d=(m[i,j]-((m[i,j]+1)!=(m[i,j]==0)))
                m[i,j]=d
            else:
                m[i,j]=tom0
            l=l.append(m[i,j])
print l
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
94fa1da6010571b7f8dc49b5540b59295bb345fc | ac227cc22d5f5364e5d029a2cef83816a6954590 | /applications/physbam/physbam-lib/Scripts/Archives/pd/sim/CLIENT_LIBRARY.py | e7a5d4c3b369bd10ee2e36f79dfcf8ecb4aacd02 | [
"BSD-3-Clause"
] | permissive | schinmayee/nimbus | 597185bc8bac91a2480466cebc8b337f5d96bd2e | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | refs/heads/master | 2020-03-11T11:42:39.262834 | 2018-04-18T01:28:23 | 2018-04-18T01:28:23 | 129,976,755 | 0 | 0 | BSD-3-Clause | 2018-04-17T23:33:23 | 2018-04-17T23:33:23 | null | UTF-8 | Python | false | false | 4,921 | py | from pd.common import CONFIG
from pd.common import SOCKET
import sys
import time
import os
import socket
import dialog
# Module-level connection to the pdsim server, established at import time.
# The commented-out variant reads the endpoint and TLS credentials from
# environment variables instead of the CONFIG module.
client=None
try:
    client=SOCKET.CLIENT(CONFIG.pdsim_server_host,CONFIG.pdsim_server_port,
        (CONFIG.client_private_key_file,CONFIG.client_certificate_file,CONFIG.ca_certificate_file))
    # client=SOCKET.CLIENT(os.environ["PSIM_SERVER_HOST"],int(os.environ["PSIM_SERVER_PORT"]),
    #                      (os.environ["PSIM_CLIENT_KEY"],os.environ["PSIM_CLIENT_CERT"],os.environ["PSIM_CA_CERT"]))
except KeyError:
    print "You must define the environment variables PSIM_SERVER_HOST and PSIM_SERVER_PORT, PSIM_CLIENT_KEY, \nPSIM_CA_CERT, PSIM_CLIENT_CERT"
    sys.exit(1)
except socket.error:
    print "Unable to connect to server"
    sys.exit(1)
# User on whose behalf sessions are created below.
username=os.environ["USER"]
def handle_dialog_code(d,code):
    """Terminate the program when the user cancelled or escaped a dialog.

    Returns 0 for any other (accepted) dialog result code.
    """
    cancelled = code in (d.DIALOG_CANCEL, d.DIALOG_ESC)
    if cancelled:
        sys.exit(1)
    return 0
def New_Session(d):
    """Interactively create a new simulation session and return its id.

    Prompts (via the dialog object `d`) for memory, CPU count and a
    label, then registers the session with the pdsim server.
    """
    # Get memory
    (code,answer)=d.inputbox("How much memory (GBytes) will this job take?",init="4")
    handle_dialog_code(d,code)
    memory=int(answer)
    # Get CPUs
    (code,answer)=d.inputbox("How many CPUs will this job take?",init="1")
    handle_dialog_code(d,code)
    cpus=int(answer)
    # Get Label
    (code,label)=d.inputbox("What do you call this job?",init="")
    handle_dialog_code(d,code)
    # Try making the job
    session_info=client.Create_Session(username,memory,cpus)
    id=None
    try:
        id=session_info["id"]
    except:
        # Server reply had no "id" field; bail out.
        d.infobox("Failed to get session")
        sys.exit(1)
    client.Label_Session(id,label)
    return id
# query_states is a list of states we want
# users is a list of users we want
def Get_Session(d,query_states=None,users=None):
    """Show a session picker (optionally filtered); return the chosen id."""
    sessions=client.Session_List()
    formatted_choices=[]
    for i in sessions.keys():
        session=sessions[i]
        # Apply the optional state and user filters.
        if query_states and not session["state"] in query_states: continue
        if users and not session["username"] in users: continue
        statestr=session["state"]
        if statestr=="active": statestr="active@%s"%session["machine"]
        formatted_choices.append((str(session["id"]),"%-8s %-20s %-200s"%(session["username"],statestr,session["label"])))
    if len(formatted_choices)==0:
        print "No sessions found with state %s and users %s"%(repr(query_states),repr(users))
        sys.exit(1)
    (code,session)=d.menu("Choose session", width=230,height=-1,menu_height=0,choices=formatted_choices)
    handle_dialog_code(d,code)
    return int(session)
def Label_Session(d,id):
    """Prompt for a new label (pre-filled with the current one) and save it."""
    label=client.Session_Info(id)["label"]
    (code,label)=d.inputbox("What should the label be?",init=label)
    handle_dialog_code(d,code)
    client.Label_Session(id,label)
def Deactivate_Session(d,id):
    """Move session `id` to the user's choice of 'inactive' or 'done'."""
    (code,state)=d.menu("What state should it get?",width=60,height=-1,menu_height=0,choices=[("inactive","Not running but might be soon"),("done","Pretty much done")])
    handle_dialog_code(d,code)
    client.Deactivate_Session(id,state)
def Activate_Session(d,id):
    """Let the user pick a host and attach session `id` to it.

    Loops until attaching succeeds.  Each menu entry shows the host's
    remaining CPU/memory after subtracting all existing claims.
    """
    while 1:
        hosts=client.Host_List()
        formatted=[]
        for host in hosts.keys():
            claims=hosts[host]["claims"]
            mem=hosts[host]["max_memory"]
            cpu=hosts[host]["max_cpus"]
            users=[]
            # Subtract every existing claim from the host's capacity.
            for claim in claims.keys():
                cpu-=claims[claim]["cpus"]
                mem-=claims[claim]["memory"]
                users.append(claims[claim]["user"])
            avail_string="Free CPU=%2d Free Mem=%3d claims=%s"%(cpu,mem,", ".join(users))
            formatted.append((host,avail_string))
        if len(formatted)==0:
            print "No hosts"
            sys.exit(1)
        (code,hostname)=d.menu("Which host you would like?", width=60,height=-1,menu_height=0,choices=formatted)
        handle_dialog_code(d,code)
        # now try to attach
        try:
            client.Activate_Session(id,hostname)
            d.infobox("Session %d successfully attached to %s"%(id,hostname));
            break
        except SOCKET.COMMAND_EXCEPTION,e:
            # Attach failed (e.g. host became full); show the error and re-loop.
            d.msgbox("The following error occured when trying to get host:\n\n"+str(e))
def Status(d,id):
    """Display a message box with session metadata and its user status."""
    info=client.Session_Info(id)
    lines=["%20s %d"%("ID:",info["id"]),
           "%20s %s"%("Label:",info["label"]),
           "%20s %s"%("State:",info["state"]),
           "%20s %d"%("Memory (GB):",info["memory"]),
           "%20s %d"%("CPUs:",info["cpus"]),
           "%20s %s"%("Username:",info["username"]),
           "%20s %s"%("Machine:",info["machine"]),
           "%20s %s"%("Date Created:",time.ctime(info["created_date"])),
           "",
           "User Status",
           "-----------"]
    status=info["user_status"]
    # Sort the user-status keys for a stable display order
    # (Python 2: dict.keys() returns a sortable list).
    stats=status.keys()
    stats.sort()
    for i in stats:
        lines.append(" %20s : %s"%(i,repr(status[i])))
    d.msgbox(width=-1,height=-1,text="\n".join(lines))
| [
"quhang@stanford.edu"
] | quhang@stanford.edu |
b7040f5517b2041949cb1948f352b486386a60f6 | d1d79d0c3889316b298852834b346d4246825e66 | /blackbot/core/wss/ttp/art/art_T1069.002-4.py | b2323cce9e6f0197c920a21ed0d2d5a454bf2cc0 | [] | no_license | ammasajan/Atomic-Red-Team-Intelligence-C2 | 78d1ed2de49af71d4c3c74db484e63c7e093809f | 5919804f0bdeb15ea724cd32a48f377bce208277 | refs/heads/master | 2023-07-17T12:48:15.249921 | 2021-08-21T20:10:30 | 2021-08-21T20:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | from blackbot.core.utils import get_path_in_package
from blackbot.core.wss.atomic import Atomic
from terminaltables import SingleTable
import os
import json
class Atomic(Atomic):
    """ART TTP wrapper for technique T1069.002 (atomic test 4).

    NOTE(review): this class deliberately reuses the name of the imported
    `blackbot.core.wss.atomic.Atomic` base class it subclasses.
    """

    def __init__(self):
        # Identification / bookkeeping metadata for this atomic test.
        self.name = 'Discovery/T1069.002-4'
        self.controller_type = ''
        self.external_id = 'T1069.002'
        self.blackbot_id = 'T1069.002-4'
        self.version = ''
        self.language = 'boo'
        self.description = self.get_description()
        self.last_updated_by = 'Blackbot, Inc. All Rights reserved'
        self.references = ["System.Management.Automation"]
        # Tunable execution options; their values are substituted into the
        # payload template by payload().
        self.options = {
            'OutString': {
                'Description' : 'Appends Out-String to the PowerShellCode',
                'Required' : False,
                'Value' : True,
            },
            'BypassLogging': {
                'Description' : 'Bypasses ScriptBlock and Techniques logging',
                'Required' : False,
                'Value' : True,
            },
            'BypassAmsi': {
                'Description' : 'Bypasses AMSI',
                'Required' : False,
                'Value' : True,
            }
        }

    def payload(self):
        """Render the boo source template with the PowerShell script and
        the current option values substituted for its placeholders."""
        with open(get_path_in_package('core/wss/ttp/art/src/powershell.boo'), 'r') as ttp_src:
            src = ttp_src.read()

        pwsh_script = get_path_in_package('core/wss/ttp/art/pwsh_ttp/discovery/T1069.002-4')
        with open(pwsh_script) as pwsh:
            src = src.replace("POWERSHELL_SCRIPT", pwsh.read())

        src = src.replace("OUT_STRING", str(self.options["OutString"]["Value"]).lower())
        src = src.replace("BYPASS_LOGGING", str(self.options["BypassLogging"]["Value"]).lower())
        src = src.replace("BYPASS_AMSI", str(self.options["BypassAmsi"]["Value"]).lower())
        return src

    def get_description(self):
        """Build a human-readable description from the four-line metadata
        header of the PowerShell TTP file."""
        path = get_path_in_package('core/wss/ttp/art/pwsh_ttp/discovery/T1069.002-4')
        with open(path) as text:
            head = [next(text) for l in range(4)]

        technique_name = head[0].replace('#TechniqueName: ', '').strip('\n')
        atomic_name = head[1].replace('#AtomicTestName: ', '').strip('\n')
        description = head[2].replace('#Description: ', '').strip('\n')
        language = head[3].replace('#Language: ', '').strip('\n')

        # Drop '&' characters and hard-wrap the description every 126
        # kept characters.
        aux = ''
        count = 1
        for char in description:
            if char == '&':
                continue
            aux += char
            if count % 126 == 0:
                aux += '\n'
            count += 1

        out = '{}: {}\n{}\n\n{}\n'.format(technique_name, language, atomic_name, aux)
        return out
| [
"root@uw2artic201.blackbot.net"
] | root@uw2artic201.blackbot.net |
c36fd5af1a819d31260212e83fad532b77b86bca | 7bbc83f3f84d7e5057cb04f6895082ab3e016e90 | /keras/keras09_val2.py | b7f8415c59b79aa0aea11813adddbfec31dea9dc | [] | no_license | osy1223/bit_seoul | 908f6adf007c0a7d0df2659b4fae75eb705acaea | b523d78c7b80d378a2d148b35466304f10bf4af4 | refs/heads/master | 2023-02-02T14:26:40.120989 | 2020-12-18T00:46:04 | 2020-12-18T00:46:04 | 311,279,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | import numpy as np
# 1. Prepare the data: train on 1..15, hold out 16..20 for testing.
x_train=np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
y_train=np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
# x_val = np.array([11,12,13,14,15])
# y_val = np.array([11,12,13,14,15])
# x_pred=np.array([16,17,18])
x_test = np.array([16,17,18,19,20])
y_test = np.array([16,17,18,19,20])

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# 2. Build the model: a stack of Dense layers for 1-D regression.
model = Sequential()
model.add(Dense(30,input_dim=1))
model.add(Dense(500))
model.add(Dense(200))
model.add(Dense(800))
model.add(Dense(900))
model.add(Dense(200))
model.add(Dense(700))
model.add(Dense(1))

# 3. Compile and train (MSE loss; MAE reported as the metric).
# model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# validation_split=0.2: the last 20% of the training data is held out for
# validation (x and y are split together, in step).
model.fit(x_train, y_train, epochs=100, validation_split=0.2)

# 4. Evaluate on the held-out range and predict.
loss = model.evaluate(x_test,y_test)
print("loss : ", loss)

y_predict = model.predict(x_test)
print("결과물 : ",y_predict)

# Exercise: fine-tune the model to reduce the prediction error.
#RMSE 함수 사용자 정의
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
    """Root-mean-squared error between true and predicted values.

    Computed directly with numpy (sqrt of the mean squared difference),
    which is numerically equivalent to
    np.sqrt(sklearn.metrics.mean_squared_error(...)) but removes the
    sklearn dependency and accepts plain Python sequences as well.
    """
    y_true = np.asarray(y_test, dtype=float)
    y_pred = np.asarray(y_predict, dtype=float)
    return np.sqrt(np.mean((y_true - y_pred) ** 2))
# Report RMSE of the predictions against the test targets.
print("RMSE :", RMSE(y_test,y_predict))

# R2 (coefficient of determination) via scikit-learn.
from sklearn.metrics import r2_score
r2=r2_score(y_test, y_predict)
print("R2 : ",r2) | [
"osu1223@gmail.com"
] | osu1223@gmail.com |
a842165c139624026322c930813f9c75e602bb52 | 0e4a037398cf0a54004d4ab272ebe4ddb296fe7f | /config/settings.py | a4245207b9b55badc3bf291ebc7977ed55f94861 | [] | no_license | sug5806/Blog | 9aacf1c8c1d78688163ae9ef93d23bc252b2b1ec | aa24629722c079f768d7fcde3a937223cb2ff5e0 | refs/heads/develop | 2022-12-17T16:48:09.204870 | 2019-06-01T12:27:30 | 2019-06-01T12:27:30 | 188,932,485 | 0 | 1 | null | 2022-12-08T05:10:54 | 2019-05-28T01:34:59 | Python | UTF-8 | Python | false | false | 3,601 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = ')95clqh=z^rk6asisdh!!r=w!p-m#_^3cb)n*mt1=9tq$$c9^z'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project and third-party apps.
    'post',
    'ckeditor',
    'sslserver',
    # django-allauth (social login) stack; requires django.contrib.sites.
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.facebook',
    'allauth.socialaccount.providers.naver',
    'django_extensions',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'common_templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Try the standard model backend first, then allauth's.
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]

# Site id for django.contrib.sites (used by the allauth apps above).
SITE_ID = 1

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_REDIRECT_URL = '/' | [
"sug5806@gmail.com"
] | sug5806@gmail.com |
77120a6fa5a36871153e38ba22f1a9f83d82483e | 0adf94fc39a02018165b62e93dd83edddd041230 | /.history/Jobs/views/views_1_20190221112754.py | 2132bf17a5068dace16ac781e78dd077e3aeab86 | [] | no_license | SabitDeepto/BrJobs | 1e3baa143331cf46b9c70911c6644d1efd4fffd6 | 1a458c8c667f8093a2325d963e5542655467c7aa | refs/heads/master | 2020-04-24T08:02:26.350007 | 2019-03-17T05:53:30 | 2019-03-17T05:53:30 | 171,818,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | # from django.contrib.auth.decorators import login_required
from django.shortcuts import render
# from .models import SolutionPost
def home(request):
    """Render the landing page."""
    return render(request, 'basic/index.html')
def test(request):
    """Render the scratch/test template."""
    return render(request, 'test.html')
# def update_profile(request, user_id):
# user = User.objects.get(pk=user_id)
# user.profile.bio = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit...'
# user.save()
# @login_required
@transaction.atomic
def update_profile(request):
    """Update the logged-in user's User and Profile forms atomically.

    NOTE(review): `transaction`, `UserForm`, `ProfileForm`, `messages`,
    `redirect` and `_` are not imported in the visible part of this file —
    verify the missing imports exist before enabling this view.
    """
    if request.method == 'POST':
        user_form = UserForm(request.POST, instance=request.user)
        profile_form = ProfileForm(request.POST, instance=request.user.profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, _('Your profile was successfully updated!'))
            return redirect('settings:profile')
        else:
            messages.error(request, _('Please correct the error below.'))
    else:
        # GET: unbound forms pre-filled from the current user.
        user_form = UserForm(instance=request.user)
        profile_form = ProfileForm(instance=request.user.profile)
    return render(request, 'profiles/profile.html', {
        'user_form': user_form,
        'profile_form': profile_form
    })
"deepto69@gmail.com"
] | deepto69@gmail.com |
87193fc338f23a42a20af7ae9ae3eaa872a6f802 | c533017c711b7edf3fb8590504c983286a509804 | /logging_level.py | 8fe5066a5caa1ec87ca7e6a6cb84c5d8893826b2 | [] | no_license | zgotter/Flask | 78dface6b7968087aa19f7221d21d3dbd8793dda | c0ed83812a4cdd7ce70ca900712c6ac40f9c71d0 | refs/heads/master | 2022-06-17T18:41:28.008422 | 2020-05-08T08:05:06 | 2020-05-08T08:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | import logging
# Pass filename='test.log' so records are written to a file, not stderr.
logging.basicConfig(filename='test.log', level=logging.DEBUG)

# One record per severity level; with level=DEBUG all five are emitted.
logging.debug("debug")
logging.info("info")
logging.warning("warning")
logging.error("error")
logging.critical("critical")
| [
"shkim4738@gmail.com"
] | shkim4738@gmail.com |
2a36a2937b9885b3dd22b6f636af9dbbc8d2d620 | ca66a4283c5137f835377c3ed9a37128fcaed037 | /djangoPIWebsite/pages/urls.py | 27c2ff3beec290d2c3c8ca0f634526b361686a73 | [] | no_license | NamithaKonda09/majorProject | f377f7a77d40939a659a3e59f5f1b771d88889ad | 4eff4ff18fa828c6278b00244ff2e66522e0cd51 | refs/heads/master | 2023-06-04T20:25:38.450271 | 2021-06-24T19:03:46 | 2021-06-24T19:03:46 | 370,240,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from django.urls import path
from . import views
# URL routes for the `pages` app.
# NOTE(review): the root URL is mapped to `views.analysis` although it is
# named 'home' — confirm this is intentional.
urlpatterns = [
    path('',views.analysis, name='home'),
    path('dashboard/', views.dashboard, name='dashboard'),
    path('annotate/', views.upload_annotate, name='annotate'),
    path('test/', views.upload_test, name='test'),
    path('analysis/', views.analysis, name='analysis'),
    path('result/', views.result, name='result'),
    path('testresult/', views.testresult, name='testresult'),
]
| [
"namithakonda09@gmail.com"
] | namithakonda09@gmail.com |
425c8d1b40bc20b6c30f8231fc233c7a216592df | d66818f4b951943553826a5f64413e90120e1fae | /hackerrank/Algorithms/Circular Array Rotation/solution.py | 2b18de59ae789f0cc2f536aa5aec470e5256052f | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 658 | py | #!/bin/python3
import os
# Complete the circularArrayRotation function below.
def circularArrayRotation(a, k, queries):
    """Answer index queries against `a` rotated right k times, without
    materialising the rotated list: rotated[q] == a[(q - k) mod len(a)]."""
    size = len(a)
    shift = k % size
    answers = []
    for position in queries:
        answers.append(a[(position - shift) % size])
    return answers
if __name__ == '__main__':
    # HackerRank harness: read n, k, q, the array and q query indices from
    # stdin; write one answer per line to the OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    nkq = input().split()

    n = int(nkq[0])  # array size (parsed but not used directly)

    k = int(nkq[1])  # number of right rotations

    q = int(nkq[2])  # number of queries

    a = list(map(int, input().rstrip().split()))

    queries = []
    for _ in range(q):
        queries_item = int(input())
        queries.append(queries_item)

    result = circularArrayRotation(a, k, queries)

    fptr.write('\n'.join(map(str, result)))
    fptr.write('\n')

    fptr.close()
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
a0447be691ddc7f0c16a8a89de42c18b13880fc5 | 147fd8d2fa164c173d3763b868d9df790045e53e | /file_build_failures.py | 8c193f0e151dfb76a8cc58d1c4eb90cf4613d126 | [
"CC0-1.0"
] | permissive | hroncok/mini-mass-rebuild | d49c0da413d48a3092e0393f02d4f7b3015e5022 | f12cae9ad7277c5524628a8da31c884e172a12e8 | refs/heads/master | 2022-03-11T15:32:55.818761 | 2022-01-31T11:18:12 | 2022-01-31T11:18:38 | 122,631,346 | 5 | 3 | null | 2020-10-30T12:56:06 | 2018-02-23T14:26:24 | Python | UTF-8 | Python | false | false | 2,172 | py | import bugzilla
import pathlib
import time
import sys
import webbrowser
from urllib.parse import urlencode
from textwrap import dedent
BUGZILLA = 'bugzilla.redhat.com'  # bugzilla instance queried/filed against below
TRACKER = 1686977 # PYTHON38 tracker bug; queried bugs must block it
def bugzillas():
    """Return all Fedora bugs blocking the PYTHON38 tracker, newest first."""
    bzapi = bugzilla.Bugzilla(BUGZILLA)
    query = bzapi.build_query(product='Fedora')
    query['blocks'] = TRACKER
    # Sort by descending bug id (newest first).
    return sorted(bzapi.query(query), key=lambda b: -b.id)
def bug(bugs, package):
    """Return the first bug in *bugs* filed against *package*, else None."""
    matching = (candidate for candidate in bugs if candidate.component == package)
    return next(matching, None)
def open_bz(package):
    """Open a prefilled 'enter bug' browser form, then a koji search tab,
    for *package*.

    Side effects only: opens two browser tabs and sleeps 1s after each so
    the browser keeps up when this is called in a loop.
    """
    summary = f"{package} fails to build with Python 3.8 on Fedora 32+"
    description = dedent(f"""
        {package} fails to build with Python 3.8.0b4 in Fedora 32.
        See the build failures at https://koji.fedoraproject.org/koji/search?match=glob&type=package&terms={package}
        ...
        It is not important whether the problem is relevant to Python 3.8, this issue is blocking the Python 3.8 rebuilds.
        If this package won't build with 3.8, it won't be installable, along with all its dependent packages, in Fedora 32 and further.
        Furthermore, as it fails to install, its dependent packages will fail to install and/or build as well.
        Please rebuild the package in Fedora 32 (rawhide).
        Let us know here if you have any questions. Thank You!
    """)
    url_prefix = 'https://bugzilla.redhat.com/enter_bug.cgi?'
    # Query parameters understood by bugzilla's enter_bug form.
    params = {
        'short_desc': summary,
        'comment': description,
        'component': package,
        'blocked': 'PYTHON38,F32FTBFS,F32FailsToInstall',
        'product': 'Fedora',
        'version': 'rawhide',
        'bug_severity': 'high',
    }
    webbrowser.open(url_prefix + urlencode(params))
    time.sleep(1)
    webbrowser.open(f'https://koji.fedoraproject.org/koji/search?match=glob&type=package&terms={package}')
    time.sleep(1)
# Read the package list (one name per line) from the file given on the CLI.
pkgs = pathlib.Path(sys.argv[1]).read_text().splitlines()

print('Getting bugzillas...', end=' ', flush=True)
bugs = bugzillas()
print('..done.')

# For every package with no open bug yet (or only a CLOSED one), open a
# prefilled bug-report form in the browser.
for pkg in pkgs:
    bz = bug(bugs, pkg)
    if bz:
        print(f'{pkg} bz{bz.id} {bz.status}')
    if not bz or bz.status == 'CLOSED':
        open_bz(pkg)
| [
"miro@hroncok.cz"
] | miro@hroncok.cz |
dc6c93b1ba2ac42b69d9d29f8212a43187f85984 | 4a59e35a12af911f588224f07aab52d24fd6b044 | /venv/lib/python2.7/site-packages/mbed_host_tests/host_tests_conn_proxy/conn_primitive_remote.py | bc5e0168c2d0d9dc23072be057abf7bb022bd8f6 | [] | no_license | ryankurte/mbed-node | 95caba48404e06c4f21f48a850152c08d911bbc8 | 3584d391fca00fc3cda138c26ae28fdbe5527d83 | refs/heads/master | 2021-01-22T13:23:08.506740 | 2017-11-02T17:40:26 | 2017-11-02T17:40:26 | 100,665,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,698 | py | #!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mbed_host_tests import DEFAULT_BAUD_RATE
from mbed_host_tests.host_tests_conn_proxy.conn_primitive import ConnectorPrimitive
class RemoteConnectorPrimitive(ConnectorPrimitive):
    """ConnectorPrimitive backed by a Global Resource Manager (GRM) service."""

    def __init__(self, name, config):
        ConnectorPrimitive.__init__(self, name)
        self.config = config
        self.target_id = self.config.get('target_id', None)
        # GRM endpoint, module name and DUT selection parameters.
        self.grm_host = config.get('grm_host', None)
        self.grm_port = int(config.get('grm_port', 8000))
        self.grm_module = config.get('grm_module', 'unknown')
        self.platform_name = config.get('platform_name', None)
        self.baudrate = config.get('baudrate', DEFAULT_BAUD_RATE)
        self.image_path = config.get('image_path', None)
        self.polling_timeout = int(config.get('polling_timeout', 60))

        # Global Resource Mgr tool-kit
        self.remote_module = None
        self.selected_resource = None
        self.client = None

        # Initialize remote resource manager
        self.__remote_init()
    def __remote_init(self):
        """! Initialize DUT using GRM APIs.
        @return True on success; False when module loading, resource
                allocation, or flash/connect/reset failed (errors are
                logged, not raised).
        """
        # We want to load global resource manager module by name from command line (switch --grm)
        try:
            self.remote_module = __import__(self.grm_module)
        except ImportError as e:
            self.logger.prn_err("unable to load global resource manager '%s' module!"% self.grm_module)
            self.remote_module = None
            return False

        self.logger.prn_inf("remote resources initialization: remote(host=%s, port=%s)"% (self.grm_host, self.grm_port))

        # Connect to remote global resource manager
        self.client = self.remote_module.create(host=self.grm_host, port=self.grm_port)

        # First get the resources
        resources = self.client.get_resources()
        self.logger.prn_inf("remote resources count: %d" % len(resources))

        # Query for available resource
        # Automatic selection and allocation of a resource
        try:
            self.selected_resource = self.client.allocate({
                "platform_name": self.platform_name
            })
        except self.remote_module.resources.ResourceError as e:
            self.logger.prn_err("can't allocate resource: '%s', reason: %s"% (self.platform_name, str(e)))
            return False

        # Remote DUT connection, flashing and reset...
        try:
            self.__remote_disconnect()
            self.__remote_flashing(self.image_path)
            self.__remote_connect(baudrate=self.baudrate)
            self.__remote_reset()
        except Exception as e:
            self.logger.prn_err(str(e))
            return False
        return True
def __remote_connect(self, baudrate=DEFAULT_BAUD_RATE, buffer_size=6):
"""! Open remote connection to DUT """
self.logger.prn_inf("opening connection to platform at baudrate='%s, bufferSize=%d'"% (baudrate, buffer_size))
if not self.selected_resource:
raise Exception("remote resource not exists!")
try:
serial_parameters = self.remote_module.SerialParameters(lineMode=False, baudrate=baudrate, bufferSize=buffer_size)
self.selected_resource.openConnection(parameters=serial_parameters)
except self.remote_module.resources.ResourceError as e:
self.logger.prn_inf("openConnection() failed")
raise e
def __remote_disconnect(self):
if not self.selected_resource:
raise Exception("remote resource not exists!")
if self.selected_resource.is_connected:
self.selected_resource.closeConnection()
def __remote_reset(self):
"""! Use GRM remote API to reset DUT """
self.logger.prn_inf("remote resources reset...")
if not self.selected_resource:
raise Exception("remote resource not exists!")
if not self.selected_resource.reset():
raise Exception("remote resources reset failed!")
def __remote_flashing(self, filename, forceflash=False):
"""! Use GRM remote API to flash DUT """
self.logger.prn_inf("remote resources flashing with '%s'..."% filename)
if not self.selected_resource:
raise Exception("remote resource not exists!")
if not self.selected_resource.flash(filename, forceflash=forceflash):
raise Exception("remote resources flashing failed!")
def read(self, count):
    """! Read up to 'count' bytes of data from the remote DUT.
    @param count Number of bytes requested from the remote resource
    @return Data returned by the resource, or an empty string when the
            read failed (the error is logged, not raised)
    @details Raises Exception when no remote resource is allocated.
    """
    if not self.selected_resource:
        raise Exception("remote resource not exists!")
    # Bug fix: the result must be pre-initialised under the name that is
    # actually returned ('data', previously misspelled 'date'); otherwise
    # the except path below raised UnboundLocalError instead of returning.
    data = str()
    try:
        data = self.selected_resource.read(count)
    except self.remote_module.resources.ResourceError as e:
        self.logger.prn_err("RemoteConnectorPrimitive.read(%d): %s"% (count, str(e)))
    return data
def write(self, payload, log=False):
    """! Forward 'payload' to the remote DUT.
    @param payload Data to send
    @param log When True, echo the payload to the transmit log
    @return Always True (writes are fire-and-forget; a missing resource is ignored)
    """
    resource = self.selected_resource
    if resource:
        resource.write(payload)
    if log:
        self.logger.prn_txd(payload)
    return True
def flush(self):
    """! No-op: the remote transport keeps no local buffer to flush."""
    return None
def connected(self):
    """! Check whether the remote DUT link is fully up.
    @return True when the GRM module is loaded, a resource is allocated
            and its connection is open; False otherwise.
    @details Bug fix: the previous all([...]) built the whole list eagerly,
             so it raised AttributeError (None.is_connected) whenever no
             resource was allocated (e.g. after finish()). Short-circuit
             evaluation makes the check safe in every state.
    """
    return bool(self.remote_module and
                self.selected_resource and
                self.selected_resource.is_connected)
def finish(self):
    # Finally once we're done with the resource
    # we disconnect and release the allocation
    # Safe to call repeatedly: selected_resource is cleared after release.
    # NOTE(review): on ResourceError the reference is NOT cleared, so a
    # later call (or __del__) will retry the release — confirm intended.
    if self.selected_resource:
        try:
            if self.selected_resource.is_connected:
                self.selected_resource.closeConnection()
            if self.selected_resource.is_allocated:
                self.selected_resource.release()
            self.selected_resource = None
        except self.remote_module.resources.ResourceError as e:
            self.logger.prn_err("RemoteConnectorPrimitive.finish() failed, reason: " + str(e))

def __del__(self):
    # Best-effort cleanup when the connector is garbage-collected.
    self.finish()
| [
"ryan.kurte@trutest.co.nz"
] | ryan.kurte@trutest.co.nz |
3e942e91f7d2d29231fc55e21c086b11c76337b1 | e288180c977c8fccf31c00bb74b7e8f56ee69303 | /vkrb/newsitem/views.py | ca6ce1be70905efd68a1aa31f6f346ac625c8fed | [] | no_license | kaluginadaria/vkrb-back | 32e0c9aef7a647ea2a2e399c8d999622e993a433 | d037baaa9f17cb038d41dda5dfbf1dbb56acdf90 | refs/heads/master | 2022-12-07T23:36:32.902662 | 2019-05-22T15:06:31 | 2019-05-22T15:06:31 | 179,382,015 | 0 | 0 | null | 2022-11-22T02:38:25 | 2019-04-03T22:52:30 | Python | UTF-8 | Python | false | false | 5,188 | py | from django import forms
from django.contrib.contenttypes.models import ContentType
from django_serializer.base_views import (ListView,
DetailsView,
BaseView,
CreateView,
DeleteView)
from django_serializer.exceptions import ServerError
from django_serializer.mixins import ObjectMixin, SerializerMixin
from django_serializer.permissions import (
PermissionsModelMixin,
PermissionsMixin,
)
from vkrb.core.mixins import EventMixin, LimitOffsetFullPaginator
from vkrb.core.utils import get_absolute_bundle_urls, render_to_pdf
from vkrb.favorites.forms import FavoriteForm
from vkrb.favorites.models import FavoriteItem
from vkrb.newsitem.models import NewsItem, CategoryNewsItem
from vkrb.newsitem.serializers import (
NewsItemSerializer,
CategoryNewsItemSerializer,
)
class NewsListView(EventMixin, ListView):
    """Paginated news feed, optionally filtered by category."""

    class CategoryForm(forms.Form):
        # Optional filter; absence means "all categories".
        category_id = forms.ModelChoiceField(CategoryNewsItem.objects.all(),
                                             required=False)

    def get_serializer_kwargs(self, obj, **kwargs):
        # Pass the requesting user so the serializer can mark favorites etc.
        serializer_kwargs = super().get_serializer_kwargs(obj, **kwargs)
        serializer_kwargs['user'] = self.request.user
        return serializer_kwargs

    def get_queryset(self):
        # Narrow to the requested category when one was supplied.
        queryset = super().get_queryset()
        category = self.request_args.get('category_id')
        if category:
            return queryset.filter(category=category)
        return queryset

    section = 'news'
    args_form = CategoryForm
    authorized_permission = (PermissionsModelMixin.Permission.R,)
    paginator = LimitOffsetFullPaginator
    model = NewsItem
    serializer = NewsItemSerializer


class NewsGetView(DetailsView):
    """Single news item details."""
    authorized_permission = (PermissionsModelMixin.Permission.R,)
    model = NewsItem
    serializer = NewsItemSerializer

    def get_serializer_kwargs(self, obj, **kwargs):
        # Same user-aware serialization as the list view.
        serializer_kwargs = super().get_serializer_kwargs(obj, **kwargs)
        serializer_kwargs['user'] = self.request.user
        return serializer_kwargs


class CategoryView(ListView):
    """All news categories, in their configured display order."""
    authorized_permission = (PermissionsModelMixin.Permission.R,)
    model = CategoryNewsItem
    serializer = CategoryNewsItemSerializer
    paginator = LimitOffsetFullPaginator

    def get_queryset(self):
        return self.model.objects.all().order_by('order')


class CreatePDFView(ObjectMixin, PermissionsMixin, BaseView):
    """Render a single news item (with ordered photos) to a PDF response."""
    model = NewsItem
    authorized_permission = (PermissionsModelMixin.Permission.R,)

    def response_wrapper(self, response):
        # render_to_pdf already returns a complete HTTP response.
        return response

    def get(self, request, *args, **kwargs):
        self.check_r_permission(self.request.user)
        newsitem = self.get_object()
        ctx = {
            'photos': newsitem.attachments.all().order_by('attachments__order'),
            'newsitem': newsitem,
            'css_urls': get_absolute_bundle_urls('pdf', 'css'),
        }
        return render_to_pdf(template_path='newsitem.html', ctx=ctx)


class FavoriteNewsCreateView(CreateView):
    """Add a news item to the current user's favorites; returns the news item."""
    authorized_permission = (PermissionsModelMixin.Permission.R,
                             PermissionsModelMixin.Permission.W)
    serializer = NewsItemSerializer
    form_class = FavoriteForm

    def get_form_kwargs(self):
        # FavoriteForm needs the owner and the target content type.
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user
        kwargs['content_type'] = 'newsitem'
        return kwargs

    def get_serializer_kwargs(self, obj, **kwargs):
        serializer_kwargs = super().get_serializer_kwargs(obj, **kwargs)
        serializer_kwargs['user'] = self.request.user
        return serializer_kwargs

    def post(self, request, *args, **kwargs):
        # The created object is a FavoriteItem; respond with the news item itself.
        inst = super().post(request, *args, **kwargs)
        return inst.content_object


class FavoriteNewsDeleteView(SerializerMixin, DeleteView):
    """Remove a news item from the user's favorites; returns the news item."""
    authorized_permission = (PermissionsModelMixin.Permission.R,
                             PermissionsModelMixin.Permission.D)
    model = FavoriteItem
    serializer = NewsItemSerializer

    # NOTE: this inner class shadows the imported vkrb.favorites.forms.FavoriteForm
    # inside this class body — it only validates the target object id.
    class FavoriteForm(forms.Form):
        object_id = forms.IntegerField()

    def get_args_form(self):
        return self.FavoriteForm

    def get_object(self):
        # Locate the FavoriteItem owned by the requester for this news item.
        content_type = ContentType.objects.get(model='newsitem')
        try:
            return self.model.objects.get(user=self.request.user,
                                          content_type=content_type,
                                          object_id=self.request_args['object_id'])
        except self.model.DoesNotExist:
            raise ServerError(ServerError.NOT_FOUND)

    def get_serializer_kwargs(self, obj, **kwargs):
        serializer_kwargs = super().get_serializer_kwargs(obj, **kwargs)
        serializer_kwargs['user'] = self.request.user
        return serializer_kwargs

    def post(self, request, *args, **kwargs):
        # Delete the favorite, then respond with the (still existing) news item.
        super().post(request, *args, **kwargs)
        try:
            return NewsItem.objects.get(id=self.request_args['object_id'])
        except NewsItem.DoesNotExist:
            raise ServerError(ServerError.NOT_FOUND)
| [
"d.kalugina@ktsstudio.ru"
] | d.kalugina@ktsstudio.ru |
77516ca559071c396386ca15fe21b648acd1f37d | 4148260054c2cf4605dacb8bdef3605c82eca470 | /temboo/Library/RapidShare/AddRealFolder.py | a08056f99e344635a1c16c36fd020a144dfabb6a | [] | no_license | wimsy/actuarize-web | 0f23d5f00afe3d36d430621cdb497d2e64998416 | 5f43af3019da6fb08cafeec9ff0a89df5196b864 | refs/heads/master | 2021-03-12T19:38:21.887681 | 2012-12-19T01:13:50 | 2012-12-19T01:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# AddRealFolder
# Creates a new folder in RapidShare.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
# Temboo-generated SDK classes: bare string literals placed before each def
# are the generator's documentation style and are preserved verbatim.
class AddRealFolder(Choreography):
    """
    Create a new instance of the AddRealFolder Choreography. A TembooSession object, containing a valid
    set of Temboo credentials, must be supplied.
    """
    def __init__(self, temboo_session):
        # Bind this choreography to its library path on the Temboo service.
        Choreography.__init__(self, temboo_session, '/Library/RapidShare/AddRealFolder')

    def new_input_set(self):
        return AddRealFolderInputSet()

    def _make_result_set(self, result, path):
        return AddRealFolderResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return AddRealFolderChoreographyExecution(session, exec_id, path)

"""
An InputSet with methods appropriate for specifying the inputs to the AddRealFolder
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class AddRealFolderInputSet(InputSet):
    """
    Set the value of the Login input for this choreography. ((required, string) Your RapidShare username)
    """
    def set_Login(self, value):
        InputSet._set_input(self, 'Login', value)
    """
    Set the value of the Name input for this choreography. ((required, string) The name of the folder (Max character length is 250 bytes))
    """
    def set_Name(self, value):
        InputSet._set_input(self, 'Name', value)
    """
    Set the value of the Parent input for this choreography. ((optional, integer) The ID of the parent folder. Defaults to 0 for 'root'.)
    """
    def set_Parent(self, value):
        InputSet._set_input(self, 'Parent', value)
    """
    Set the value of the Password input for this choreography. ((required, password) Your RapidShare password)
    """
    def set_Password(self, value):
        InputSet._set_input(self, 'Password', value)

"""
A ResultSet with methods tailored to the values returned by the AddRealFolder choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class AddRealFolderResultSet(ResultSet):
    """
    Retrieve the value for the "Response" output from this choreography execution. ((string) The response from RapidShare. The id of the newly created folder should be returned in the response upon a successful execution.)
    """
    def get_Response(self):
        return self._output.get('Response', None)

class AddRealFolderChoreographyExecution(ChoreographyExecution):
    def _make_result_set(self, response, path):
        return AddRealFolderResultSet(response, path)
| [
"mike.wimsatt@gmail.com"
] | mike.wimsatt@gmail.com |
29e30f6c5d806f391e7443ee20d901d615f8045e | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/codingbat/codingbat-solutions-master/Python/List-2/sum67.py | fb8d4799059832c94349de8442523ed1b0a77f34 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 538 | py | """
Return the sum of the numbers in the array,
except ignore sections of numbers starting with a 6 and
extending to the next 7 (every 6 will be followed by at least one 7).
Return 0 for no numbers.
sum67([1, 2, 2]) → 5
sum67([1, 2, 2, 6, 99, 99, 7]) → 5
sum67([1, 1, 6, 7, 2]) → 4
"""
def sum67(nums):
    """Sum the numbers in nums, skipping every section that starts at a 6
    and runs through the next 7 (the 6 and that 7 are excluded too).
    Returns 0 for an empty list."""
    total = 0
    skipping = False
    for value in nums:
        if skipping:
            # Inside a 6..7 section: nothing counts until the closing 7.
            if value == 7:
                skipping = False
        elif value == 6:
            skipping = True
        else:
            total += value
    return total
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3355d566237ae53f9ec0cd4778ba796a468515fa | de644b254b17a28f82e9212d80872a3d9eca2149 | /lib/mock/asset/CodeTilesetAsset.py | 1a516b75ee1713f62b14e116e258e5eda5c53cdf | [
"MIT"
] | permissive | pixpil/gii | 506bee02b11eb412016b583d807dcfcc485e189c | ba6d94ada86d82bacae06f165567a02585264440 | refs/heads/master | 2021-12-03T06:30:31.503481 | 2021-11-24T03:02:49 | 2021-11-24T03:02:49 | 431,331,021 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | import os.path
import json
from gii.core import AssetManager, AssetLibrary, getProjectPath, app, JSONHelper
from .helper.psd2tileset import TilesetProject
from gii.core.CommonAsset.DataYAMLAsset import DataYAMLAssetManager
def _getModulePath( path ):
    # Resolve a path relative to this module's directory.
    import os.path
    return os.path.dirname( __file__ ) + '/' + path

class CodeTilesetAssetManager(DataYAMLAssetManager):
    """Asset manager for '.code_tileset' YAML files."""

    def getName(self):
        return 'asset_manager.code_tileset'

    def acceptAssetFile(self, filepath):
        # Only plain files with the .code_tileset extension are accepted.
        if not os.path.isfile(filepath): return False
        if not filepath.endswith( '.code_tileset' ): return False
        return True

    def postDataLoad( self, node, data ):
        # Assign each tile a stable 1-based '_id' in iteration order.
        tiles = data.get( 'tiles', None )
        if not tiles: return
        id = 0
        for key, value in list(tiles.items()):
            id +=1
            value[ '_id' ] = id

    def importAsset(self, node, reload = False ):
        # Delegate to the YAML importer, then tag the node's asset type.
        imported = super( CodeTilesetAssetManager, self ).importAsset( node, reload )
        node.assetType = 'code_tileset'
        return imported

# Module-level registration side effects: install the manager and its icon.
CodeTilesetAssetManager().register()
AssetLibrary.get().setAssetIcon( 'code_tileset', 'cell' )
| [
"tommo.zhou@gmail.com"
] | tommo.zhou@gmail.com |
77f288050a171578be6b3051b61925b4a58bd900 | 75c7004744315a22afdad8a68f20c06b8d3efad0 | /网络设备脚本/思科命令行/路由映射.py | b2091c2660925823710d7ecc04e4199e812979c2 | [
"MIT"
] | permissive | cflw/network_device_script | b13cde8719f23402cdd6acd3ca9048a7d65952aa | c3644e933a3c557c44951a0a1994a49357e49c02 | refs/heads/master | 2023-08-03T11:00:29.188101 | 2023-07-29T13:58:09 | 2023-07-29T13:58:09 | 182,526,402 | 18 | 12 | null | null | null | null | UTF-8 | Python | false | false | 2,127 | py | from ..基础接口 import 操作
from ..基础接口 import 策略
from ..命令行接口 import 模式
from ..命令行接口 import 命令
from ..命令行接口 import 路由映射
from . import 访问控制列表 as 访问列表
# Cisco CLI route-map implementation (identifiers are Chinese project names).
class C路由映射组(路由映射.I路由映射组, 模式.C同级模式):
    """Route-map group: a named route-map plus its numbered entries."""
    def __init__(self, a, a名称):
        路由映射.I路由映射组.__init__(self, a, a名称)

    def f模式_节点(self, a序号 = 10, a动作 = True, a操作 = None):
        # Enter (or create) a route-map entry node with the given sequence/action.
        return C路由映射节点(self, a序号, a动作)

@策略.A自动策略()
class C路由映射节点(路由映射.I路由映射节点):
    """One route-map entry: match and set clauses under a sequence number."""
    def __init__(self, a, a序号, a动作):
        路由映射.I路由映射节点.__init__(self, a, a序号, a动作)

    def fg进入命令(self):
        """route-map <name> [action] [sequence-number]"""
        return f"route-map {self.m名称} {访问列表.f生成允许(self.m动作)} {self.m序号}"

    # Match clauses
    @策略.A匹配(策略.E类型.e访问列表)
    def f匹配_访问列表(self, a访问列表, a操作 = 操作.E操作.e添加):
        # Match by ACL: either an ACL object (uses its protocol/name) or a raw id.
        if isinstance(a访问列表, 访问列表.I访问控制列表):
            v命令 = f"match {a访问列表.m协议} address {a访问列表.m名称}"
        else:
            v命令 = f"match ip address {str(a访问列表)}"
        self.f执行当前模式命令(v命令)

    # Set clauses
    @策略.A设置(策略.E类型.e下一跳4)
    def f设置_下一跳4(self, a地址, a操作 = 操作.E操作.e添加):
        # set ip next-hop <address>
        v命令 = f"set ip next-hop {a地址}"
        self.f执行当前模式命令(v命令)

    @策略.A设置(策略.E类型.e默认下一跳4)
    def f设置_默认下一跳4(self, a地址, a操作 = 操作.E操作.e添加):
        # set ip default next-hop <address>
        v命令 = f"set ip default next-hop {a地址}"
        self.f执行当前模式命令(v命令)

    @策略.A设置(策略.E类型.e出接口)
    def f设置_出接口(self, a接口, a操作 = 操作.E操作.e添加):
        # set interface <interface>
        v命令 = f"set interface {a接口}"
        self.f执行当前模式命令(v命令)

    @策略.A设置(策略.E类型.e默认出接口)
    def f设置_默认出接口(self, a接口, a操作 = 操作.E操作.e添加):
        # set default interface <interface>
        v命令 = f"set default interface {a接口}"
        self.f执行当前模式命令(v命令)
| [
"cflw@outlook.com"
] | cflw@outlook.com |
bc1f2755b787437621a08fd37cef0f3df85c95da | c2b8adb8b4062a14bfc7d8c8fa2938359530e028 | /mfes/evaluate_function/eval_sys_poker.py | 6b834b866e2eb3caf02c64e7d4631749ec387747 | [] | no_license | thomas-young-2013/hp-tuner | 1e7d277f3c0135b9032884e3f20b050f19012918 | e606569719a14d8445633e42aedc8296a63a577a | refs/heads/master | 2023-04-15T08:41:02.514912 | 2020-09-14T13:23:55 | 2020-09-14T13:23:55 | 225,173,361 | 0 | 2 | null | 2023-03-24T22:31:25 | 2019-12-01T14:17:29 | Python | UTF-8 | Python | false | false | 405 | py | from __future__ import division, print_function, absolute_import
import os
import sys
from functools import partial
# Make the repository root importable before pulling in project packages.
sys.path.append(os.getcwd())
from solnml.datasets.utils import load_train_test_data
from mfes.evaluate_function.sys.combined_evaluator import train as _train
# Load the 'poker' dataset from the working directory (task_type=0).
train_node, test_node = load_train_test_data('poker', data_dir='./', task_type=0)
# Module-level train() entry point pre-bound to the poker training split.
train = partial(_train, data_node=train_node)
| [
"459240868@qq.com"
] | 459240868@qq.com |
bf59c63dc485ae90cd29cba0e2b0f40a6a812578 | 3c31584c1b661195a567ffd2603d30cb2e270493 | /codeforces/706/B.py | 31dd5d8a311b87f310cd96d3b478f008e8e8580c | [] | no_license | ku-nal/Codeforces | c7f621e35b5d4eea1ed11276ee8e91031252ca91 | df43c2fcbcfd1c9f96b6fe79c7abc9ddee054cb7 | refs/heads/main | 2023-04-10T19:00:40.559074 | 2021-04-27T15:15:51 | 2021-04-27T15:15:51 | 362,154,763 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,268 | py | #===========Template===============
from io import BytesIO, IOBase
import sys,os
# Competitive-programming shorthands for reading ints and printing.
inpl=lambda:list(map(int,input().split()))
inpm=lambda:map(int,input().split())
inpi=lambda:int(input())
inp=lambda:input()
rev,ra,l=reversed,range,len
P=print
BUFSIZE = 8192

# Standard buffered fast-I/O template (wraps the raw fd in a BytesIO buffer).
class FastIO(IOBase):
    newlines = 0
    def __init__(self, file):
        self._fd = file.fileno()
        self.buffer = BytesIO()
        self.writable = "x" in file.mode or "r" not in file.mode
        self.write = self.buffer.write if self.writable else None
    def read(self):
        # Slurp the whole stream into the buffer, then hand it back.
        while True:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            if not b:
                break
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines = 0
        return self.buffer.read()
    def readline(self):
        # Refill until at least one complete line is buffered.
        while self.newlines == 0:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            self.newlines = b.count(b"\n") + (not b)
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines -= 1
        return self.buffer.readline()
    def flush(self):
        if self.writable:
            os.write(self._fd, self.buffer.getvalue())
            self.buffer.truncate(0), self.buffer.seek(0)

class FastIO(IOBase):
    newlines = 0
    def __init__(self, file):
        self._fd = file.fileno()
        self.buffer = BytesIO()
        self.writable = "x" in file.mode or "r" not in file.mode
        self.write = self.buffer.write if self.writable else None
    def read(self):
        while True:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            if not b:
                break
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines = 0
        return self.buffer.read()
    def readline(self):
        while self.newlines == 0:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            self.newlines = b.count(b"\n") + (not b)
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines -= 1
        return self.buffer.readline()
    def flush(self):
        if self.writable:
            os.write(self._fd, self.buffer.getvalue())
            self.buffer.truncate(0), self.buffer.seek(0)

class IOWrapper(IOBase):
    # Text-mode facade over FastIO (ascii encode/decode at the boundary).
    def __init__(self, file):
        self.buffer = FastIO(file)
        self.flush = self.buffer.flush
        self.writable = self.buffer.writable
        self.write = lambda s: self.buffer.write(s.encode("ascii"))
        self.read = lambda: self.buffer.read().decode("ascii")
        self.readline = lambda: self.buffer.readline().decode("ascii")

sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
def input(): return sys.stdin.readline().rstrip("\r\n")
#=========I/p O/p ========================================#
from bisect import bisect_left as bl
from bisect import bisect_right as br
import sys,operator,math,operator
from collections import Counter,deque
import random
#==============To chaliye shuru krte he====================#
# Solution: after sorting, bisect_right(li, a) counts values <= a.
n1=inpi()
li=inpl()
n=inpi()
li.sort()
for i in ra(n):
    a=inpi()
    P(br(li,a))
| [
"kunalmakwana18@gnu.ac.in"
] | kunalmakwana18@gnu.ac.in |
de272a3b846152173db1088aa944b13b8b03e905 | ec7f476240c8edf88bf9500c5b677113d840d6c6 | /PMModels/Implementations/Backtesting/mean_pnl.py | 3cab0e3ceec638f84a2f5440f990c7308e1d435f | [] | no_license | fagan2888/BehavioralAssignmentSolution | 80b82b71a94d0da68783b00046215755c1dd3715 | 15acead2d6af6e59f64a7d3baf7da5356cea57ec | refs/heads/master | 2021-04-21T08:31:40.453758 | 2018-08-27T06:18:41 | 2018-08-27T06:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
'''
Name: Beier (Benjamin) Liu
Date: 7/5/2018
Remark:
Python 3.6 is recommended
Before running please install packages *numpy, scipy, matplotlib
Using cmd line py -3,6 -m pip install [package_name]
'''
import os, time, logging
import copy, math
import functools, itertools
import numpy as np
from Implementations.compute_mean import *
from Implementations.compute_var import *
'''===================================================================================================
File content:
provide optimization target function fitness_sharpe, like cost
==================================================================================================='''
# Mean profit-and-loss per trade (每笔盈亏)
def mean_pnl(trades, freq='annually'):
    '''==============================================================================================
    Arguments:
    trades -- list of objects, past expected returns of strategy
    freq -- string, the frequency of computation
    Returns:
    res -- double,
    =============================================================================================='''
    # Preparation Phrase
    res = 0.0;
    # Handling Phrase
    # NOTE(review): stub — the handling phase is not implemented, so this
    # always returns 0.0 regardless of 'trades' and 'freq'; presumably it
    # should average per-trade pnl via the imported compute_mean helpers.
    # Checking Phrase
    return res
| [
"imbenliu@gmail.com"
] | imbenliu@gmail.com |
021fe2ec6b0f2d796211aa9535bec12b1d96a01e | 12f27e80ee7cf5f5274c98ba2e370defbdafa67b | /data-exporter/pipeline.py | 8dfb1c175af634643b9e62ecb169b82ae425ad77 | [] | no_license | dawnkd/beehive-server | 48f94abfe2392902845f4a682f59fdfefa64dbf7 | 21f61a9c5723554931a9735c2c9398e82186fbd1 | refs/heads/master | 2020-03-19T02:12:53.584756 | 2018-05-31T16:26:55 | 2018-05-31T16:26:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,163 | py | import binascii
import re
import struct
from waggle.coresense.utils import decode_frame as decode_frame_v3
from waggle.protocol.v5.decoder import decode_frame as decode_frame_v5
from waggle.protocol.v5.decoder import convert as convert_v5
def normalize_key(k):
    """Canonicalize a reading key: runs of '-', '_' and '.' collapse to a
    single underscore and the result is lowercased."""
    collapsed = re.sub('[-_.]+', '_', k)
    return collapsed.lower()


def normalize_value(v):
    """Recursively normalize a decoded value: dict keys are canonicalized,
    lists are normalized element-wise, floats are rounded to 3 places and
    everything else passes through unchanged."""
    if isinstance(v, dict):
        return {normalize_key(key): normalize_value(val) for key, val in v.items()}
    if isinstance(v, list):
        return [normalize_value(item) for item in v]
    if isinstance(v, float):
        return round(v, 3)
    return v
def trim_python_repr(s):
    """Strip the b'...' wrapper left behind when a bytes repr was stored as
    text; any other string is returned unchanged."""
    if not s.startswith("b'"):
        return s
    return s[2:-1]
def trim_coresense_packet(source):
    """Slice out the coresense frame: from the first 0xAA start byte through
    the last 0x55 end byte, inclusive. Raises ValueError if either marker
    is missing."""
    first = source.index(b'\xaa')
    last = source.rindex(b'\x55')
    return source[first:last + 1]


def reunpack_if_needed(source):
    """Some frames arrive hex-encoded as ASCII text; detect that (frame does
    not begin with the raw 0xAA byte) and decode back to raw bytes."""
    if source[0] == 0xaa:
        return source
    return binascii.unhexlify(source.decode())
def decode_coresense_3(source):
    """Decode a coresense v3 frame into {sensor: {param: value}}."""
    source = trim_coresense_packet(source)
    source = reunpack_if_needed(source)
    return decode_frame_v3(source)

# Sensors whose "converted" values are still in raw units and are therefore
# republished under the 'raw' topic by decode_coresense_4.
still_raw_sensors = {
    'Chemsense',
    'Si1145',
}

def decode_coresense_4(source):
    """Decode a coresense v4/v5 frame into {('raw'|'converted', sensor): params}."""
    source = trim_coresense_packet(source)
    source = reunpack_if_needed(source)
    unpacked_data = decode_frame_v5(source)
    # Flatten the per-sensor raw readings into a single dict.
    raw_results = {}
    for sensor_id, sensor_data in unpacked_data.items():
        raw_results.update(sensor_data)
    # Apply unit conversion; convert_v5 yields {key: (value, unit)}.
    converted_results = {}
    for sensor_id, sensor_data in unpacked_data.items():
        for key, (value, unit) in convert_v5(sensor_data, sensor_id).items():
            converted_results[key] = value
    # Regroup into the v3 sensor layout, tagging each group raw/converted.
    all_results = {}
    for k, v in map_readings_4to3(raw_results).items():
        all_results[('raw', k)] = v
    for k, v in map_readings_4to3(converted_results).items():
        if k in still_raw_sensors:
            all_results[('raw', k)] = v
        else:
            all_results[('converted', k)] = v
    return all_results
def decode18(data):
    """Decode an Alphasense OPC histogram record: 16 little-endian uint16
    bin counts at offset 0, 4 mean time-of-flight bytes (scaled by 1/3) at
    offset 32, and three float32 PM values at offset 50 which are sorted
    ascending into the PM1 / PM2.5 / PM10 slots."""
    bin_counts = struct.unpack_from('<16H', data, offset=0)
    mtof = [raw / 3 for raw in struct.unpack_from('<4B', data, offset=32)]
    pm_sorted = sorted(struct.unpack_from('<3f', data, offset=50))
    return {
        'bins': bin_counts,
        'mtof': mtof,
        'pm': {'1': pm_sorted[0], '2.5': pm_sorted[1], '10': pm_sorted[2]},
    }


def decode_alphasense_1(source):
    """Decoder entry point for plugin 'alphasense:1'."""
    return decode18(source)
# Dispatch table: "<plugin_name>:<plugin_version>" -> frame decoder.
decoders = {
    'coresense:3': decode_coresense_3,
    'coresense:4': decode_coresense_4,
    'alphasense:1': decode_alphasense_1,
}

def decode(row):
    """Decode one stored data row; returns {} when no decoder matches the
    row's plugin name/version."""
    plugin = ':'.join([row.plugin_name, row.plugin_version])
    # Stored payload is a hex string, possibly wrapped in a bytes repr.
    source = binascii.unhexlify(trim_python_repr(row.data))
    if plugin not in decoders:
        return {}
    return decoders[plugin](source)
# Mapping from v3 sensor/parameter names to the flat v4 reading keys
# produced by decode_coresense_4 (consumed by map_readings_4to3).
template_4to3 = {
    'APDS-9006-020': {
        'intensity': 'lightsense_apds_9006_020_light'
    },
    'BMP180': {
        'pressure': 'metsense_bmp180_pressure',
        'temperature': 'metsense_bmp180_temperature',
    },
    'HIH4030': {
        'humidity': 'metsense_hih4030_humidity',
    },
    'HIH6130': {
        'humidity': 'lightsense_hih6130_humidity',
        'temperature': 'lightsense_hih6130_temperature',
    },
    'HMC5883L': {
        'magnetic_field.x': 'lightsense_hmc5883l_hx',
        'magnetic_field.y': 'lightsense_hmc5883l_hy',
        'magnetic_field.z': 'lightsense_hmc5883l_hz',
    },
    'HTU21D': {
        'humidity': 'metsense_htu21d_humidity',
        'temperature': 'metsense_htu21d_temperature',
    },
    'LPS25H': {
        'pressure': 'chemsense_lpp',
        'temperature': 'chemsense_lpt',
    },
    'ML8511': {
        'intensity': 'lightsense_ml8511',
    },
    'MLX75305': {
        'intensity': 'lightsense_mlx75305',
    },
    'MMA8452Q': {
        'acceleration.x': 'metsense_mma8452q_acc_x',
        'acceleration.y': 'metsense_mma8452q_acc_y',
        'acceleration.z': 'metsense_mma8452q_acc_z',
    },
    'SHT25': {
        'humidity': 'chemsense_shh',
        'temperature': 'chemsense_sht',
    },
    'Si1145': {
        'ir_count': 'chemsense_sir',
        'uv_count': 'chemsense_suv',
        'visible_light_count': 'chemsense_svl',
    },
    'TMP421': {
        'temperature': 'lightsense_tmp421',
    },
    'TSL250RD-LS': {
        'intensity': 'lightsense_tsl250_light',
    },
    'TSL260RD': {
        'intensity': 'lightsense_tsl260_light',
    },
    'Coresense ID': {
        'mac_address': 'metsense_id',
    },
    'PR103J2': {
        'temperature': 'metsense_pr103j2_temperature',
    },
    'SPV1840LR5H-B': {
        'intensity': 'metsense_spv1840lr5h-b',
    },
    'TMP112': {
        'temperature': 'metsense_tmp112',
    },
    'TSL250RD-AS': {
        'intensity': 'metsense_tsl250rd_light',
    },
    'TSYS01': {
        'temperature': 'metsense_tsys01_temperature',
    },
    'Chemsense ID': {
        'mac_address': 'chemsense_id',
    },
    'Chemsense': {
        'co': 'chemsense_cmo',
        'h2s': 'chemsense_h2s',
        'no2': 'chemsense_no2',
        'o3': 'chemsense_ozo',
        'so2': 'chemsense_so2',
        'reducing_gases': 'chemsense_irr',
        'oxidizing_gases': 'chemsense_iaq',
        'at0': 'chemsense_at0',
        'at1': 'chemsense_at1',
        'at2': 'chemsense_at2',
        'at3': 'chemsense_at3',
    },
    'Alphasense': {
        'pm1': 'alphasense_pm1',
        'pm2.5': 'alphasense_pm2.5',
        'pm10': 'alphasense_pm10',
        'bins': 'alphasense_bins',
        'sample flow rate': 'alphasense_sample_flow_rate',
        'sampling period': 'alphasense_sampling_period',
        'id': 'alpha_serial',
        'fw': 'alpha_firmware',
    },
    'PMS7003': {
        '10um_particle': 'pms7003_10um_particle',
        '1um_particle': 'pms7003_1um_particle',
        '2_5um_particle': 'pms7003_2_5um_particle',
        '5um_particle': 'pms7003_5um_particle',
        'pm10_atm': 'pms7003_pm10_atm',
        'pm10_cf1': 'pms7003_pm10_cf1',
        'pm1_atm': 'pms7003_pm1_atm',
        'pm1_cf1': 'pms7003_pm1_cf1',
        'pm25_atm': 'pms7003_pm25_atm',
        'pm25_cf1': 'pms7003_pm25_cf1',
        'point_3um_particle': 'pms7003_point_3um_particle',
        'point_5um_particle': 'pms7003_point_5um_particle',
    },
    'Net Broadband': {
        'rx': 'net_broadband_rx',
        'tx': 'net_broadband_tx',
    },
    'Net LAN': {
        'rx': 'net_lan_rx',
        'tx': 'net_lan_tx',
    },
    'Net USB': {
        'rx': 'net_usb_rx',
        'tx': 'net_usb_tx',
    },
}
def stringify(x):
    """Render a decoded value in the flat string form used by the v3 API:
    sequences become comma-joined (recursively), raw bytes become lowercase
    hex, everything else goes through str()."""
    if isinstance(x, (tuple, list)):
        return ','.join(stringify(item) for item in x)
    if isinstance(x, (bytes, bytearray)):
        return binascii.hexlify(x).decode()
    return str(x)


def map_parameters_4to3(readings, parameters):
    """Project one sensor's readings onto its v3 parameter names; raises
    KeyError when any mapped reading is absent."""
    return {param: stringify(readings[key]) for param, key in parameters.items()}


def map_readings_4to3(readings):
    """Regroup a flat v4 reading dict into the nested v3 sensor layout,
    keeping only sensors for which every parameter is present."""
    output = {}
    for sensor, parameters in template_4to3.items():
        try:
            output[sensor] = map_parameters_4to3(readings, parameters)
        except KeyError:
            continue
    return output
| [
"sean.shahkarami@gmail.com"
] | sean.shahkarami@gmail.com |
dfc33f4392592784b20a4f10a81aaefb5131c5c3 | 1d363dfbe69b79bc1989251f085060232beb12f5 | /tests/test_chemical_package.py | a87f5ed3afd8ddfc87e362e8508edd4dcf84319c | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | CalebBell/thermo | ec602af2316875692e385287c6010e9f206b1bc3 | 8622fada3614179d4372192e0031b4a206384c93 | refs/heads/master | 2023-08-30T05:30:07.552575 | 2023-06-25T01:35:53 | 2023-06-25T01:35:53 | 62,404,647 | 529 | 127 | MIT | 2023-08-11T18:31:21 | 2016-07-01T16:04:56 | Python | UTF-8 | Python | false | false | 6,401 | py | '''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import json
from math import *
import pytest
from chemicals import *
from chemicals.utils import hash_any_primitive
from fluids.numerics import *
from thermo import *
@pytest.mark.fuzz
@pytest.mark.slow
def test_ChemicalConstantsPackage_from_json_as_json_large():
create_compounds = []
for k in dippr_compounds():
try:
if search_chemical(k) is not None:
create_compounds.append(k)
except:
pass
obj = ChemicalConstantsPackage.constants_from_IDs(create_compounds)
obj2 = ChemicalConstantsPackage.from_json(json.loads(json.dumps(obj.as_json())))
assert hash(obj) == hash(obj2)
assert obj == obj2
assert id(obj) != id(obj2)
obj = ChemicalConstantsPackage.correlations_from_IDs(create_compounds)
obj2 = PropertyCorrelationsPackage.from_json(json.loads(json.dumps(obj.as_json())))
assert hash(obj) == hash(obj2)
assert obj == obj2
assert id(obj) != id(obj2)
assert obj != int
assert obj != float
def test_ChemicalConstantsPackage_json_version_exported():
constants = ChemicalConstantsPackage(MWs=[18.01528, 106.165], names=['water', 'm-xylene'])
string = json.dumps(constants.as_json())
c2 = ChemicalConstantsPackage.from_json(json.loads(string))
assert 'py/object' in string
assert 'json_version' in string
assert not hasattr(c2, 'json_version')
def test_ChemicalConstantsPackage_json_export_does_not_change_hashes():
# There was a nasty bug where the hashing function was changing its result
# every call
obj = ChemicalConstantsPackage.correlations_from_IDs(['hexane'])
hashes_orig = [hash_any_primitive(getattr(obj, k)) for k in obj.correlations]
copy = obj.as_json()
hashes_after = [hash_any_primitive(getattr(obj, k)) for k in obj.correlations]
assert hashes_orig == hashes_after
def test_ChemicalConstantsPackage_json_export_sane_recursion():
# It might be nice to do something about the duplicate EOSs, but they could be different
# Really still feels like a different structure for that would be better.
obj = ChemicalConstantsPackage.correlations_from_IDs(['methane', 'ethane'])
assert 3 == json.dumps(obj.as_json()).count('VaporPressure')
def test_ChemicalConstantsPackage_json_export_same_output():
obj = ChemicalConstantsPackage.correlations_from_IDs(['hexane'])
obj2 = PropertyCorrelationsPackage.from_json(json.loads(json.dumps(obj.as_json())))
assert hash_any_primitive(obj.constants) == hash_any_primitive(obj2.constants)
for prop in obj.pure_correlations:
assert hash_any_primitive(getattr(obj, prop)) == hash_any_primitive(getattr(obj2, prop))
assert hash_any_primitive(obj.VaporPressures) == hash_any_primitive(obj2.VaporPressures)
assert hash_any_primitive(obj.ViscosityGases) == hash_any_primitive(obj2.ViscosityGases)
assert hash(obj.SurfaceTensionMixture) == hash(obj2.SurfaceTensionMixture)
assert hash(obj.VolumeGasMixture) == hash(obj2.VolumeGasMixture)
for prop in obj.mixture_correlations:
assert hash_any_primitive(getattr(obj, prop)) == hash_any_primitive(getattr(obj2, prop))
assert hash(obj) == hash(obj2)
assert obj == obj2
def test_ChemicalConstantsPackage_wrong_behaviors():
obj = ChemicalConstantsPackage.correlations_from_IDs(['7647-19-0'])
obj.VolumeLiquids[0].eos is None
assert obj != int
assert obj != float
def test_lemmon2000_package():
Ts = (150.0, 200.0, 300.0, 1000.0, 2000.0)
CoolProp_Cps = [29.030484473246823, 29.03511836728048, 29.103801681330573, 33.046833525551676, 36.210748112152906]
for T, Cp in zip(Ts, CoolProp_Cps):
assert_close(Cp, lemmon2000_correlations.HeatCapacityGases[0](T), rtol=2e-7)
def test_compound_index():
    """Verify ``compound_index`` resolves components by every supported identifier."""
    obj = ChemicalConstantsPackage(MWs=[18.01528, 106.165], names=['water', 'm-xylene'],
                                   CASs=['7732-18-5', '108-38-3'],
                                   InChI_Keys=['XLYOFNOQVPJJNP-UHFFFAOYSA-N', 'IVSZLXZYQVIEFR-UHFFFAOYSA-N'],
                                   InChIs=['H2O/h1H2', 'C8H10/c1-7-4-3-5-8(2)6-7/h3-6H,1-2H3'],
                                   smiless=['O', 'CC1=CC(=CC=C1)C'], PubChems=[962, 7929],)
    # Lookups by name, PubChem CID, SMILES, CAS number, InChI and InChI key
    # must all return the component's index in the package.
    assert 0 == obj.compound_index(name='water')
    assert 1 == obj.compound_index(name='m-xylene')
    assert 1 == obj.compound_index(PubChem=7929)
    assert 0 == obj.compound_index(smiles='O')
    assert 0 == obj.compound_index(CAS='7732-18-5')
    assert 0 == obj.compound_index(InChI='H2O/h1H2')
    assert 1 == obj.compound_index(InChI_Key='IVSZLXZYQVIEFR-UHFFFAOYSA-N')
def test_add_ChemicalConstantsPackage():
    """Adding two constants packages must equal building one for the combined IDs."""
    left = ChemicalConstantsPackage.constants_from_IDs(IDs=['water', 'hexane'])
    right = ChemicalConstantsPackage.constants_from_IDs(IDs=['toluene'])
    combined = left + right
    expected = ChemicalConstantsPackage.constants_from_IDs(IDs=['water', 'hexane', 'toluene'])
    assert combined == expected
def test_add_PropertyCorrelationsPackage():
    """Adding two correlations packages must equal building one for the combined IDs."""
    left = ChemicalConstantsPackage.correlations_from_IDs(IDs=['water', 'hexane'])
    right = ChemicalConstantsPackage.correlations_from_IDs(IDs=['toluene'])
    combined = left + right
    expected = ChemicalConstantsPackage.correlations_from_IDs(IDs=['water', 'hexane', 'toluene'])
    assert combined == expected
| [
"Caleb.Andrew.Bell@gmail.com"
] | Caleb.Andrew.Bell@gmail.com |
5c377987f3a2b5dc3ed85c65b668b95b75cb4097 | c93f51492cfee3f98040f07d7f4323ec27ac81a5 | /refinery/units/misc/drp.py | b38fff444e00c9713d6f25da6071973808bbc9fe | [
"BSD-3-Clause"
] | permissive | prats84/refinery | cbe9ebfeb570c9c0531e13bbf13ec18801f12aca | 5f961051e9cc1857a06108ce4d36a6799ac9d720 | refs/heads/master | 2023-07-13T02:32:04.998285 | 2021-08-20T09:08:01 | 2021-08-20T09:08:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,630 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from .. import arg, Unit
from ...lib.suffixtree import SuffixTree
from ...lib.types import INF
class stackdepth:
    """Context manager that temporarily raises the interpreter recursion limit.

    The limit is only ever increased: if ``depth`` does not exceed the limit
    observed at construction time, entering the context changes nothing.  On
    exit, the limit recorded at construction time is restored unconditionally.
    """

    def __init__(self, depth):
        self.depth = depth
        self.default = sys.getrecursionlimit()

    def __enter__(self):
        # Only raise the limit, never lower it below the interpreter default.
        if self.depth > self.default:
            sys.setrecursionlimit(self.depth)
        return self

    def __exit__(self, *exc_info):
        sys.setrecursionlimit(self.default)
        # Never suppress exceptions raised inside the block.
        return False
class drp(Unit):
    """
    Detect Repeating Patterns - detects the most prevalent repeating byte pattern
    in a chunk of data. The unit computes a suffix tree which may require a lot of
    memory for large buffers.
    """

    def __init__(
        self,
        consecutive: arg.switch('-c', help='Assume that the repeating pattern is consecutive when observable.') = False,
        min: arg.number('-n', help='Minimum size of the pattern to search for. Default is {default}.') = 1,
        max: arg.number('-N', help='Maximum size of the pattern to search for. Default is {default}.') = INF,
        len: arg.number('-l', help='Set the exact size of the pattern. This is equivalent to --min=N --max=N.') = None,
        all: arg.switch('-a', help='Produce one output for each repeating pattern that was detected.') = False,
        threshold: arg.number('-t', help='Patterns must match this performance threshold in percent, lest they be discarded.') = 20,
        weight: arg.number('-w', help='Specifies how much longer patterns are favored over small ones. Default is {default}.') = 0,
        buffer: arg.number('-b', group='BFR', help='Maximum number of bytes to inspect at once. The default is {default}.') = 1024,
        chug : arg.switch('-g', group='BFR', help='Compute the prefix tree for the entire buffer instead of chunking it.') = False
    ):
        # --len is shorthand for forcing an exact pattern size.
        if len is not None:
            min = max = len
        super().__init__(
            min=min,
            max=max,
            all=all,
            consecutive=consecutive,
            weight=weight,
            buffer=buffer,
            chug=chug,
            threshold=threshold
        )

    def _get_patterns(self, data):
        """Collect candidate repeating substrings of ``data``.

        ``data`` is expected to be a memoryview (``tobytes`` is called on its
        slices).  A suffix tree over ``data`` is walked front to back; whenever
        the bytes at the cursor also occur at an earlier position, the matched
        substring (clipped to the configured size range) is recorded and the
        cursor skips past it.
        """
        # Building the suffix tree recurses; allow roughly one frame per byte.
        with stackdepth(len(data)):
            tree = SuffixTree(data)
        min_size = self.args.min
        max_size = self.args.max
        patterns = set()
        cursor = 0
        while cursor < len(data):
            node = tree.root
            rest = data[cursor:]
            remaining = len(rest)
            length = 0
            offset = None
            # Descend along tree edges matching the bytes at the cursor.
            while node.children and length < remaining:
                for child in node.children.values():
                    if tree.data[child.start] == rest[length]:
                        node = child
                        break
                # Stop once the matched node no longer starts before the
                # cursor, i.e. it is not an earlier occurrence any more.
                if node.start >= cursor:
                    break
                offset = node.start - length
                length = node.end + 1 - offset
            if offset is None:
                # No earlier occurrence begins here; advance a single byte.
                cursor += 1
                continue
            length = min(remaining, length)
            if max_size >= length >= min_size:
                pattern = rest[:length].tobytes()
                patterns.add(pattern)
            cursor += length
        # The tree can be large; free it as soon as possible.
        del tree
        return patterns

    @staticmethod
    def _consecutive_count(data, pattern):
        """Count aligned (back-to-back) occurrences of ``pattern`` in ``data``.

        Occurrences are counted on a grid with the pattern's length as period;
        every possible phase offset is tried and the best phase wins.
        """
        length = len(pattern)
        if length == 1:
            # Single bytes need no alignment handling.
            return data.count(pattern)
        view = memoryview(data)
        return max(sum(1 for i in range(k, len(view), length) if view[i:i + length] == pattern)
            for k in range(len(pattern)))

    @staticmethod
    def _truncate_pattern(pattern):
        """Strip a trailing partial repetition of the pattern's own prefix.

        ``offset`` tracks how far the tail of ``pattern`` currently retraces
        the pattern's beginning; any such trailing run is cut off.
        """
        offset = 0
        for byte in pattern[1:]:
            if byte == pattern[offset]:
                offset += 1
            else:
                offset = 0
        if offset > 0:
            pattern = pattern[:-offset]
        return pattern

    def process(self, data):
        """Yield the dominant repeating pattern(s) detected in ``data``."""
        memview = memoryview(data)
        # Exponent used to favor longer patterns when scoring (--weight).
        weight = 1 + (self.args.weight / 10)
        if self.args.chug:
            patterns = self._get_patterns(memview)
        else:
            # Chunked mode keeps each suffix tree small.
            patterns = set()
            chunksize = self.args.buffer
            for k in range(0, len(memview), chunksize):
                patterns |= self._get_patterns(memview[k:k + chunksize])
        if not patterns:
            raise RuntimeError('unexpected state: no repeating sequences found')

        self.log_debug('removing duplicate pattern detections')
        # Drop any pattern that is merely a smaller pattern repeated k times.
        duplicates = set()
        maxlen = max(len(p) for p in patterns)
        for pattern in sorted(patterns, key=len):
            for k in range(2, maxlen // len(pattern) + 1):
                repeated = pattern * k
                if repeated in patterns:
                    duplicates.add(repeated)
        patterns -= duplicates

        self.log_debug(F'counting coverage of {len(patterns)} patterns')
        pattern_count = {p: data.count(p) for p in patterns}
        pattern_performance = dict(pattern_count)
        for consecutive in (False, True):
            if consecutive:
                # Second pass (only with --consecutive): trim truncated final
                # repetitions and count only aligned occurrences.
                self.log_debug(F're-counting coverage of {len(patterns)} patterns')
                patterns = {self._truncate_pattern(p) for p in patterns}
                pattern_performance = {p: self._consecutive_count(data, p) for p in patterns}

            self.log_debug('evaluating pattern performance')
            # Score = count * len**weight, then normalize to the best score.
            for pattern, count in pattern_performance.items():
                pattern_performance[pattern] = count * (len(pattern) ** weight)
            best_performance = max(pattern_performance.values())
            for pattern, performance in pattern_performance.items():
                pattern_performance[pattern] = performance / best_performance

            self.log_debug('removing patterns below performance threshold')
            threshold = self.args.threshold
            patterns = {p for p in patterns if pattern_performance[p] * 100 >= threshold}

            if not self.args.consecutive:
                break

        if self.args.all:
            # One output chunk per surviving pattern, best performer first.
            for pattern in sorted(patterns, key=pattern_performance.get, reverse=True):
                yield self.labelled(pattern, count=pattern_count[pattern])
            return

        best_patterns = [p for p in patterns if pattern_performance[p] == 1.0]
        if len(best_patterns) > 1:
            self.log_warn('could not determine unique best repeating pattern, returning the first of these:')
            for k, pattern in enumerate(best_patterns):
                self.log_warn(F'{k:02d}.: {pattern.hex()}')
        yield best_patterns[0]
| [
"rattle@nullteilerfrei.de"
] | rattle@nullteilerfrei.de |
6694c98e3b9d6d13e32999cfd268c777850b2bcb | fa1953cb5c96b816b3d7e3df757cea0aa0f973b1 | /src/ensemble.py | d2014f12b77d18bd6b8be9d41617da60a77eca9f | [] | no_license | cttsai1985/Kaggle-Recursion-Cellular | 369aafd89f0ddfa4229f9b19fdba1317bfcf6cb8 | a91740a4ad984588c28a9369f303eba2e6b0bea0 | refs/heads/master | 2020-11-23T23:29:40.218838 | 2019-10-09T07:18:06 | 2019-10-09T07:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | import pandas as pd
import numpy as np
import click
import json
from scipy.optimize import linear_sum_assignment
from scipy.special import softmax
@click.group()
def cli():
    # Root of the click command group; the banner is printed on every
    # invocation of any sub-command.
    print("Ensemble")
def load_one_fold(predict_root, model_name, fold):
    """Average one CV fold's test predictions over all six channel subsets.

    Loads ``pred_test.npy`` for each 5-of-6 channel combination under
    ``{predict_root}/{channel}/fold_{fold}/{model_name}/`` and returns the
    element-wise mean of the stacked arrays.
    """
    channel_subsets = [
        "[1,2,3,4,5]",
        "[1,2,3,4,6]",
        "[1,2,3,5,6]",
        "[1,2,4,5,6]",
        "[1,3,4,5,6]",
        "[2,3,4,5,6]",
    ]
    stacked = np.asarray([
        np.load(f"{predict_root}/{channel}/fold_{fold}/{model_name}/pred_test.npy")
        for channel in channel_subsets
    ])
    return stacked.mean(axis=0)
def load_kfold(predict_root, model_name):
    """Average `load_one_fold` predictions across the five CV folds."""
    return sum(load_one_fold(predict_root, model_name, fold) / 5 for fold in range(5))
@cli.command()
@click.option('--data_root', type=str, default='/data/')
@click.option('--predict_root', type=str, default='/logs/pseudo/')
@click.option('--group_json', type=str, default='group.json')
def ensemble(
    data_root='/data/',
    predict_root='/logs/pseudo/',
    group_json="group.json",
):
    """Blend k-fold model predictions and write ``submission.csv``.

    For every test plate, the sirna assignment is solved as a linear
    assignment problem restricted to that plate's label group.
    """
    model_names = ['se_resnext50_32x4d']
    ensemble_preds = 0
    for model_name in model_names:
        ensemble_preds += load_kfold(predict_root, model_name)
    # NOTE(review): magic normalisation constant from the original solution
    # ("Just a magic" in the author's words) - provenance unclear; kept as-is.
    ensemble_preds = ensemble_preds / 121
    test_df = pd.read_csv(f"{data_root}/test.csv")
    ensemble_preds = softmax(ensemble_preds, axis=1)
    # The group file maps each test plate to the group of sirna labels that
    # can occur on it.
    with open(group_json, 'r') as f:
        m = json.load(f)
    id_codes = test_df.id_code.values
    test_plate_id_to_group_id = m["test_plate_id_to_group_id"]
    label_group_list = m["label_group_list"]
    # The plate identifier is the id_code with its last four characters
    # stripped; rows for one plate are contiguous in the test csv.
    plate_ids = [id_code[:-4] for id_code in id_codes]
    start_indices = sorted([plate_ids.index(experiment_id) for experiment_id in set(plate_ids)])
    start_indices.append(len(plate_ids))
    sirnas = []
    for i in range(len(start_indices) - 1):
        start_id = start_indices[i]
        end_id = start_indices[i + 1]
        test_plate_id = id_codes[start_id][:-4]
        label_group_id = test_plate_id_to_group_id[test_plate_id]
        group_labels = label_group_list[label_group_id]
        # Restrict probabilities to the plate's label group and normalise
        # each label column over the plate's rows.
        plate_prob = ensemble_preds[start_id:end_id, group_labels]
        plate_prob = plate_prob / plate_prob.sum(axis=0, keepdims=True)
        # Hungarian algorithm: minimising (1 - p) maximises total assigned
        # probability; map column indices back to global sirna labels.
        row_ind, col_ind = linear_sum_assignment(1 - plate_prob)
        col_ind = np.array(group_labels)[col_ind]
        sirnas.extend(col_ind)
    sub = pd.DataFrame.from_dict(
        data={"id_code": id_codes, "sirna": sirnas}
    )
    sub.to_csv(f"{predict_root}/submission.csv", index=False)
# Run the click command group when executed as a script.
if __name__ == '__main__':
    cli()
| [
"ngxbac.dt@gmail.com"
] | ngxbac.dt@gmail.com |
984d471f394ee4c497bf778e4fabb2af107c42c0 | da739696c3c88e760d0adb60f31fe069341ecb20 | /backend/home/migrations/0002_load_initial_data.py | 2ee56b403f0d4780b8f43156c355882de841197f | [] | no_license | crowdbotics-apps/app-23018 | 27b1cd94baeb226117429ecdf149bb1970e85c2d | 844910b8f1c6e50609bec1b2f8b2bdaf4f490230 | refs/heads/master | 2023-01-19T19:58:19.772854 | 2020-11-27T20:04:01 | 2020-11-27T20:04:01 | 316,589,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the ``home.CustomText`` table with the application title."""
    CustomText = apps.get_model("home", "CustomText")
    CustomText.objects.create(title="app")
def create_homepage(apps, schema_editor):
    """Seed ``home.HomePage`` with the default landing-page markup."""
    HomePage = apps.get_model("home", "HomePage")
    body = """
<h1 class="display-4 text-center">app</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    HomePage.objects.create(body=body)
def create_site(apps, schema_editor):
    """Point the default ``django.contrib.sites`` record at this app's domain."""
    Site = apps.get_model("sites", "Site")
    site_params = {"name": "app"}
    custom_domain = "app-23018.botics.co"
    if custom_domain:
        site_params["domain"] = custom_domain
    # id=1 is the default site created by the sites framework.
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    # Data migration: seeds CustomText, HomePage and the sites-framework
    # record once the initial schema migrations have been applied.

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    # RunPython operations execute in list order.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
09bccd733a4e5bc6a895e0032bb27e2a2bd0ca26 | 9e1df555176bae216828c404ad7290c2eb030cbf | /pl_examples/bug_report_model.py | dbea2013d11104b37b432cca31d57808e7b7e328 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | shijianjian/pytorch-lightning | e11be4d4926a1a0c8f605e596bec19926d476876 | b6f3cf5e52dddedec6f7b3e85c0702b75907452c | refs/heads/master | 2023-03-02T14:58:54.139540 | 2021-02-10T05:38:23 | 2021-02-10T05:38:23 | 318,134,795 | 1 | 0 | Apache-2.0 | 2020-12-03T09:05:46 | 2020-12-03T09:05:45 | null | UTF-8 | Python | false | false | 4,217 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------
# --------------------------------------------
# --------------------------------------------
# USE THIS MODEL TO REPRODUCE A BUG YOU REPORT
# --------------------------------------------
# --------------------------------------------
# --------------------------------------------
import os
import torch
from torch.utils.data import Dataset
from pytorch_lightning import Trainer, LightningModule
class RandomDataset(Dataset):
    """Dataset of ``length`` random vectors, each of dimension ``size``."""

    def __init__(self, size, length):
        self.len = length
        self.data = torch.randn(length, size)

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        # Each sample is a 1-D tensor of shape (size,).
        return self.data[index]
class BoringModel(LightningModule):
    """Minimal LightningModule used to reproduce bugs.

    Use as follows:

    - subclass it and override the behaviour you want to demonstrate::

        class TestModel(BoringModel):
            def training_step(...):
                # do your own thing

    - or instantiate it and null out hooks::

        model = BoringModel()
        model.training_epoch_end = None
    """

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def forward(self, x):
        return self.layer(x)

    def loss(self, batch, prediction):
        # Arbitrary loss so that `Trainer.fit` actually updates the weights.
        target = torch.ones_like(prediction)
        return torch.nn.functional.mse_loss(prediction, target)

    def step(self, x):
        projected = self.layer(x)
        return torch.nn.functional.mse_loss(projected, torch.ones_like(projected))

    def training_step(self, batch, batch_idx):
        prediction = self.layer(batch)
        return {"loss": self.loss(batch, prediction)}

    def training_step_end(self, training_step_outputs):
        return training_step_outputs

    def training_epoch_end(self, outputs) -> None:
        # Aggregation result is intentionally discarded.
        torch.stack([step_out["loss"] for step_out in outputs]).mean()

    def validation_step(self, batch, batch_idx):
        prediction = self.layer(batch)
        return {"x": self.loss(batch, prediction)}

    def validation_epoch_end(self, outputs) -> None:
        torch.stack([step_out["x"] for step_out in outputs]).mean()

    def test_step(self, batch, batch_idx):
        prediction = self.layer(batch)
        return {"y": self.loss(batch, prediction)}

    def test_epoch_end(self, outputs) -> None:
        torch.stack([step_out["y"] for step_out in outputs]).mean()

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
        return [optimizer], [scheduler]
# NOTE: If you are using a cmd line to run your script,
# provide the cmd line as below.
# opt = "--max_epochs 1 --limit_train_batches 1".split(" ")
# parser = ArgumentParser()
# args = parser.parse_args(opt)
def run_test():
    """Reproduce a bug: fit the boring model on one batch, then run a test pass."""

    class TestModel(BoringModel):
        def on_train_epoch_start(self) -> None:
            print('override any method to prove your bug')

    # fake data
    train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
    val_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
    test_data = torch.utils.data.DataLoader(RandomDataset(32, 64))

    # model
    model = TestModel()
    # A single epoch limited to one train/val batch keeps the reproduction fast.
    trainer = Trainer(
        default_root_dir=os.getcwd(),
        limit_train_batches=1,
        limit_val_batches=1,
        max_epochs=1,
        weights_summary=None,
    )
    trainer.fit(model, train_data, val_data)
    trainer.test(test_dataloaders=test_data)
# Allow running the reproduction script directly.
if __name__ == '__main__':
    run_test()
| [
"noreply@github.com"
] | shijianjian.noreply@github.com |
05fd5796d8e76b96a95e09044e3489a193ec8ee4 | c7d6c70132d626f7c572ecc3d74fc117cacc88bf | /eventory/ext/discord/compat.py | b3046a4e72145d75142a94e7c0c4e605b30afa0f | [
"MIT"
] | permissive | siku2/Eventory | b19a79e020fa8a25145889a64bba83c759e6c288 | 8a88b3328ae83b369a185a1ea266031e9eef04e8 | refs/heads/master | 2020-03-07T18:54:39.815670 | 2018-06-24T22:48:42 | 2018-06-24T22:48:42 | 127,656,364 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,690 | py | """This module serves to bridge the differences between the async and rewrite Discord.py versions."""
import logging
from typing import Callable, Dict, Union
import discord
from discord import Client, Colour, Embed, Message, User
from discord.embeds import EmptyEmbed
from discord.ext.commands import Context
from discord.ext.commands.view import StringView
log = logging.getLogger(__name__)
# True when the installed discord.py is the 1.0+ "rewrite" generation.
_REWRITE = discord.version_info[:3] >= (1, 0, 0)

# The helpers defined below (send_message, edit_message, wait_for_message,
# get_context) present one uniform API over both discord.py generations.
if _REWRITE:
    log.debug("Using the rewrite version of Discord.py, thank you!")
    from discord import DMChannel, TextChannel

    # On rewrite, DM and guild text channels are distinct types.
    DiscordTextChannel = Union[TextChannel, DMChannel]

    async def send_message(client: Client, channel: DiscordTextChannel, *args, **kwargs) -> Message:
        # Rewrite sends through the channel object; `client` is unused but
        # kept so both generations share one signature.
        return await channel.send(*args, **kwargs)

    async def edit_message(client: Client, message: Message, *args, **kwargs) -> Message:
        return await message.edit(*args, **kwargs)

    async def wait_for_message(client: Client, check: Callable[[Message], bool] = None) -> Message:
        return await client.wait_for("message", check=check)

    async def get_context(client: Client, msg: Message) -> Context:
        return await client.get_context(msg)
else:
    import warnings
    warnings.warn(
        "It seems that you're not using the Discord.py rewrite. This extension is written for the rewrite version of Discord.py so it doesn't "
        "necessarily run on your version", ImportWarning)
    from discord import PrivateChannel, Channel

    DiscordTextChannel = Union[PrivateChannel, Channel]

    async def send_message(client: Client, channel: DiscordTextChannel, *args, **kwargs) -> Message:
        return await client.send_message(channel, *args, **kwargs)

    async def edit_message(client: Client, message: Message, *args, **kwargs) -> Message:
        return await client.edit_message(message, *args, **kwargs)

    async def wait_for_message(client: Client, check: Callable[[Message], bool] = None) -> Message:
        return await client.wait_for_message(check=check)

    async def get_context(client: Client, msg: Message) -> Context:
        # Re-implementation of the rewrite's ``Client.get_context`` for the
        # pre-1.0 library: parse prefix and command name from the raw content.
        view = StringView(msg.content)
        ctx = Context(prefix=None, view=view, bot=client, message=msg)
        # NOTE(review): `_skip_check` presumably filters out the bot's own
        # messages - confirm against the discord.py source of this version.
        if client._skip_check(msg.author.id, client.user.id):
            return ctx
        prefix = await client._get_prefix(msg)
        invoked_prefix = prefix
        if isinstance(prefix, str):
            if not view.skip_string(prefix):
                return ctx
        else:
            # Multiple prefixes: use the first one the message starts with.
            invoked_prefix = discord.utils.find(view.skip_string, prefix)
            if invoked_prefix is None:
                return ctx
        invoker = view.get_word()
        ctx.invoked_with = invoker
        ctx.prefix = invoked_prefix
        ctx.command = client.all_commands.get(invoker)
        return ctx
async def add_embed(client: Union[Client, Context], msg: Union[Context, Message, str] = None, description: Union[str, int] = EmptyEmbed,
                    colour: Union[int, Colour] = EmptyEmbed, *, author: Union[str, Dict, User] = None, footer: Union[str, Dict] = None, **kwargs):
    """Add an Embed to a message.

    Args:
        client: Discord client. A Context may be passed instead for
            convenience, in which case the remaining positional arguments
            shift (see below).
        msg: Message to attach the Embed to. You may also pass a Context,
            or - when `client` is a Context - the description string itself.
        description: Description of the Embed
        colour: Colour for the Embed
        author: Author of the Embed.
            Providing a string merely sets the name of the author, the dictionary is fed directly to the set_author method and when provided with a
            User it uses the name and the avatar_url.
        footer: When provided with a string it uses it as the text for the footer and a dictionary is passed to the set_footer function.
    """
    if isinstance(client, Context):
        # Convenience overload: called with a Context as the first argument.
        ctx = client
        client = ctx.bot
        if isinstance(msg, str):
            # Arguments shift left: `msg` holds the description and
            # `description` holds the colour; the target is the ctx message.
            colour = description
            description = msg
            msg = ctx.message
        elif not msg:
            msg = ctx.message
    if isinstance(msg, Context):
        msg = msg.message
    em = Embed(description=description, colour=colour, **kwargs)
    if author:
        # Accept a raw dict, a discord User, or a plain name string.
        if isinstance(author, dict):
            em.set_author(**author)
        elif isinstance(author, User):
            em.set_author(name=author.name, icon_url=author.avatar_url)
        else:
            em.set_author(name=author)
    if footer:
        if isinstance(footer, dict):
            em.set_footer(**footer)
        else:
            em.set_footer(text=footer)
    # Edit in place when the target message is our own, otherwise send a new
    # message to the same channel.
    if msg.author.id == client.user.id:
        await edit_message(client, msg, embed=em)
    else:
        await send_message(client, msg.channel, embed=em)
| [
"siku2@outlook.de"
] | siku2@outlook.de |
9a3fbbf5f56ecbc4200b39bba863e8bb5e2e19ac | 303c941a88a4f8a4bf2d958457719efb08a5a30f | /fail/bj_1115.py | df3aee67f38c8002932b7ea7d26c40ae9e3c69f2 | [] | no_license | Kim-Taesu/Algorithm_python | 4b8d8445a23463c3885ec75dd2a7c275f46c1f3f | 1d358eb7fe4bd04b6ed0ec3754e8be08eece9278 | refs/heads/master | 2020-06-10T17:36:07.260572 | 2020-03-16T05:53:01 | 2020-03-16T05:53:01 | 193,693,829 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import sys
from itertools import permutations
sys.setrecursionlimit(10 ** 6)
# Competitive-programming idiom: shadow the builtin for faster stdin reads.
input = sys.stdin.readline
# N and the observed pointer array P.
N = int(input())
P = list(map(int, input().strip().split(' ')))
# Every candidate ordering of the positions 1..N-1.
perfect_p_list = set(permutations([i for i in range(1, N)], N - 1))
min_count = sys.maxsize
def compute_origin_p(p_tmp):
    """Rebuild a full pointer array of length N from a candidate ordering.

    ``result[0]`` points at the first element of the ordering and each
    element of ``p_tmp`` points at its successor.  Relies on the module
    global ``N`` for the output length.
    """
    result = [0] * N
    result[0] = p_tmp[0]
    for current, successor in zip(p_tmp, p_tmp[1:]):
        result[current] = successor
    return result
# Brute force: try every candidate ordering and keep the one whose
# reconstructed pointer array differs from P in the fewest positions.
# NOTE(review): this enumerates (N-1)! orderings - only feasible for tiny N
# (the file lives in a directory named "fail").
for perfect_p_tmp in perfect_p_list:
    origin_p = compute_origin_p(perfect_p_tmp)
    diff_count = 0
    for index in range(N):
        if origin_p[index] != P[index]:
            diff_count += 1
    min_count = min(min_count, diff_count)
print(min_count)
| [
"dkdldoafotn@naver.com"
] | dkdldoafotn@naver.com |
a213b5557a2c932b30db4d8c8d0090a87238a01b | 699abe83fa9d142dcff2cc551d4ac052f7778058 | /real_estate/spiders/otodom.py | 5c22c9b3fef5f231270b2c7ab20b391fdb0df6c8 | [] | no_license | whosethere/otodom_crawler_scraper | 055313b9b47c93454d3b795712a2fa5830562bbe | d97a34fa7ad0c7b05f89d30d5d6436cf6b329a34 | refs/heads/master | 2022-06-25T11:19:37.368177 | 2020-05-09T10:52:25 | 2020-05-09T10:52:25 | 262,549,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,284 | py | import scrapy
from scrapy.linkextractors import LinkExtractor
import json
from pymongo import MongoClient
import base64
import io
from real_estate.items import Otodom
from scrapy.loader import ItemLoader
import datetime
import time
import re
class OtodomSpider(scrapy.Spider):
    """Spider crawling otodom.pl apartment-rental listings.

    ``parse`` walks the paginated search results and schedules one request
    per advert; ``parse_ogloszenie`` scrapes an advert page into an
    ``Otodom`` item via an ``ItemLoader``.
    """
    name = 'otodom'
    allowed_domains = ['otodom.pl']
    start_urls = ['https://www.otodom.pl/wynajem/mieszkanie/?nrAdsPerPage=72']

    def parse(self, response):
        """Schedule a request per advert on this page, then follow pagination."""
        ogloszenie = response.xpath('//article/@data-url').extract()
        for url_ogloszenia in ogloszenie:
            # The advert URL is forwarded via meta so the detail callback can
            # store it on the item.
            yield scrapy.Request(url_ogloszenia,
                                 callback=self.parse_ogloszenie,
                                 meta={'url_ogloszenia': url_ogloszenia})
        next_page = response.xpath('//li[@class="pager-next"]/a/@href').extract_first()
        if next_page:
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)

    def parse_ogloszenie(self, response):
        """Scrape a single advert page into an ``Otodom`` item."""
        link_ogloszenia = response.meta['url_ogloszenia']
        typ_oferty = response.xpath('//div[@class="css-1gjwmw9"]/text()').extract()
        tytul_ogloszeia = response.xpath('//div[@class="css-1ld8fwi"]/text()').extract()
        lokalizacja = response.xpath('//a[@class="css-12hd9gg"]/text()').extract()
        cena = response.xpath('//div[@class="css-1vr19r7"]/text()').extract()
        # Detail fields default to an empty string when the advert does not
        # list them.  Variable names (including the misspelled `licza_pieter`
        # and `tytul_ogloszeia`) match the Otodom item fields and are kept.
        czynsz_dodatkowo = ""
        liczba_pokoi = ""
        licza_pieter = ""
        ogrzewanie = ""
        kaucja = ""
        rodzaj_zabudowy = ""
        material_budynku = ""
        stan_wykonczenia = ""
        powierzchnia = ""
        pietro = ""
        okna = ""
        dostepne_od = ""
        # Each <li> holds "<strong>Key:</strong> value"; strip the markup and
        # split once on the colon.
        for szczegol in response.xpath('(//div[@class="css-1ci0qpi"]/ul/li)'):
            szczegol = szczegol.extract()
            szczegol = szczegol.replace("<li>", "").replace("<strong>", "").replace("</li>", "").replace("</strong>", "")
            co, jakie = szczegol.split(":")
            if co == "Czynsz - dodatkowo":
                # Bug fix: this value was previously stored in an unused local
                # (`czynsz`), so the item field was always empty.
                czynsz_dodatkowo = jakie
            if co == "Kaucja":
                kaucja = jakie
            if co == "Powierzchnia":
                powierzchnia = jakie
            if co == "Liczba pokoi":
                liczba_pokoi = jakie
            if co == "Rodzaj zabudowy":
                rodzaj_zabudowy = jakie
            if co == "Piętro":
                pietro = jakie
            if co == "Liczba pięter":
                # Bug fix: was assigned to a differently spelled variable
                # (`liczba_pieter`) that never reached the item loader.
                licza_pieter = jakie
            if co == "Materiał budynku":
                material_budynku = jakie
            if co == "Okna":
                okna = jakie
            if co == "Ogrzewanie":
                ogrzewanie = jakie
            if co == "Stan wykończenia":
                stan_wykonczenia = jakie
            if co == "Dostępne od":
                dostepne_od = jakie
        # The offer number is rendered as "Nr oferty: <number>".
        numer_oferty = response.xpath('//div[@class="css-kos6vh"]/text()').extract_first()
        numer_oferty = numer_oferty.split(":")[1]
        kiedy_dodano = response.xpath('//div[@class="css-lh1bxu"]/text()')[0].extract()
        kiedy_aktualizowano = response.xpath('//div[@class="css-lh1bxu"]/text()')[1].extract()
        loader = ItemLoader(item=Otodom(), response=response)
        loader.add_value('numer_oferty', numer_oferty)
        loader.add_value('kiedy_dodano', kiedy_dodano)
        loader.add_value('kiedy_aktualizowano', kiedy_aktualizowano)
        loader.add_value('typ_oferty', typ_oferty)
        loader.add_value('okna', okna)
        loader.add_value('tytul_ogloszeia', tytul_ogloszeia)
        loader.add_value('lokalizacja', lokalizacja)
        loader.add_value('cena', cena)
        loader.add_value('czynsz_dodatkowo', czynsz_dodatkowo)
        loader.add_value('liczba_pokoi', liczba_pokoi)
        loader.add_value('licza_pieter', licza_pieter)
        loader.add_value('pietro', pietro)
        loader.add_value('ogrzewanie', ogrzewanie)
        loader.add_value('kaucja', kaucja)
        loader.add_value('rodzaj_zabudowy', rodzaj_zabudowy)
        loader.add_value('material_budynku', material_budynku)
        loader.add_value('stan_wykonczenia', stan_wykonczenia)
        loader.add_value('powierzchnia', powierzchnia)
        loader.add_value('dostepne_od', dostepne_od)
        loader.add_value('link_ogloszenia', link_ogloszenia)
        yield loader.load_item()
"you@example.com"
] | you@example.com |
6440c4965fb42a080990cb5cad4c6b0f20166912 | 518bf342bc4138982af3e2724e75f1d9ca3ba56c | /solutions/1441. Build an Array With Stack Operations/1441.py | c57039a1e6a850ebf9c94019ab817c14266d8309 | [
"MIT"
] | permissive | walkccc/LeetCode | dae85af7cc689882a84ee5011f0a13a19ad97f18 | a27be41c174565d365cbfe785f0633f634a01b2a | refs/heads/main | 2023-08-28T01:32:43.384999 | 2023-08-20T19:00:45 | 2023-08-20T19:00:45 | 172,231,974 | 692 | 302 | MIT | 2023-08-13T14:48:42 | 2019-02-23T15:46:23 | C++ | UTF-8 | Python | false | false | 350 | py | class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
ans = []
i = 0 # Target pointer
num = 1 # Curr num
while i < len(target):
t = target[i]
if t == num:
ans.append("Push")
i += 1
else:
ans.append("Push")
ans.append("Pop")
num += 1
return ans
| [
"me@pengyuc.com"
] | me@pengyuc.com |
1c530fe372777c5ddae9fd3aae02fe09b03a7d9e | 0db05f7b843e8450bafd5ae23f8f70f9a9a8c151 | /Src/StdLib/Lib/site-packages/win32comext/shell/test/testShellFolder.py | 82d126408aa600a298de5cbeb8dbf491a19dd598 | [
"BSD-3-Clause",
"Python-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | IronLanguages/ironpython2 | 9c7f85bd8e6bca300e16f8c92f6384cecb979a6a | d00111890ce41b9791cb5bc55aedd071240252c4 | refs/heads/master | 2023-01-21T21:17:59.439654 | 2023-01-13T01:52:15 | 2023-01-13T01:52:15 | 91,620,472 | 1,171 | 288 | Apache-2.0 | 2023-01-13T01:52:16 | 2017-05-17T21:11:51 | Python | UTF-8 | Python | false | false | 581 | py | from win32com.shell import shell
from win32com.shell.shellcon import *
# NOTE: Python 2 syntax (print statements), matching the pywin32 test suite.
sf = shell.SHGetDesktopFolder()
print "Shell Folder is", sf

# Collect the display name of every item on the desktop; iterating the
# folder object implicitly enumerates its contents.
names = []
for i in sf:  # Magically calls EnumObjects
    name = sf.GetDisplayNameOf(i, SHGDN_NORMAL)
    names.append(name)

# And get the enumerator manually, counting items to cross-check the
# implicit enumeration above.
enum = sf.EnumObjects(0, SHCONTF_FOLDERS | SHCONTF_NONFOLDERS | SHCONTF_INCLUDEHIDDEN)
num = 0
for i in enum:
    num += 1
if num != len(names):
    print "Should have got the same number of names!?"

print "Found", len(names), "items on the desktop"
for name in names:
    print name
| [
"pawel.jasinski@gmail.com"
] | pawel.jasinski@gmail.com |
8b86f8f4e9e3c5acf3431aa0b9eedf6fc4b41865 | 4e708d886792efad8e9398ace014e325b00d907d | /owo/client/enclosure/__main__.py | ba3958e87436afa6734eae36934a98e989c885a2 | [] | no_license | korkies22/OwO-core | 981ed8afb9f75432d5a4060f63c9fb591767d855 | 56f234db1f8fd18c78fd9dc4a390d37086ab671f | refs/heads/master | 2020-04-11T09:38:27.885985 | 2018-10-01T16:17:09 | 2018-10-01T16:17:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | # Copyright 2017 OwO AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from owo.client.enclosure import Enclosure
def main():
    """Start the enclosure client and run it until it stops."""
    enclosure = Enclosure()
    try:
        enclosure.run()
    except Exception as e:
        # Best-effort reporting only; the original traceback is discarded.
        print(e)
    finally:
        # NOTE(review): sys.exit() with no argument always exits with status
        # 0, even after a failure - confirm whether a nonzero code is wanted.
        sys.exit()
if __name__ == "__main__":
main()
| [
"j.alban@uniandes.edu.co"
] | j.alban@uniandes.edu.co |
58c1d50482bd4ceff2380654a146d6c7476adab9 | 408f985c954ad6f0f3926e02dc18c31336bac194 | /precise_bbcode/core/utils.py | 8388589936970347e3e3e1dbc97d1e1a7f325542 | [
"BSD-3-Clause"
] | permissive | ellmetha/django-precise-bbcode | 176258d9436cc29002d59f29d4964a3bdd05721e | 24306622fc8ebd91c8c79543c18050de0b32f1f1 | refs/heads/main | 2023-08-28T13:35:07.027756 | 2023-07-27T01:07:15 | 2023-07-27T01:07:15 | 13,904,807 | 36 | 16 | BSD-3-Clause | 2023-08-19T18:44:47 | 2013-10-27T16:45:03 | Python | UTF-8 | Python | false | false | 322 | py | from functools import reduce
def replace(data, replacements):
    """Apply each ``(old, new)`` substitution to ``data`` in order.

    Substitutions are applied sequentially, so a later pair may rewrite text
    produced by an earlier one.  Returns the resulting string.
    """
    result = data
    for old, new in replacements:
        result = result.replace(old, new)
    return result
| [
"morgan.aubert@zoho.com"
] | morgan.aubert@zoho.com |
02f82c3bdce5f373fb441ff9f282e97254250537 | d138deda43e36f6c79c5e3a9ef1cc62c6a92e881 | /python/paddle/fluid/tests/unittests/test_dist_mnist_ring_allreduce.py | 4436064dc28ed1276481378c70aa3b306486e0c8 | [
"Apache-2.0"
] | permissive | seiriosPlus/Paddle | 51afd6f5c85c3ce41dd72953ee659d1539c19f90 | 9602a182b2a4979247c09df1ec283fc39cb4a981 | refs/heads/develop | 2021-08-16T16:05:10.848535 | 2020-12-27T15:15:19 | 2020-12-27T15:15:19 | 123,257,829 | 2 | 0 | Apache-2.0 | 2019-12-10T08:22:01 | 2018-02-28T08:57:42 | C++ | UTF-8 | Python | false | false | 1,177 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test_dist_base import TestDistBase
import paddle
paddle.enable_static()
class TestDistMnistNCCL2(TestDistBase):
    """Distributed MNIST training test using NCCL2 ring all-reduce."""

    def _setup_config(self):
        # Synchronous SGD over NCCL2; no reduce strategy and no reader
        # allocation tweak.
        self._sync_mode = True
        self._use_reduce = False
        self._use_reader_alloc = False
        self._nccl2_mode = True

    def test_dist_train(self):
        import paddle.fluid as fluid
        # Only meaningful on CUDA builds; silently does nothing otherwise.
        if fluid.core.is_compiled_with_cuda():
            self.check_with_place("dist_mnist.py", delta=1e-5)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | seiriosPlus.noreply@github.com |
fd3cf4d40487d172afc5c8e1119786c68a9a7f34 | 630804da2c327c6723a7a808c23381348b1c79be | /Week1/Day1/FirstAssignment.py | 0920e18f448ce3cb1dc2c941f453446b81d409bf | [] | no_license | AjayKarki/DWIT_Training | 9f1bc49ff19acbe96d121e115acd0d1ba9c05ff4 | e8ce49e2c73e29130a352bd9f5fcab4fe86ebf13 | refs/heads/master | 2020-09-23T13:01:31.056432 | 2019-12-25T15:09:39 | 2019-12-25T15:09:39 | 225,506,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py |
# Read two integers from the user and print basic arithmetic on them.
first_number = int(input("Enter a number "))
second_number = int(input("Enter another number "))

# (The original demonstrated a triple-quoted multiline comment here;
# plain '#' comments are used instead.)
for label, value in (
    ("Sum is ", first_number + second_number),
    ("Diff is ", first_number - second_number),
    ("Prod is ", first_number * second_number),
    ("Div is ", first_number / second_number),
):
    print(label, value)
print("Mod is ", first_number % second_number) | [
"ajaykarki333@gmail.com"
] | ajaykarki333@gmail.com |
fb66ec4f4fe8f900b439e9ce33725c9fb5f5e3fb | ef4a12140c89ddd01c335000e15479dff85c48c6 | /kleinworth/spiders/spider.py | e969c9d462814f10ac9746d7f20af6c590fd4209 | [] | no_license | SimeonYS/kleinworth | 8f06e717f9f7c1a3377033ddf190b64bc7784f8e | c4575f0daeacf4771359e4b213c6cbf850fe750d | refs/heads/main | 2023-03-19T08:17:54.626925 | 2021-03-15T10:10:36 | 2021-03-15T10:10:36 | 347,920,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | import re
import scrapy
from scrapy.loader import ItemLoader
from ..items import KleinworthItem
from itemloaders.processors import TakeFirst
pattern = r'(\xa0)?'
class KleinworthSpider(scrapy.Spider):
    """Scrape press releases and news articles from kleinworthambros.com.

    The two tag listing pages ('press-releases' and 'news') are crawled,
    and every linked article page is parsed into a KleinworthItem.
    """
    name = 'kleinworth'
    start_urls = ['https://www.kleinworthambros.com/en/tags/tag/press-releases/',
                  'https://www.kleinworthambros.com/en/tags/tag/news/'
                  ]
    def parse(self, response):
        """Follow every article link found on a listing page."""
        post_links = response.xpath('//div[contains(@id,"card2")]/@data-href | //div[@class="taxoWrap"]/a/@href').getall()
        yield from response.follow_all(post_links, self.parse_post)
    def parse_post(self, response):
        """Extract date, title and body text from one article page."""
        date = response.xpath('//div[@class="sgnews_single_date"]/text()').get()
        title = response.xpath('(//h1//text())[last()]').get()
        content = response.xpath('//div[@class="intro"]//text()').getall() + response.xpath('//div[@class="sgnews_single_content"]//text()').getall()
        # Drop whitespace-only fragments, then remove non-breaking spaces
        # via the module-level `pattern` regex.
        content = [p.strip() for p in content if p.strip()]
        content = re.sub(pattern, "",' '.join(content))
        item = ItemLoader(item=KleinworthItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('link', response.url)
        item.add_value('content', content)
        item.add_value('date', date)
        yield item.load_item()
| [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
cef1aa623ceaeb8e1400245c7f160633065eb08c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/pygame/pygame-physics/pygame_projectile_angle.py | b4956e8676459708bfbca383c3da3aa096cbd52d | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:018a844bc0bfcb3938201de01d763672c6bdce35b5b911524f4e2222f2b9b540
size 830
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
08910e614d2cf4644137974150405fa8943e7a99 | 3fe9d17c2151cb3853986a79e2ce2010fbc27b4d | /utils.py | 2a2a787daa66ee87fc9f20df7944bd59860bec87 | [] | no_license | coolmaksat/graembeds | 82c2d6e1fecb51e33aa0ace349a3d434b608534f | 35ecddb9d8ef1d9fb648186a6195fd0703d9b642 | refs/heads/master | 2020-03-27T20:18:08.315222 | 2018-09-06T13:44:14 | 2018-09-06T13:44:14 | 147,056,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | import numpy as np
import math
class Dataset(object):
    """Dataset class for handling data operations.

    It requires a specific format for graph data files: the graph should
    be stored under ``data/<name>/`` and contain three files
    ``train.txt``, ``valid.txt`` and ``test.txt``.  All three files share
    the same format, where each line represents a triple
    (head, relation, tail).
    """
    def __init__(self, name):
        self.name = name
        self.train_triples = []
        self.valid_triples = []
        self.test_triples = []
        # String -> integer-id maps, filled incrementally by load().
        self.node_mappings = {}
        self.relation_mappings = {}
        for triple in self.load('train'):
            self.train_triples.append(triple)
        for triple in self.load('valid'):
            self.valid_triples.append(triple)
        for triple in self.load('test'):
            self.test_triples.append(triple)
        # Kept as Python sets of tuples for O(1) membership checks.
        self.train_triple_set = set(self.train_triples)
        self.triple_set = set(
            self.train_triples + self.valid_triples + self.test_triples)
        # Converted to numpy arrays so batch_generator can fancy-index.
        self.train_triples = np.array(self.train_triples)
        self.test_triples = np.array(self.test_triples)
        self.valid_triples = np.array(self.valid_triples)
        self.train_size = len(self.train_triples)
        self.valid_size = len(self.valid_triples)
        self.test_size = len(self.test_triples)
        self.nb_nodes = len(self.node_mappings)
        self.nb_relations = len(self.relation_mappings)
    def load(self, part):
        """Reads dataset files (train, valid, test) and
        generates triples with integer ids.

        If an entity or relation is not yet in the mapping variables,
        a new id is created and the entity or relation is added to the
        corresponding mapping variable.

        Arguments:
            part: The name of the dataset part (train, valid, test)
        """
        with open('data/' + self.name + '/' + part + '.txt') as f:
            for line in f:
                # Expected layout per line: <head> <relation> <tail>
                it = line.strip().split()
                if it[0] not in self.node_mappings:
                    self.node_mappings[it[0]] = len(self.node_mappings)
                if it[2] not in self.node_mappings:
                    self.node_mappings[it[2]] = len(self.node_mappings)
                if it[1] not in self.relation_mappings:
                    self.relation_mappings[it[1]] = len(self.relation_mappings)
                yield (
                    self.node_mappings[it[0]],
                    self.relation_mappings[it[1]],
                    self.node_mappings[it[2]])
    def batch_generator(self, part, batch_size=256):
        """Return a batch generator over a shuffled copy of *part*'s triples.

        ``Generator`` is defined elsewhere in this module — presumably it
        yields batches of the given size; confirm at its definition.
        """
        triples = getattr(self, part + '_triples')
        n = len(triples)
        index = np.arange(n)
        np.random.shuffle(index)
        return Generator(
            triples[index], self.nb_nodes, self.train_triple_set,
            batch_size=batch_size)
| [
"coolmaksat@gmail.com"
] | coolmaksat@gmail.com |
a6e3d61630fcd661507496cd82b8e633ac857ffb | 539d003125eebf761ba320223566cd56eeefe247 | /mundiapi/models/get_split_response.py | 4d7b029a9500b863546dae4bbb9a066b8284df12 | [
"MIT"
] | permissive | mundipagg/MundiApi-NodeJS | 6e58afb33510a723574ee06bec107654409910af | f0c67e1f92471a7a0e2d0b0cb1765105f07fb8cb | refs/heads/master | 2023-06-25T23:04:42.429866 | 2023-06-19T16:10:31 | 2023-06-19T16:10:31 | 101,078,084 | 9 | 5 | NOASSERTION | 2023-06-01T17:50:21 | 2017-08-22T15:25:30 | JavaScript | UTF-8 | Python | false | false | 2,179 | py | # -*- coding: utf-8 -*-
"""
mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import mundiapi.models.get_recipient_response
class GetSplitResponse(object):

    """Model for a split rule returned by the API.

    Attributes:
        mtype (string): Split type.
        amount (int): Split amount.
        recipient (GetRecipientResponse): Recipient of the split.
        gateway_id (string): The split rule gateway id.
    """

    # Maps model attribute names to the property names used by the API.
    _names = {
        "mtype": 'type',
        "amount": 'amount',
        "gateway_id": 'gateway_id',
        "recipient": 'recipient'
    }

    def __init__(self,
                 mtype=None,
                 amount=None,
                 gateway_id=None,
                 recipient=None):
        """Initialize a GetSplitResponse with the given field values."""
        self.mtype = mtype
        self.amount = amount
        self.gateway_id = gateway_id
        self.recipient = recipient

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Build a GetSplitResponse from a deserialized API payload.

        Args:
            dictionary (dict): Deserialized server response; keys MUST
                match the property names in the API description.

        Returns:
            GetSplitResponse | None: A populated instance, or None when
            *dictionary* is None.
        """
        if dictionary is None:
            return None

        # Nested recipient object is only decoded when present.
        recipient_payload = dictionary.get('recipient')
        recipient = (
            mundiapi.models.get_recipient_response.GetRecipientResponse
            .from_dictionary(recipient_payload)
            if recipient_payload else None
        )

        return cls(dictionary.get('type'),
                   dictionary.get('amount'),
                   dictionary.get('gateway_id'),
                   recipient)
| [
"noreply@github.com"
] | mundipagg.noreply@github.com |
9d96b2dcf2e0ad95dd30fbddadd9690e92e19b73 | 9dc1c85e7d86d29400af79125e9cd89a82a9b8ab | /myproject/portfolio/migrations/0008_auto_20210411_1227.py | 1ff519e0632e898c671a29b31ad605be406f6c6f | [
"MIT"
] | permissive | borko81/simple_django | e284ff8f79b3e708b4903ba0b774e3a480de9190 | 9dbd2d848cbf0ff0c58e93471853c5b21c769758 | refs/heads/master | 2023-07-14T01:25:13.294095 | 2021-08-16T15:48:00 | 2021-08-16T15:48:00 | 349,369,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 3.1.7 on 2021-04-11 09:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Project.image optional (blank and
    # null allowed) and stores uploads under MEDIA_ROOT/img/.
    dependencies = [
        ('portfolio', '0007_auto_20210402_2045'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='img/'),
        ),
    ]
| [
"bstoilov81@gmail.com"
] | bstoilov81@gmail.com |
47f64b5e8973a03abf00d861f93525f47544fea7 | 84166c246e819c19acc1dcd77a405b85ca554c9b | /app/models.py | 192cbcd040ed2ace4a75d75d5408515272105f1b | [
"MIT"
] | permissive | kenmutuma001/Blog | 992a98a605f7247bade6577b8e02a277b30db294 | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | refs/heads/master | 2020-05-18T13:32:34.689064 | 2019-05-01T16:55:32 | 2019-05-01T16:55:32 | 184,439,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the session-stored id."""
    return User.query.get(int(user_id))
class User(UserMixin, db.Model):
    """Registered account; UserMixin supplies the Flask-Login interface.

    NOTE(review): the password hash is stored in ``pass_secure`` (see the
    ``password`` setter below) — the ``password_hash`` column is never
    read or written here and looks like dead schema; confirm.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255))
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    email = db.Column(db.String(255), unique=True, index=True)
    password_hash = db.Column(db.String(255))
    pass_secure = db.Column(db.String(255))
    blogs = db.relationship('Blog', backref='blogs', lazy="dynamic")
    # comments = db.relationship('Comment', backref='comments', lazy="dynamic")
    @property
    def password(self):
        # The plaintext password is write-only by design.
        raise AttributeError('Cant read password attribute')
    @password.setter
    def password(self, password):
        # Store only a salted hash of the plaintext.
        self.pass_secure = generate_password_hash(password)
    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.pass_secure, password)
    def __repr__(self):
        return f'User {self.username}'
class Role(db.Model):
    """A user role; one role can be shared by many users via User.role_id.

    NOTE(review): the relationship attribute is named ``user_id`` although
    it holds a query of User rows, not an id — confirm before renaming.
    """
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    user_id = db.relationship('User', backref='users', lazy="dynamic")
    def __repr__(self):
        # Fixed copy-paste bug: this previously printed 'User <name>'.
        return f'Role {self.name}'
class Blog(UserMixin, db.Model):
    """A blog post authored by a user.

    NOTE(review): inheriting UserMixin on a content model is unusual —
    it is normally reserved for the login model; confirm it is needed.
    """
    __tablename__ = 'blogs'
    id = db.Column(db.Integer, primary_key=True)
    post = db.Column(db.String(255))
    body = db.Column(db.String(1000))
    category = db.Column(db.String(1000))
    date_posted = db.Column(db.DateTime, nullable=False,
                            default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    comments = db.relationship('Comment', backref='comments', lazy="dynamic")
    def save_blog(self):
        """Persist this post in its own transaction."""
        db.session.add(self)
        db.session.commit()
class Comment(db.Model):
    """A comment attached to a blog post.

    NOTE(review): the foreign keys look swapped relative to their names —
    ``user_id`` references blogs.id while ``pitch`` references users.id;
    confirm against the queries that use them before changing the schema.
    """
    __tablename__ = "comments"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255))
    comment = db.Column(db.String(1000))
    date_posted = db.Column(db.DateTime, nullable=False,
                            default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("blogs.id"))
    pitch = db.Column(db.Integer, db.ForeignKey("users.id"))
    def save_comment(self):
        """Persist this comment in its own transaction."""
        db.session.add(self)
        db.session.commit()
class Popular:
    """Lightweight in-memory container for a quote and its author.

    Unlike the database models in this file, instances are not persisted.
    """

    def __init__(self, author, quote):
        self.author = author  # name of the quote's author
        self.quote = quote    # the quote text

    def __repr__(self):
        # Added for debuggability; dataclass-style repr.
        return f'Popular(author={self.author!r}, quote={self.quote!r})'
| [
"santa@northpole.com"
] | santa@northpole.com |
2fdc4462eba62b8677c1fc96b35656c7681cd085 | 14f085fe9db8179dd44c18f00c1184881dcfe21a | /testing/mos_2d.py | c174c00a97eef685b5fb02f156d04f6eef906d24 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSL-1.0",
"BSD-2-Clause",
"MPL-2.0"
] | permissive | devsim/devsim | 7ba495952239d4e9c0170c0a5a89905aa9eb3e1e | 3d979d6a98685b2e51c15eebd20afdc1e643fc3a | refs/heads/main | 2023-08-31T10:40:41.346966 | 2023-08-30T16:42:56 | 2023-08-30T16:42:56 | 8,838,727 | 158 | 69 | Apache-2.0 | 2023-07-15T03:21:34 | 2013-03-17T18:01:17 | C++ | UTF-8 | Python | false | false | 2,939 | py | # Copyright 2013 DEVSIM LLC
#
# SPDX-License-Identifier: Apache-2.0
from devsim.python_packages.simple_physics import *
from devsim.python_packages.ramp import *
from devsim import *
import mos_2d_create
# ---- Device and region bookkeeping -------------------------------------
device = "mymos"
silicon_regions=("gate", "bulk")
oxide_regions=("oxide",)
regions = ("gate", "bulk", "oxide")
interfaces = ("bulk_oxide", "gate_oxide")
# Potential-only (Poisson) setup on every region.
for i in regions:
    CreateSolution(device, i, "Potential")
for i in silicon_regions:
    SetSiliconParameters(device, i, 300)
    CreateSiliconPotentialOnly(device, i)
for i in oxide_regions:
    SetOxideParameters(device, i, 300)
    CreateOxidePotentialOnly(device, i, "log_damp")
### Set up contacts
contacts = get_contact_list(device=device)
for i in contacts:
    tmp = get_region_list(device=device, contact=i)
    r = tmp[0]
    print("%s %s" % (r, i))
    CreateSiliconPotentialOnlyContact(device, r, i)
    # All contacts start at 0 V bias.
    set_parameter(device=device, name=GetContactBiasName(i), value=0.0)
for i in interfaces:
    CreateSiliconOxideInterface(device, i)
# Equilibrium potential-only solve; invoked twice in the original flow —
# presumably to verify convergence of the damped solve; confirm.
solve(type="dc", absolute_error=1.0e-13, relative_error=1e-12, maximum_iterations=30)
solve(type="dc", absolute_error=1.0e-13, relative_error=1e-12, maximum_iterations=30)
#
##write_devices -file gmsh_mos2d_potentialonly.flps -type floops
write_devices(file="gmsh_mos2d_potentialonly", type="vtk")
# ---- Drift-diffusion: add carriers seeded from the equilibrium result --
for i in silicon_regions:
    CreateSolution(device, i, "Electrons")
    CreateSolution(device, i, "Holes")
    set_node_values(device=device, region=i, name="Electrons", init_from="IntrinsicElectrons")
    set_node_values(device=device, region=i, name="Holes", init_from="IntrinsicHoles")
    CreateSiliconDriftDiffusion(device, i, "mu_n", "mu_p")
for c in contacts:
    tmp = get_region_list(device=device, contact=c)
    r = tmp[0]
    CreateSiliconDriftDiffusionAtContact(device, r, c)
# Loose absolute tolerance for the first coupled solve from the seed.
solve(type="dc", absolute_error=1.0e30, relative_error=1e-5, maximum_iterations=30)
for r in silicon_regions:
    node_model(device=device, region=r, name="logElectrons", equation="log(Electrons)/log(10)")
write_devices(file="mos_2d_dd.msh", type="devsim")
# ---- Dump all solver parameters as a replayable python script ----------
with open("mos_2d_params.py", "w", encoding="utf-8") as ofh:
    ofh.write('import devsim\n')
    for p in get_parameter_list():
        # Skip entries that are not plain serializable parameters.
        if p in ('solver_callback', 'direct_solver', 'info'):
            continue
        v=repr(get_parameter(name=p))
        ofh.write('devsim.set_parameter(name="%s", value=%s)\n' % (p, v))
    for i in get_device_list():
        for p in get_parameter_list(device=i):
            v=repr(get_parameter(device=i, name=p))
            ofh.write('devsim.set_parameter(device="%s", name="%s", value=%s)\n' % (i, p, v))
    for i in get_device_list():
        for j in get_region_list(device=i):
            for p in get_parameter_list(device=i, region=j):
                v=repr(get_parameter(device=i, region=j, name=p))
                ofh.write('devsim.set_parameter(device="%s", region="%s", name="%s", value=%s)\n' % (i, j, p, v))
| [
"juan@tcad.com"
] | juan@tcad.com |
a5649bccb1465ea6b0584343b7c494035ac67e5a | 9b60beb5ef167dc57a700ddaa0481a173e9c24d8 | /my315ok/products/browser/multipgkuptabs.py | cc1156178510f15ad10b490a53baa4de5b455653 | [] | no_license | adam139/my315ok.products | 8e9460f649e6c57575b3a7838e92fa40f147a99d | d335ed679dd68cddc6aee006f873a038b5258508 | refs/heads/master | 2020-12-25T16:48:12.378815 | 2018-04-08T00:35:20 | 2018-04-08T00:35:20 | 23,514,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py |
from five import grok
from Acquisition import aq_inner
from Products.CMFCore.utils import getToolByName
from plone.dexterity.interfaces import IDexterityContent
class multipgview(grok.View):
    """Grok view registered as 'multipgview' for any Dexterity content.

    No render()/template is defined in this block; presumably a template
    is associated through grok's naming convention — confirm.
    """
    grok.context(IDexterityContent)  # available on all Dexterity content
    grok.require('zope2.View')       # standard view permission
    grok.name('multipgview')         # traversal name: .../@@multipgview
"yuejun.tang@gmail.com"
] | yuejun.tang@gmail.com |
daec25b7662ba33c27388b1ecb296a9bbbed7d46 | 6437a3a4a31ab9ad233d6b2d985beb50ed50de23 | /PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/markdown/extensions/sane_lists.py | fc36c0b8b890e3faacae1b8b8565f9b7039063fb | [] | no_license | sreyemnayr/jss-lost-mode-app | 03ddc472decde3c17a11294d8ee48b02f83b71e7 | 3ff4ba6fb13f4f3a4a98bfc824eace137f6aabaa | refs/heads/master | 2021-05-02T08:50:10.580091 | 2018-02-08T20:32:29 | 2018-02-08T20:32:29 | 120,813,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py |
"""
Sane List Extension for Python-Markdown
=======================================
Modify the behavior of Lists in Python-Markdown t act in a sane manor.
In standard Markdown sytex, the following would constitute a single
ordered list. However, with this extension, the output would include
two lists, the first an ordered list and the second and unordered list.
1. ordered
2. list
* unordered
* list
Copyright 2011 - [Waylan Limberg](http://achinghead.com)
"""
import re
import markdown
class SaneOListProcessor(markdown.blockprocessors.OListProcessor):
    """Ordered-list processor whose children must be numbered (`N.`) items."""
    # Unlike the stock processor, a `*`/`+`/`-` bullet no longer
    # continues an ordered list; only `<digits>.` matches as a child.
    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
    SIBLING_TAGS = ['ol']
class SaneUListProcessor(markdown.blockprocessors.UListProcessor):
    """Unordered-list processor whose children must be bullet items."""
    # Only `*`, `+` or `-` bullets match as children; a numbered line
    # starts a new ordered list instead of continuing this one.
    CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
    SIBLING_TAGS = ['ul']
class SaneListExtension(markdown.Extension):
    """ Add sane lists to Markdown. """
    def extendMarkdown(self, md, md_globals):
        """ Override existing Processors. """
        # Replace the stock list processors in place, so switching list
        # type in the source starts a new list rather than continuing
        # the current one.
        md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
        md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
def makeExtension(configs=None):
    """Entry point used by Python-Markdown to instantiate the extension.

    ``configs`` previously defaulted to a shared mutable ``{}`` — the
    classic mutable-default-argument pitfall.  ``None`` is used instead
    and mapped back to a fresh empty dict, preserving the original
    behaviour for callers that omit the argument.
    """
    return SaneListExtension(configs=configs if configs is not None else {})
| [
"ryanmeyersweb@gmail.com"
] | ryanmeyersweb@gmail.com |
252bbfbe0e32a1b8e8dc584e470f0cfd6aec5562 | 431a1f738b1edfba7dad8d10a6b7520d51d917cb | /Samples/UserSamples/2017/jetFakes_Splits/Fakes5_Config.py | 8ef14d82ba983ee7a83490e4228eb327a6245ff4 | [] | no_license | aloeliger/DatacardCreator | 5ce702e46fbb77e843b44d8fe088c2645a4a8f66 | 5c7e890276a5be079ed3b677a471c1dcadcba52d | refs/heads/master | 2022-02-26T19:52:30.563747 | 2022-02-16T20:24:48 | 2022-02-16T20:24:48 | 215,602,523 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.FakeFactorUncertainty import FakeFactorUncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
# Configuration for the jet->tau-fake background sample (mu-tau, 2017).
FakeSample = Sample()
FakeSample.name = 'jetFakes'
# Location of the pre-selected 2017 ntuples — machine-specific path.
FakeSample.path = '/data/aloeliger/SMHTT_Selected_2017_Deep/'
FakeSample.files = ['Fake.root']
FakeSample.definition = ''  # no extra selection cut on top of the ntuple
FakeSample.uncertainties = [
    FakeFactorUncertainty()
]
FakeSample.eventDictionaryInstance = MuTauEventDictionary
# Use the fake-factor weighting instead of the default event weight.
FakeSample.CreateEventWeight = FakeSample.CreateEventWeight_Fake
# Entry slice processed by this job (split 5 of the jetFakes jobs —
# presumably for batch parallelism; confirm against the sibling configs).
FakeSample.startEntry = 1300000
FakeSample.endEntry = 1620000
| [
"aloelige@cern.ch"
] | aloelige@cern.ch |
17095e42f3f6435a3d69c24c293d79facc2991ee | 5679731cee36c537615d285ed72810f4c6b17380 | /049_GroupAnagrams.py | 6698f9742a7e6f69b93e644c08ed09f4dbb99aab | [] | no_license | manofmountain/LeetCode | 6b76105190a9b62df65a7b56b6def4120498b9fa | 718f688b3d316e8c10ef680d9c21ecd518d062f8 | refs/heads/master | 2021-01-12T03:41:48.318116 | 2017-07-18T12:35:58 | 2017-07-18T12:35:58 | 78,252,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
res = list()
indexTable = {}
index = 0
for str in strs:
keyStr = ''.join(sorted(str))
if keyStr not in indexTable:
res.append([str])
indexTable[keyStr] = index
index += 1
else:
res[indexTable[keyStr]].append(str)
return res
| [
"noreply@github.com"
] | manofmountain.noreply@github.com |
36a555ab5b94a8f951fdc5e96a744c84a3851024 | a8d9ed754db055f9ce6d573ddb5d90d2d91c5beb | /canal/data.py | 4873e6077ffe7ff8922ee71b5a07d6092505a494 | [] | no_license | linkcheng/spark_demo | 6c84ccf33c82045af0ab6c0547d3216a6d43d1df | db212ed138ac19e42d9c8bc9fa5d7e4d0facee6f | refs/heads/master | 2021-09-09T04:58:59.917278 | 2019-12-26T07:49:01 | 2019-12-26T07:49:01 | 145,976,836 | 0 | 0 | null | 2021-09-08T00:56:35 | 2018-08-24T10:15:15 | Jupyter Notebook | UTF-8 | Python | false | false | 3,440 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
@author: Link
@contact: zheng.long@sfy.com
@module: data
@date: 2019-01-02
bin/kafka-console-consumer.sh --bootstrap-server 192.168.30.141:6667,192.168.30.140:6667,192.168.30.139:6667 --topic example
"""
# JSON-compatibility aliases: they let the raw Canal binlog payload below
# be pasted verbatim as a Python literal (null/false/true tokens resolve).
null = None
false = False
true = True
data = {
"data": [
{
"id": "1111111349",
"name": "张三",
"password_digest": "957f8fcecdc4bc8199d8c52c6d998719",
"mobile": "13512341234",
"email": "",
"created_time": "2015-08-27 23:54:08",
"created_ip": "11.11.11.11",
"last_login_time": "2018-11-08 22:59:19",
"last_login_ip": "22.22.22.22",
"old_id": "3056",
"weixin_open_id": null,
"gender": "3",
"person_id": "53633",
"utm_source": "",
"order_cnt": "3",
"bill_status": "2",
"console_remark": "",
"updated_time": "2018-12-26 14:25:27",
"app_source": null,
"is_get_authorize": "0",
"user_address_list": null,
"bankNumber": null,
"biz_event_status": null,
"biz_event_time": null,
"biz_event_data": null,
"invitation_code": null,
"used_invitation_code": null
}
],
"database": "test",
"es": 1545805527000,
"id": 19,
"isDdl": false,
"mysqlType": {
"id": "bigint(20) unsigned",
"name": "varchar(63)",
"password_digest": "varchar(63)",
"mobile": "varchar(63)",
"email": "varchar(127)",
"created_time": "datetime",
"created_ip": "varchar(63)",
"last_login_time": "datetime",
"last_login_ip": "varchar(63)",
"old_id": "bigint(20) unsigned",
"weixin_open_id": "varchar(63)",
"gender": "enum('male','female','other')",
"person_id": "bigint(20) unsigned",
"utm_source": "varchar(127)",
"order_cnt": "smallint(5) unsigned",
"bill_status": "enum('none','has_bill','has_overdue','overdue')",
"console_remark": "varchar(512)",
"updated_time": "timestamp",
"app_source": "varchar(63)",
"is_get_authorize": "tinyint(1)",
"user_address_list": "varchar(500)",
"bankNumber": "varchar(36)",
"biz_event_status": "varchar(32)",
"biz_event_time": "datetime",
"biz_event_data": "varchar(500)",
"invitation_code": "varchar(16)",
"used_invitation_code": "varchar(16)"
},
"old": null,
"sql": "",
"sqlType": {
"id": -5,
"name": 12,
"password_digest": 12,
"mobile": 12,
"email": 12,
"created_time": 93,
"created_ip": 12,
"last_login_time": 93,
"last_login_ip": 12,
"old_id": -5,
"weixin_open_id": 12,
"gender": 4,
"person_id": -5,
"utm_source": 12,
"order_cnt": 5,
"bill_status": 4,
"console_remark": 12,
"updated_time": 93,
"app_source": 12,
"is_get_authorize": -7,
"user_address_list": 12,
"bankNumber": 12,
"biz_event_status": 12,
"biz_event_time": 93,
"biz_event_data": 12,
"invitation_code": 12,
"used_invitation_code": 12
},
"table": "User",
"ts": 1545805527438,
"type": "INSERT"
}
| [
"zheng.long@shoufuyou.com"
] | zheng.long@shoufuyou.com |
cdc5750d92187c21877277e4d8b50b0c76485614 | 9e1bda53da4c5e98190f5f25235f528d692ee5a8 | /.history/my_app/views_20210405182534.py | 224bf0f237e70a0e4e86dd9a6c523ac1dcd617e5 | [] | no_license | Jumayev-A/Project-3 | 3d373181af6a87e3fe319a13d28fcd18941167b7 | 34ddd009726cbba9ae52e74a46d554fd735566e2 | refs/heads/main | 2023-06-10T11:02:06.446151 | 2021-07-07T06:19:11 | 2021-07-07T06:19:11 | 350,375,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | from django.shortcuts import render, redirect
from django.urls import reverse_lazy, reverse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from django.views.generic.edit import DeleteView
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from my_app.models import CategoryModel, BlogModel
from my_app.forms import BlogForm
# Create your views here.
def home(request):
    """Render the landing page with every category.

    A POST is treated as a title search: blogs whose title contains
    ``q`` are attached to the context, and a flash message is shown
    when nothing matches.
    """
    categories = CategoryModel.objects.all()
    context = {
        'categories': categories,
    }
    if request.method == 'POST':
        q = request.POST.get('q')
        if q:
            queryset = BlogModel.objects.filter(Q(title__icontains=q))
            context['q'] = queryset
            # Empty result set -> flash the "not found" message
            # ('tapylmady' is runtime user-facing text, left as-is).
            if not queryset:
                messages.success(request, 'tapylmady')
    return render(request, 'home.html', context)
@login_required(login_url='/account/login/')
def create_category(request):
    """Create a CategoryModel owned by the logged-in user.

    GET renders the empty form; POST reads name/title/file straight from
    the request (no form validation) and redirects to the home page.
    """
    user = request.user
    if request.method == 'POST':
        name = request.POST.get('name')
        title = request.POST.get('title')
        file = request.FILES.get('file')
        # NOTE(review): objects.create() already saves; the chained
        # .save() issues a redundant second write.
        CategoryModel.objects.create(user=user, name=name, title=title, file=file).save()
        print(user)  # debug leftover
        return redirect('my_app:home')
    return render(request, 'create_category.html',{})
class CategoryDeleteView(DeleteView):
    """Confirm-and-delete view for a category; redirects to the home page."""
    model = CategoryModel
    success_url = reverse_lazy('my_app:home')
@login_required(login_url='/account/login/')
def update_category(request, pk):
    """Edit the category identified by *pk*.

    NOTE(review): on POST every field is overwritten, so submitting the
    form without a new upload sets ``file`` to None — confirm intended.
    """
    model = CategoryModel.objects.get(id=pk)
    if request.method == 'POST':
        name = request.POST.get('name')
        title = request.POST.get('title')
        file = request.FILES.get('file')
        # Re-fetches the instance already loaded above (redundant lookup).
        model = CategoryModel.objects.get(id=pk)
        model.name = name
        model.title = title
        model.file = file
        model.save()
        return redirect('my_app:home')
    return render(request, 'update_category.html',{'model':model})
def view_blog(request, pk):
    """List the blogs of category *pk*, paginated three per page.

    Out-of-range page numbers clamp to the last page; non-integer page
    values fall back to page 1.
    """
    page = request.GET.get('page', 1)
    blog = BlogModel.objects.filter(category_id=pk)
    paginator = Paginator(blog, 3)
    try:
        blogs = paginator.page(page)
    except EmptyPage:
        blogs = paginator.page(paginator.num_pages)
    except PageNotAnInteger:
        blogs = paginator.page(1)
    # Full page range is passed so the template can render page links.
    count_pag = paginator.page_range
    return render(request, 'blog_list.html',{'count_pag':count_pag,'blogs':blogs,'pk':pk})
@login_required(login_url='/account/login/')
def create_blog(request, pk):
    """Create a blog post inside category *pk* using BlogForm.

    The category id is injected after validation via commit=False.
    """
    user = request.user  # NOTE(review): unused — confirm before removing
    form = BlogForm()
    if request.method == 'POST':
        form = BlogForm(request.POST, request.FILES)
        if form.is_valid():
            form = form.save(commit=False)
            form.category_id=pk
            form.save()
            return redirect('my_app:blog_list',pk)
    return render(request, 'create_blog.html',{"pk":pk, 'form':form})
class BlogDeleteView(DeleteView):
    """Delete a blog post, then return to its category's blog list."""

    model = BlogModel

    def get_success_url(self):
        # Redirect back to the list of the category the deleted post
        # belonged to; ``self.object`` is still set after deletion.
        # Fixed: ``args`` was a one-element set; reverse() expects a
        # sequence.  Import-time debug prints were also removed.
        return reverse('my_app:blog_list', args=[self.object.category_id])
@login_required(login_url='/account/login/')
def update_blog(request, pk):
    """Render the edit page for blog *pk* (saving is currently disabled).

    NOTE(review): ``BlogForm(model)`` passes the model instance as the
    form's positional ``data`` argument; an edit form would normally use
    ``BlogForm(instance=model)`` — confirm intent.  The commented block
    below is the disabled manual-save draft.
    """
    model = BlogModel.objects.get(id=pk)
    form = BlogForm(model)
    # if request.method == 'POST':
    #     title = request.POST.get('title')
    #     description = request.POST.get('description')
    #     file = request.FILES.get('file')
    #     model = BlogModel.objects.get(id=pk)
    #     model.title = title
    #     model.description = description
    #     model.file = file
    #     model.save()
    #     return redirect('my_app:blog_list', model.category_id)
    return render(request, 'update_blog.html',{'form':form})
def blog_detail(request, id):
    """Render the detail page for a single blog post.

    Fixed: the original context literal ``{'blog':blog,'model.category_id'}``
    mixed a dict entry with a bare string, which is a SyntaxError; the
    stray item is dropped.  ``id`` shadows the builtin but is kept because
    the URLconf passes it by keyword.
    """
    blog = BlogModel.objects.get(id=id)
    return render(request, 'blog_detail.html', {'blog': blog})
| [
"abdy.jumayev@gmail.com"
] | abdy.jumayev@gmail.com |
21fda4d68e16fb134142cf28caea0fd629dfbfd4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_237/ch87_2019_06_06_23_04_02_860281.py | 7d6be3cdd3cdd575fd5232e368daa05fb936291d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | with open ("churras.txt", "r") as arquivo:
conteudo = arquivo.readlines()
quantidade = conteudo[1::3]
preco = conteudo[2::3]
soma = 0
len_listas = len(quantidade)
for e in range(1,len_listas):
soma += int(quantidade[e-1]) * float(preco[e-1][:5])
print(soma)
| [
"you@example.com"
] | you@example.com |
1368e7d59993e242bd8df5f4001e611ee15a8ea1 | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /others/language/python/flask/projects/flask-api-swagger-doc/app.py | cde3f48bff736128aa64aada4524a75af42aa4b0 | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | Python | false | false | 1,853 | py | from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
from flask import Flask, jsonify, render_template, send_from_directory
from marshmallow import Schema, fields
app = Flask(__name__, template_folder='swagger/templates')
@app.route('/')
def hello_world():
    """Trivial index route, usable as a liveness check."""
    return 'Hello World!'
# Shared APISpec instance; the /todo view is registered onto it below
# and the resulting document is served at /api/swagger.json.
spec = APISpec(
    title='flask-api-swagger-doc',
    version='1.0.0',
    openapi_version='3.0.2',
    plugins=[FlaskPlugin(), MarshmallowPlugin()]
)
@app.route('/api/swagger.json')
def create_swagger_spec():
    """Expose the generated OpenAPI specification as JSON."""
    return jsonify(spec.to_dict())
class ToDoResponseSchema(Schema):
    """Marshmallow schema for a single todo item."""
    id = fields.Int()
    title = fields.Str()
    status = fields.Boolean()  # True when the task is done
class TodoListResponseSchema(Schema):
    """Envelope schema: a list of todo items under the 'todo_list' key."""
    todo_list = fields.List(fields.Nested(ToDoResponseSchema))
@app.route('/todo')
# NOTE: the docstring below is not ordinary documentation — apispec parses
# it (via spec.path(view=todo)) as YAML to build the OpenAPI operation for
# this route, so it must stay valid YAML.
def todo():
    """Get List of Todo
    ---
    get:
      description: Get List of Todos
      responses:
        200:
          description: Return a todo list
          content:
            application/json:
              schema: TodoListResponseSchema
    """
    # Hard-coded sample payload; serialized through the marshmallow schema.
    dummy_data = [{
        'id': 1,
        'title': 'Finish this task',
        'status': False
    },
    {
        'id': 2,
        'title': 'Finish that task',
        'status': True
    }
    ]
    return TodoListResponseSchema().dump({'todo_list': dummy_data})
# Register the /todo view's YAML docstring with the spec; spec.path()
# inspects the Flask route, which requires an active request context.
with app.test_request_context():
    spec.path(view=todo)
@app.route('/docs')
@app.route('/docs/<path:path>')
def swagger_docs(path=None):
    """Serve the Swagger-UI front end.

    /docs (or /docs/index.html) renders the templated index page with
    the docs base URL; any other path is served from the bundled static
    assets.
    """
    if not path or path == 'index.html':
        return render_template('index.html', base_url='/docs')
    else:
        return send_from_directory('./swagger/static', path)
if __name__ == '__main__':
    # Development entry point; debug=True enables the reloader/debugger
    # and must not be used in production.
    app.run(debug=True)
| [
"sagarnikam123@gmail.com"
] | sagarnikam123@gmail.com |
1e34501c813604e30eaf8cac50b7813ecb41e56c | 755c0476da2bde006303b355371270132541c63c | /data_structures/tree/preorder.py | f9422a08eba0279ca625225241e27470c0af6ef3 | [] | no_license | diegoami/hackerrank-exercises | d411164eb32f4a5ac36df1ca81fa40cc51ae6fab | 4daaa81273ec27278e530ab882445c040041cbd7 | refs/heads/master | 2021-03-24T13:04:26.105170 | 2017-08-27T20:07:25 | 2017-08-27T20:07:25 | 90,566,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | """
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)
"""
def scan(node):
    """Collect node values in pre-order (root, left, right) as strings."""
    if not node:
        return []
    values = [str(node.data)]
    values.extend(scan(node.left))
    values.extend(scan(node.right))
    return values


def preOrder(root):
    """Print the pre-order traversal of *root* as one space-separated line."""
    print(" ".join(scan(root)))
| [
"diego.amicabile@gmail.com"
] | diego.amicabile@gmail.com |
48dc96d39678bff752cc24ce6348a458d01041fd | 0ba65f7c0e36c6eacdc559ecf6df221a6be89365 | /myshop/__init__.py | 37b137633c6d9184212ac2616855685c3a232ec3 | [] | no_license | maciejbihun9/django_tutorial_shop | a9811352dbf3ae20bb88972e4d593e6b7b4c2b4f | f2c0154ec47ba81a9ce5901c82aca50968380aa2 | refs/heads/master | 2021-08-31T18:49:44.787321 | 2017-12-22T12:25:03 | 2017-12-22T12:25:03 | 115,113,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | # Import celery.
# Importing the celery module in the project's __init__.py file is
# necessary to make sure it gets loaded
# when Django starts up.
from .celery import app as celery_app | [
"mac.bih@wp.pl"
] | mac.bih@wp.pl |
73da36fb856c2e81d8b4903cbe62c6b5394362b7 | b2b79cc61101ddf54959b15cf7d0887d114fb4e5 | /web/pgadmin/tools/debugger/tests/test_debugger_poll_result.py | a19a968f21c3e24f8698a1fbe2cee3a5e4d98c57 | [
"PostgreSQL"
] | permissive | 99Percent/pgadmin4 | 8afe737eb2ec1400ab034ad1d8a4f7c4ba4c35c8 | 5e0c113c7bc4ffefbec569e7ca5416d9acf9dd8a | refs/heads/master | 2021-10-10T20:08:48.321551 | 2021-09-30T12:51:43 | 2021-09-30T12:51:43 | 165,702,958 | 0 | 0 | NOASSERTION | 2019-01-14T17:18:40 | 2019-01-14T17:18:39 | null | UTF-8 | Python | false | false | 3,016 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2021, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
from pgadmin.utils.route import BaseTestGenerator
from regression.python_test_utils import test_utils as utils
from . import utils as debugger_utils
from unittest.mock import patch
from regression import parent_node_dict
from pgadmin.browser.server_groups.servers.databases.schemas.functions \
.tests import utils as funcs_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as db_utils
class DebuggerPollResult(BaseTestGenerator):
""" This class will execute query in debugger."""
scenarios = utils.generate_scenarios('poll_result',
debugger_utils.test_cases)
def setUp(self):
super(DebuggerPollResult, self).setUp()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.schema_id = self.schema_data['schema_id']
local_self = funcs_utils.set_up(self)
self.test_data['funcowner'] = self.server["username"]
function_info = debugger_utils.create_function(self, utils)
self.func_id = json.loads(function_info.data)['node']['_id']
if self.add_extension:
debugger_utils.add_extension(self, utils, db_utils=db_utils)
init_debugger = debugger_utils.init_debugger_function(self)
self.trans_id = json.loads(init_debugger.data)['data']['trans_id']
if self.init_target:
debugger_utils.initialize_target(self, utils)
debugger_utils.start_listener(self, utils, db_utils)
def execute_query(self):
return self.tester.get(
self.url + str(self.trans_id) + '/',
content_type='application/json')
def runTest(self):
"""
This function will initialize the debugger for function and procedures.
"""
if self.is_positive_test:
response = self.execute_query()
else:
if self.mocking_required:
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.execute_query()
else:
response = self.execute_query()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
"""This function delete the server from SQLite """
debugger_utils.close_debugger(self)
debugger_utils.delete_function(self, utils)
db_utils.disconnect_database(self, self.server_id, self.db_id)
| [
"akshay.joshi@enterprisedb.com"
] | akshay.joshi@enterprisedb.com |
8de46a477f2b4ba26e46c99e2c0cf70696d64f32 | 1310ca784c1b0b9238f2407eb59d0704b8ae5a08 | /NextGen/circuitpython/adafruit-circuitpython-bundle-6.x-mpy-20201114/examples/gps_echotest.py | 326255efd4551edee0ab37c713ecdbe28cbe8600 | [] | no_license | RyannDaGreat/LightWave | 6b89838bfd48dba010eb5229b84b206be4e8ccbb | d055b0c01b01b3795d9e6c28b6b70f969893ed97 | refs/heads/master | 2023-07-20T08:23:47.526629 | 2023-07-18T00:25:02 | 2023-07-18T00:25:02 | 123,113,725 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | # Simple GPS module demonstration.
# Will print NMEA sentences received from the GPS, great for testing connection
# Uses the GPS to send some commands, then reads directly from the GPS
import time
import board
import busio
import adafruit_gps
# Create a serial connection for the GPS connection using default speed and
# a slightly higher timeout (GPS modules typically update once a second).
# These are the defaults you should use for the GPS FeatherWing.
# For other boards set RX = GPS module TX, and TX = GPS module RX pins.
uart = busio.UART(board.TX, board.RX, baudrate=9600, timeout=10)
# for a computer, use the pyserial library for uart access
# import serial
# uart = serial.Serial("/dev/ttyUSB0", baudrate=9600, timeout=10)
# If using I2C, we'll create an I2C interface to talk to using default pins
# i2c = board.I2C()
# Create a GPS module instance.
gps = adafruit_gps.GPS(uart) # Use UART/pyserial
# gps = adafruit_gps.GPS_GtopI2C(i2c) # Use I2C interface
# Initialize the GPS module by changing what data it sends and at what rate.
# These are NMEA extensions for PMTK_314_SET_NMEA_OUTPUT and
# PMTK_220_SET_NMEA_UPDATERATE but you can send anything from here to adjust
# the GPS module behavior:
# https://cdn-shop.adafruit.com/datasheets/PMTK_A11.pdf
# Turn on the basic GGA and RMC info (what you typically want)
gps.send_command(b"PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")
# Turn on just minimum info (RMC only, location):
# gps.send_command(b'PMTK314,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')
# Turn off everything:
# gps.send_command(b'PMTK314,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')
# Tuen on everything (not all of it is parsed!)
# gps.send_command(b'PMTK314,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0')
# Set update rate to once a second (1hz) which is what you typically want.
gps.send_command(b"PMTK220,1000")
# Or decrease to once every two seconds by doubling the millisecond value.
# Be sure to also increase your UART timeout above!
# gps.send_command(b'PMTK220,2000')
# You can also speed up the rate, but don't go too fast or else you can lose
# data during parsing. This would be twice a second (2hz, 500ms delay):
# gps.send_command(b'PMTK220,500')
# Main loop runs forever printing data as it comes in
timestamp = time.monotonic()
while True:
data = gps.read(32) # read up to 32 bytes
# print(data) # this is a bytearray type
if data is not None:
# convert bytearray to string
data_string = "".join([chr(b) for b in data])
print(data_string, end="")
if time.monotonic() - timestamp > 5:
# every 5 seconds...
gps.send_command(b"PMTK605") # request firmware version
timestamp = time.monotonic()
| [
"sqrtryan@gmail.com"
] | sqrtryan@gmail.com |
a02e37cb873e78b9f8c832b6479e5964babeccbb | 567123af2df15856e443f0202a1a44479bf77544 | /claf/model/multi_task/bert.py | 1245e11dd871a8db18c23ac61b7d1e7805677811 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | y1027/claf | 36d0d902dd06cbde868b8cdd9ebd9c7b4ff5d807 | 56422a8b0a91b9626425deeee5a57110b7b45605 | refs/heads/master | 2020-08-06T02:31:49.301180 | 2019-10-03T17:12:10 | 2019-10-03T17:12:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,776 | py |
from overrides import overrides
from pytorch_transformers import BertModel
import torch.nn as nn
from claf.data.data_handler import CachePath
from claf.decorator import register
from claf.model.base import ModelWithoutTokenEmbedder
from claf.model.multi_task.category import TaskCategory
from claf.model.multi_task.mixin import MultiTask
@register("model:bert_for_multi")
class BertForMultiTask(MultiTask, ModelWithoutTokenEmbedder):
"""
Implementation of Sentence Classification model presented in
BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
(https://arxiv.org/abs/1810.04805)
* Args:
token_embedder: used to embed the sequence
num_classes: number of classified classes
* Kwargs:
pretrained_model_name: the name of a pre-trained model
dropout: classification layer dropout
"""
def __init__(self, token_makers, tasks, pretrained_model_name=None, dropouts=None):
super(BertForMultiTask, self).__init__(token_makers)
self.use_pytorch_transformers = True # for optimizer's model parameters
self.tasks = tasks
assert len(tasks) == len(dropouts)
self.curr_task_category = None
self.curr_dataset = None
self.shared_layers = BertModel.from_pretrained(
pretrained_model_name, cache_dir=str(CachePath.ROOT)
)
self.task_specific_layers = nn.ModuleList()
for task, dropout in zip(tasks, dropouts):
task_layer = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(self.shared_layers.config.hidden_size, task["num_label"])
)
self.task_specific_layers.append(task_layer)
self._init_criterions(tasks)
def _init_criterions(self, tasks):
self.criterions = {}
for task_index, task in enumerate(tasks):
task_category = task["category"]
criterion = None
if task_category == TaskCategory.SEQUENCE_CLASSIFICATION or task_category == TaskCategory.READING_COMPREHENSION:
criterion = nn.CrossEntropyLoss()
elif task_category == TaskCategory.TOKEN_CLASSIFICATION:
ignore_tag_idx = task.get("ignore_tag_idx", 0)
criterion = nn.CrossEntropyLoss(ignore_index=ignore_tag_idx)
elif task_category == TaskCategory.REGRESSION:
criterion = nn.MSELoss()
else:
raise ValueError("Check task_category.")
self.criterions[task_index] = criterion
@overrides
def forward(self, features, labels=None):
"""
* Args:
features: feature dictionary like below.
{
"bert_input": {
"feature": [
[3, 4, 1, 0, 0, 0, ...],
...,
]
},
"token_type": {
"feature": [
[0, 0, 0, 0, 0, 0, ...],
...,
],
}
}
* Kwargs:
label: label dictionary like below.
{
"class_idx": [2, 1, 0, 4, 5, ...]
"data_idx": [2, 4, 5, 7, 2, 1, ...]
}
Do not calculate loss when there is no label. (inference/predict mode)
* Returns: output_dict (dict) consisting of
- sequence_embed: embedding vector of the sequence
- logits: representing unnormalized log probabilities
- class_idx: target class idx
- data_idx: data idx
- loss: a scalar loss to be optimized
"""
bert_inputs = features["bert_input"]["feature"]
token_type_ids = features["token_type"]["feature"]
attention_mask = (bert_inputs > 0).long()
outputs = self.shared_layers(
bert_inputs, token_type_ids=token_type_ids, attention_mask=attention_mask
)
pooled_output = outputs[1]
task_index = features["task_index"]
self.curr_task_category = self.tasks[task_index]["category"]
self.curr_dataset = self._dataset.task_datasets[task_index]
# TODO: add ReadingComprehension and TokenClassification forward
task_specific_layer = self.task_specific_layers[task_index]
logits = task_specific_layer(pooled_output)
output_dict = {
"task_index": task_index,
"sequence_embed": pooled_output,
"logits": logits,
}
if labels:
label_key = None
if self.curr_task_category == TaskCategory.SEQUENCE_CLASSIFICATION:
label_key = "class_idx"
elif self.curr_task_category == TaskCategory.REGRESSION:
label_key = "score"
else:
raise ValueError("task category error.")
label_value = labels[label_key]
data_idx = labels["data_idx"]
output_dict[label_key] = label_value
output_dict["data_idx"] = data_idx
# Loss
num_label = self.tasks[task_index]["num_label"]
criterion = self.criterions[task_index.item()]
logits = logits.view(-1, num_label)
if num_label == 1:
label_value = label_value.view(-1, 1)
loss = criterion(logits, label_value)
output_dict["loss"] = loss.unsqueeze(0) # NOTE: DataParallel concat Error
return output_dict
@overrides
def print_examples(self, index, inputs, predictions):
"""
Print evaluation examples
* Args:
index: data index
inputs: mini-batch inputs
predictions: prediction dictionary consisting of
- key: 'id' (sequence id)
- value: dictionary consisting of
- class_idx
* Returns:
print(Sequence, Sequence Tokens, Target Class, Predicted Class)
"""
task_index = inputs["features"]["task_index"]
task_dataset = self._dataset.task_datasets[task_index]
task_category = self.tasks[task_index]["category"]
data_idx = inputs["labels"]["data_idx"][index].item()
data_id = task_dataset.get_id(data_idx)
helper = task_dataset.helper
sequence_a = helper["examples"][data_id]["sequence_a"]
sequence_a_tokens = helper["examples"][data_id]["sequence_a_tokens"]
sequence_b = helper["examples"][data_id]["sequence_b"]
sequence_b_tokens = helper["examples"][data_id]["sequence_b_tokens"]
print()
print("Task(Dataset) name:", self.tasks[task_index]["name"])
print()
print("- Sequence a:", sequence_a)
print("- Sequence a Tokens:", sequence_a_tokens)
if sequence_b:
print("- Sequence b:", sequence_b)
print("- Sequence b Tokens:", sequence_b_tokens)
if task_category == TaskCategory.SEQUENCE_CLASSIFICATION:
target_class_text = helper["examples"][data_id]["class_text"]
pred_class_idx = predictions[data_id]["class_idx"]
pred_class_text = task_dataset.get_class_text_with_idx(pred_class_idx)
print("- Target:")
print(" Class:", target_class_text)
print("- Predict:")
print(" Class:", pred_class_text)
elif task_category == TaskCategory.REGRESSION:
target_score = helper["examples"][data_id]["score"]
pred_score = predictions[data_id]["score"]
print("- Target:")
print(" Score:", target_score)
print("- Predict:")
print(" Score:", pred_score)
print()
| [
"humanbrain.djlee@gmail.com"
] | humanbrain.djlee@gmail.com |
d97e5381d0709cc8ce03fd994e5b6cbcae03fc18 | dfc2c18053b8e7576f88e7b2524d7ca3a8f47282 | /ch01/section1/37.py | c8e30da41f75a18dde4404f0de40fdaed2599207 | [] | no_license | Xoozi/tchomework | a6eed3bbf697ff12af8d42249ec58a139aed0c4c | 627c98b0b652ef20fd93025a17341bba76fbfce6 | refs/heads/master | 2021-01-23T21:18:15.793703 | 2018-10-21T11:05:55 | 2018-10-21T11:05:55 | 57,583,655 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | #磨光发动机气缸使之变窄到横截面积为9平方英寸之前, 需要知道它和标准的气缸直径x0=3.385英寸的偏差有多少.
#容许所要求的9平方英寸面积有0.01平方英寸以内的误差. 为求容许偏差, 设A(x) = pi*(x/2)**2
#求区间, 在该区间内所有的x都有|A(x) - 9| <= 0.01
#
# |A(x) - 9| <= 0.01
# => -0.01 <= A(x) - 9 <= 0.01
# => 8.99 <= pi*(x/2)**2 <= 9.01
# => 2*sqrt(8.99/pi)<=x<=2*sqrt(9.01/pi)
def g(x):
return 2*sqrt(x/pi)
print "x ∈ [%f, %f]" % (g(8.99), g(9.01))
#得到的答案是x ∈ [3.383256, 3.387018]
#xoozi 这里比较重要的一点, 实际加工中, 为了保险起见
#会将区间的低端向上舍入, 高段向下舍入得到
#x ∈ [3.384, 3.387]
| [
"wwfxtt@gmail.com"
] | wwfxtt@gmail.com |
716aba24b7c3bfb62f58316d5ba3598a3923dbdf | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/DATA_STRUC_PYTHON_NOTES/python-prac/Overflow/_Learning/problems/problem_01_while_loop.py | 81d830d837ce843244290f0fe32399965dad1a0f | [
"MIT",
"Python-2.0"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 735 | py | # WHILE LOOP
#
# In this problem, write a function named "my_while_loop" that accepts an
# iterable of strings as a parameter and returns a new list with strings from
# the original list that are longer than five characters. The function must use
# a while loop in its implementation. The order of the strings in the new list
# must be in the same order they were in the old list.
#
# There are two sample data calls for you to use.
# WRITE YOUR FUNCTION HERE
def
# TEST DATA
test = ["nope", "yes this one", "not", "uhuh", "here's one", "narp"]
print(my_while_loop(test)) # > ["yes this one", "here's one"]
test = ["plop", "", "drop", "zop", "stop"]
print(my_while_loop(test)) # > []
test = []
print(my_while_loop(test)) # > []
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
2064722d83023f32a337a5d533cd2a04636219f4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2603/61106/286389.py | 35e5a9b32b98fc8534ed27e662e9aad73c7de7b4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | ori=input()
nums=[ori[i] for i in range(1,len(ori),2)]
k=int(input())
if nums!=[]:
for i in range(len(nums)):
nums[i]=int(nums[i])
nums.sort()
result=[]
for i in range(len(nums)):
n=i+1
while n<len(nums):
result.append(nums[n]-nums[i])
n += 1
result.sort()
print(result[k-1]) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
c678dfd352b74976c8f6fa986563092b5def9b0c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03327/s297227585.py | a516928e563406e47dec9baa926559fd14bc992f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | import sys
def input(): return sys.stdin.readline().rstrip()
def main():
N = int(input())
if N >= 1000:
print('ABD')
else:
print('ABC')
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e644614e732ce14a452280dc39f0a321b105376b | 05f9f4d091c1b79d524c3fc047b5209bbb27156c | /acoustics/standards/iso_9613_1_1993.py | 0f84ca937a3406bd677d90ad7be901d809aeb104 | [
"BSD-3-Clause",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | miftanurfarid/python-acoustics | 2c73b39c3e98690d313e07b7829561074cc76bbd | 03aacc0f36c83e19d33c9989d91048af1ab752e6 | refs/heads/master | 2020-03-11T14:05:55.396964 | 2018-04-08T11:21:23 | 2018-04-08T11:21:23 | 130,044,011 | 1 | 0 | null | 2018-04-18T10:12:53 | 2018-04-18T10:12:53 | null | UTF-8 | Python | false | false | 4,931 | py | """
ISO 9613-1:1993
===============
ISO 9613-1:1993 specifies an analytical method of calculating the attenuation of sound
as a result of atmospheric absorption for a variety of meteorological conditions.
"""
import numpy as np
SOUNDSPEED = 343.2
"""
Speed of sound.
"""
REFERENCE_TEMPERATURE = 293.15
"""
Reference temperature.
"""
REFERENCE_PRESSURE = 101.325
"""
International Standard Atmosphere in kilopascal.
"""
TRIPLE_TEMPERATURE = 273.16
""".
Triple point isotherm temperature.
"""
def soundspeed(temperature, reference_temperature=REFERENCE_TEMPERATURE):
"""
Speed of sound :math:`c`.
:param temperature: Ambient temperature :math:`T_0`
:param reference_temperature: Reference temperature :math:`T`
The speed of sound is calculated using
.. math:: c = 343.2 \\left( \\frac{T}{T_0} \\right)
"""
return 343.2 * np.sqrt(temperature / reference_temperature)
def saturation_pressure(temperature, reference_pressure=REFERENCE_PRESSURE, triple_temperature=TRIPLE_TEMPERATURE):
"""
Saturation vapour pressure :math:`p_{sat}`.
:param temperature: Ambient temperature :math:`T`
:param reference_pressure: Reference pressure :math:`p_r`
:param triple_temperature: Triple point temperature water :math:`T_{01}`
The saturation vapour pressure is calculated using
.. math:: p_{sat} = 10^C \cdot p_r
with exponent :math:`C` given by
.. math:: C = -6.8346 \cdot \\left( \\frac{T_{01}}{T} \\right)^{1.261} + 4.6151
"""
return reference_pressure * 10.0** (-6.8346 *(triple_temperature/temperature)**(1.261) + 4.6151)
def molar_concentration_water_vapour(relative_humidity, saturation_pressure, pressure):
"""
Molar concentration of water vapour :math:`h`.
:param relative_humidity: Relative humidity :math:`h_r`
:param saturation_pressure: Saturation pressure :math:`p_{sat}`
:param pressure: Ambient pressure :math:`p`
The molar concentration of water vapour is calculated using
.. math:: h = h_r \\frac{p_{sat}}{p_a}
"""
return relative_humidity * saturation_pressure / pressure
def relaxation_frequency_oxygen(pressure, h, reference_pressure=REFERENCE_PRESSURE):
"""
Relaxation frequency of oxygen :math:`f_{r,O}`.
:param pressure: Ambient pressure :math:`p_a`
:param reference_pressure: Reference pressure :math:`p_r`
:param h: Molar concentration of water vapour :math:`h`
The relaxation frequency of oxygen is calculated using
.. math:: f_{r,O} = \\frac{p_a}{p_r} \\left( 24 + 4.04 \cdot 10^4 h \\frac{0.02 + h}{0.391 + h} \\right)
"""
return pressure / reference_pressure * ( 24.0 + 4.04 * 10.0**4.0 * h * (0.02 + h) / (0.391 + h) )
def relaxation_frequency_nitrogen(pressure, temperature, h, reference_pressure=REFERENCE_PRESSURE, reference_temperature=REFERENCE_TEMPERATURE):
"""
Relaxation frequency of nitrogen :math:`f_{r,N}`.
:param pressure: Ambient pressure :math:`p_a`
:param temperature: Ambient temperature :math:`T`
:param h: Molar concentration of water vapour :math:`h`
:param reference_pressure: Reference pressure :math:`p_{ref}`
:param reference_temperature: Reference temperature :math:`T_{ref}`
The relaxation frequency of nitrogen is calculated using
.. math:: f_{r,N} = \\frac{p_a}{p_r} \\left( \\frac{T}{T_0} \\right)^{-1/2} \cdot \\left( 9 + 280 h \exp{\\left\{ -4.170 \\left[ \\left(\\frac{T}{T_0} \\right)^{-1/3} -1 \\right] \\right\} } \\right)
"""
return pressure / reference_pressure * (temperature/reference_temperature)**(-0.5) * (9.0 + 280.0 * h * np.exp(-4.170 * ((temperature/reference_temperature)**(-1.0/3.0) - 1.0 ) ) )
def attenuation_coefficient(pressure, temperature, reference_pressure, reference_temperature, relaxation_frequency_nitrogen, relaxation_frequency_oxygen, frequency):
"""
Attenuation coefficient :math:`\\alpha` describing atmospheric absorption in dB/m for the specified ``frequency``.
:param pressure: Ambient pressure :math:`T`
:param temperature: Ambient temperature :math:`T`
:param reference_pressure: Reference pressure :math:`p_{ref}`
:param reference_temperature: Reference temperature :math:`T_{ref}`
:param relaxation_frequency_nitrogen: Relaxation frequency of nitrogen :math:`f_{r,N}`.
:param relaxation_frequency_oxygen: Relaxation frequency of oxygen :math:`f_{r,O}`.
:param frequency: Frequencies to calculate :math:`\\alpha` for.
"""
return 8.686 * frequency**2.0 * ( ( 1.84 * 10.0**(-11.0) * (reference_pressure/pressure) * (temperature/reference_temperature)**(0.5)) + (temperature/reference_temperature)**(-2.5) * ( 0.01275 * np.exp(-2239.1 / temperature) * (relaxation_frequency_oxygen + (frequency**2.0/relaxation_frequency_oxygen))**(-1.0) + 0.1068 * np.exp(-3352.0/temperature) * (relaxation_frequency_nitrogen + (frequency**2.0/relaxation_frequency_nitrogen))**(-1.0) ) )
| [
"fridh@fridh.nl"
] | fridh@fridh.nl |
b94f13c430ff4e7bd671e0187dee59046813b3de | ae13e905feec06f2f94245481b31fcb605e485de | /practice/algorithms/implementation/the_bomberman_game.py | f731627a64180310dd3e39cc0f18d333133626d3 | [] | no_license | feadoor/hackerrank | e7a84bb20c01d420a3c37f0a7e5176ab0aac6604 | 8fa88b71d37ae83b0826a76499c9e69f947d0aeb | refs/heads/master | 2021-05-04T17:28:27.089671 | 2019-02-21T17:25:34 | 2019-02-21T17:25:34 | 120,271,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | #!/usr/local/bin/pypy
BOMB = 'O'
EMPTY = '.'
def neighbours(x, y):
return [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
def next_value(board, x, y):
if board[x][y] == 0:
return 0
elif any(board[n_x][n_y] == 1 for n_x, n_y in neighbours(x, y) if 0 <= n_x < len(board) and 0 <= n_y < len(board[n_x])):
return 0
else:
return board[x][y] - 1
def add_bombs(board):
return [[3 if board[x][y] == 0 else board[x][y] for y in xrange(len(board[x]))] for x in xrange(len(board))]
def do_tick(board):
return [[next_value(board, x, y) for y in xrange(len(board[x]))] for x in xrange(len(board))]
def run(board, ticks):
for tick in xrange(ticks):
board = do_tick(board)
if tick % 2 == 1:
board = add_bombs(board)
return board
def get_state(board, ticks):
if ticks <= 5:
return run(board, ticks)
else:
return run(board, 4 + ticks % 4)
def read_board(rows):
return [[3 if c == BOMB else 0 for c in raw_input()] for _ in xrange(rows)]
def write_board(board):
for row in board:
print ''.join(BOMB if x > 0 else EMPTY for x in row)
def main():
rows, _, ticks = map(int, raw_input().strip().split(' '))
write_board(get_state(read_board(rows), ticks))
if __name__ == "__main__":
main() | [
"sam.capplemanlynes@gmail.com"
] | sam.capplemanlynes@gmail.com |
2fb73694654fc6905a84a964a772f70523ec131d | ecb7e109a62f6a2a130e3320ed1fb580ba4fc2de | /reference-code/lambda/cm-premembers-setting/github-to-create_CodePipeline/action_fail_notification.py | d324d7ae420ac5b8b6c9cb52538ab48ac8bae923 | [] | no_license | nisheeth84/prjs_sample | df732bc1eb58bc4fd4da6e76e6d59a2e81f53204 | 3fb10823ca4c0eb3cd92bcd2d5d4abc8d59436d9 | refs/heads/master | 2022-12-25T22:44:14.767803 | 2020-10-07T14:55:52 | 2020-10-07T14:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | import logging
import os
import json
import boto3
logger = logging.getLogger()
log_level = os.getenv('LOG_LEVEL', default='INFO')
logger.setLevel(log_level)
lambda_client = None
def lambda_handler(event, context):
detail = event['detail']
state = detail['state']
logger.debug("Received event: " + json.dumps(event, indent=2))
if(state == 'FAILED'):
send_result_message(detail)
def send_result_message(detail):
global lambda_client
if not lambda_client:
lambda_client = boto3.client('lambda')
service_name = os.environ['SERVICE_NAME']
stage = os.environ['STAGE']
url_template = "https://ap-northeast-1.console.aws.amazon.com/codepipeline/home?region=ap-northeast-1#/view/{pipeline}"
message = "CodePipelineの処理にてエラーが発生しています。\n"
attachments_json = [{
"title": "AWS CodePipeline Management Console ",
"title_link": url_template.format(**detail),
"color": "danger",
"text": "詳細: {}".format(json.dumps(detail, indent=2))
}]
event = {
'message': message,
'attachments': attachments_json
}
lambda_client.invoke(
FunctionName='{0}-{1}-{2}'.format(service_name, stage, "push_slack"),
InvocationType="RequestResponse",
Payload=json.dumps(event))
| [
"phamkhachoabk@gmail.com"
] | phamkhachoabk@gmail.com |
3a80c05f6704a2de17aa722bded43b906b10f096 | 7f7213fe407f252b2323025c9b9e381a73474b7d | /analysis/summarize_results.py | 45276c2dda5e706f9078f707749e49e0772a3185 | [] | no_license | ahy3nz/graphene_build | 0ce62c2123b8c39248048d2cafbd0aafdd06ff9a | 44590b8db799136929fc06e490151f450ad30029 | refs/heads/master | 2021-06-20T07:28:20.958807 | 2019-07-03T19:48:24 | 2019-07-03T19:48:24 | 135,504,561 | 0 | 1 | null | 2018-08-06T21:47:17 | 2018-05-30T22:40:17 | Python | UTF-8 | Python | false | false | 1,104 | py | import os
import numpy as np
import pandas as pd
import pdb
trials = ['a', 'b','c']
constants = ['50', '100', '125']
#constants = ['250', '500', '1000']
angles = ['0']
#angles = ['0', '15', '30', '45']
df = pd.DataFrame()
curr_dir = os.getcwd()
for k in constants:
for a in angles:
all_forces = []
all_works = []
for trial in trials:
name = 'k{}_{}_{}'.format(k, a, trial)
os.chdir(os.path.join(curr_dir, name))
forces = np.loadtxt('pull_pullf.xvg', comments=['@', '#'])
max_force = np.max(forces[:,1])
work = np.loadtxt('work_profile.dat')
max_work = np.mean(work[int(0.9*work.shape[0]) : , 1])
all_forces.append(max_force)
all_works.append(max_work)
to_add = {'k': [k], 'angle': [a],
'max_force': [np.mean(all_forces)], 'max_force_std': [np.std(all_forces)],
'max_work': [np.mean(all_works)], 'max_work_std': [np.std(all_works)]}
df = df.append(pd.DataFrame.from_dict(to_add))
os.chdir(curr_dir)
df.to_csv('summary_weak.csv')
| [
"alexander.h.yang@vanderbilt.edu"
] | alexander.h.yang@vanderbilt.edu |
048e94b3a99ff1def1070784c1b9e04fbe6136a7 | 46684dd05ac738cdf99a63ab30b1b30544564098 | /math300/test.py | 6c0d76af421b583c97d8cb75a956801886ba8523 | [
"MIT"
] | permissive | johnnydevriese/wsu_courses | ed0e0a4c98defaea1b4dc467395101504ff075fc | b55efd501c2d8f0651891f422a486e32533f5aa0 | refs/heads/master | 2020-12-02T22:23:03.263500 | 2017-07-03T15:16:24 | 2017-07-03T15:16:24 | 96,124,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | #done on 12.15.15 an interesting way to make a 21 x 2 matrix.
from numpy import *
#~
#~ a = 2 * ones(21)
#~
#~ b = arange(1,19 + 1, 2)
#~
#~ c = zeros(21)
#~
#~ for i in arange(len(b)):
#~ c[i] = b[i]
#~
#~ z = append(a,c).reshape(2,21)
#~
#~ z = transpose(z)
#~
#~ print z
#perhaps a more traditional method using slices.
a = zeros((21, 2))
a[:,0] = 2
b = arange(1,19+1,2)
for i in arange(len(b)):
a[i,1] = b[i]
print a
| [
"johnnydevriese22@gmail.com"
] | johnnydevriese22@gmail.com |
312109917f757ebdf6b55ea0418859c583dbe428 | 8fc7fa4f10691d44563df09b093ff493b7d02e28 | /eoxserver/services/opensearch/v11/search.py | 6780066fa1764ba83bf0b0abd8ee36575312c4b6 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | constantinius/eoxserver_combined_merge_ours | f023126aefc83bd2e2dcb07bbc5028a2d3147628 | 0bc751fe4780d9095892b5be4baf11dcde2126a5 | refs/heads/master | 2021-01-17T06:39:10.461762 | 2016-04-15T14:32:11 | 2016-04-15T14:32:11 | 56,326,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,034 | py | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2015 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from collections import namedtuple
from django.http import Http404
from eoxserver.core import Component, ExtensionPoint
from eoxserver.core.decoders import kvp
from eoxserver.core.util.xmltools import NameSpaceMap
from eoxserver.resources.coverages import models
from eoxserver.services.opensearch.interfaces import (
SearchExtensionInterface, ResultFormatInterface
)
class SearchContext(namedtuple("SearchContext", [
"total_count", "start_index", "page_size", "count",
"parameters", "namespaces"
])):
@property
def page_count(self):
return self.total_count // (self.page_size or self.count)
@property
def current_page(self):
return self.start_index // (self.page_size or self.count)
class OpenSearch11SearchHandler(Component):
search_extensions = ExtensionPoint(SearchExtensionInterface)
result_formats = ExtensionPoint(ResultFormatInterface)
def handle(self, request, collection_id=None, format_name=None):
decoder = OpenSearch11BaseDecoder(request.GET)
if collection_id:
qs = models.Collection.objects.get(
identifier=collection_id
).eo_objects.all()
else:
qs = models.Collection.objects.all()
if decoder.search_terms:
# TODO: search descriptions, summary etc once available
qs = qs.filter(identifier__icontains=decoder.search_terms)
namespaces = NameSpaceMap()
all_parameters = {}
for search_extension in self.search_extensions:
# get all search extension related parameters and translate the name
# to the actual parameter name
params = dict(
(parameter["type"], request.GET[parameter["name"]])
for parameter in search_extension.get_schema()
if parameter["name"] in request.GET
)
qs = search_extension.filter(qs, params)
namespaces.add(search_extension.namespace)
all_parameters[search_extension.namespace.prefix] = params
total_count = len(qs)
if decoder.start_index and not decoder.count:
qs = qs[decoder.start_index:]
elif decoder.start_index and decoder.count:
qs = qs[decoder.start_index:decoder.start_index+decoder.count]
elif decoder.count:
qs = qs[:decoder.count]
try:
result_format = next(
result_format
for result_format in self.result_formats
if result_format.name == format_name
)
except StopIteration:
raise Http404("No such result format '%s'." % format_name)
search_context = SearchContext(
total_count, decoder.start_index, decoder.count, len(qs),
all_parameters, namespaces
)
return (
result_format.encode(request, collection_id, qs, search_context),
result_format.mimetype
)
def pos_int_zero(raw):
    """Parse *raw* as a non-negative integer (>= 0); raise ValueError otherwise."""
    parsed = int(raw)
    if parsed >= 0:
        return parsed
    raise ValueError("Value is negative")
def pos_int(raw):
    """Parse *raw* as a strictly positive integer (>= 1); raise ValueError otherwise."""
    parsed = int(raw)
    if parsed >= 1:
        return parsed
    raise ValueError("Value is negative or zero")
class OpenSearch11BaseDecoder(kvp.Decoder):
    """KVP decoder for the core OpenSearch 1.1 request parameters."""
    # Free-text search terms; optional ("num='?'").
    search_terms = kvp.Parameter("q", num="?")
    # Zero-based offset into the result set; must be >= 0.
    start_index = kvp.Parameter("startIndex", pos_int_zero, num="?", default=0)
    # Page size; must be >= 1 when given, None means "no limit".
    count = kvp.Parameter("count", pos_int, num="?", default=None)
    output_encoding = kvp.Parameter("outputEncoding", num="?", default="UTF-8")
| [
"fabian.schindler.strauss@gmail.com"
] | fabian.schindler.strauss@gmail.com |
c1dd32cf1fe3d0358e1900c84b2d4c2300278ace | 88e286474dd82c7e2e94dc4e9c7b729ef5a3a8ba | /票据打印配置修改.py | 7f5f081e07b02c6bcb340b9f970bf27c26aa02df | [] | no_license | FengZiQ/sp_gui | d02d06e94d9980c6f763039edc2de0272cb18a86 | f9c60f26f9d0dccc5c363cb422179152456e1d01 | refs/heads/master | 2020-03-28T05:04:12.877598 | 2018-12-18T11:03:37 | 2018-12-18T11:03:37 | 147,754,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | # coding=utf-8
from gui_test_tool import *
from dataForTest import *
tool = GUITestTool()
def modify_receipt_config():
    """GUI test: edit the receipt-printer config once per print type.

    Creates a fixture config, navigates to the receipt-print page, then for
    each print type (default / append / overwrite) renames the config and
    selects the type, saving and asserting the success toast each time.
    """
    # Print types: default, append, overwrite (UI option labels).
    print_type = ['默认', '追加', '覆盖']
    # Precondition: create the config that will be modified.
    add_receipt_printer_config('票据打印配置modifyTest')
    # Click the "payment configuration management" tab.
    tool.click_action(
        '//ul[@id="leftNav"]/li[4]',
        '支付配置管理标签'
    )
    # Click the "receipt print configuration" second-level tab.
    tool.click_action(
        '//a[@data-menucode="spReceiptPrint"]',
        '票据打印配置二级标签',
        response_time=5
    )
    for i in range(len(print_type)):
        # Click the edit icon.
        tool.click_action(
            '//a[@title="修改"]',
            '修改图标'
        )
        # Fill the configuration-name input field.
        tool.fill_action(
            'configName',
            '打印配置_' + print_type[i],
            '配置名称输入框',
            locator=By.ID
        )
        # Open the print-type dropdown list.
        tool.click_action(
            '//button[@data-id="printType"]',
            '打印类型下拉列表',
            response_time=1
        )
        # Select the i-th print type (dropdown list is 1-indexed).
        tool.click_action(
            '//form/div[4]/div/div/div/ul/li['+str(i+1)+']',
            '打印类型',
            response_time=1
        )
        # Click the save button.
        tool.click_action(
            'saveBtn',
            '保存按钮',
            locator=By.CLASS_NAME,
            response_time=1
        )
        # Assert the "modified successfully" message is shown.
        tool.equal_text_assert(
            'msValue',
            '消息提示',
            '修改成功',
            end='@结束@',
            locator=By.CLASS_NAME
        )
        time.sleep(3)
if __name__ == '__main__':
    modify_receipt_config()
    tool.mark_status()
    tool.finished()
    # Clean up the test environment (best effort: the bare except is
    # deliberate so teardown never fails the run if the config is gone).
    try:
        config_id = get_receipt_printer_config_info('打印配置_覆盖').get('id')
        del_receipt_printer_config(config_id)
    except:
        pass
    unbind_device([d['id'] for d in device_info])
    delete_customer(customer_info['id'])
"feng1025352529@qq.com"
] | feng1025352529@qq.com |
9cdfdaedfc70944b3269a00af13bd9deefe4deaf | 9f9a9413e43d8c45f700b015cb6de664e5115c04 | /0x04-python-more_data_structures/100-weight_average.py | b406ee9ee1dc8637f9719921cecce5dcc67e88f5 | [] | no_license | JennyHadir/holbertonschool-higher_level_programming | d2bfc733800bee7fcca10a408a2d744af40b0d4b | c826d364665e40173e453048dce1ea5cb97b4075 | refs/heads/master | 2023-04-26T19:29:17.370132 | 2021-05-17T23:04:32 | 2021-05-17T23:04:32 | 319,390,421 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #!/usr/bin/python3
def weight_average(my_list=None):
    """Return the weighted average of a list of (value, weight) pairs.

    Each item's first element is the value and the second its weight.
    Returns 0 for an empty or missing list.  Raises ZeroDivisionError if
    the weights sum to zero (same as the original behaviour).

    Fixes: the original used a mutable default argument and accumulated
    the weight sum in a variable named ``score`` (and vice versa), which
    made the formula hard to read.
    """
    if not my_list:
        return 0
    total_weight = 0
    weighted_sum = 0
    for item in my_list:
        total_weight += item[1]
        weighted_sum += item[0] * item[1]
    return weighted_sum / total_weight
| [
"hadirjenny@hotmail.com"
] | hadirjenny@hotmail.com |
9af0273121518eccb7ee6a791785c8e7a6ea7a41 | a74b7a424159638508e4083aee927e6fca1e31ad | /vlbdiffwave/impl.py | 5bcf51d38183b552386c6a5e5e1dfb906d318bf7 | [
"MIT"
] | permissive | WN1695173791/jax-variational-diffwave | 77f17c44e6df9086754d15ad314a4a6cc58aff8f | 780f615d76cff6f6210d5db0b9790961fc716905 | refs/heads/main | 2023-07-15T14:58:27.550266 | 2021-09-06T15:07:22 | 2021-09-06T15:07:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,731 | py | from typing import Optional, Tuple
import flax
import flax.linen as nn
import jax.numpy as jnp
from .config import Config
from .diffwave import DiffWave
from .logsnr import LogSNR
class VLBDiffWave:
    """Model definition of VLB-Diffwave.
    """
    def __init__(self, config: Config):
        """Initializer.
        Args:
            config: model configuration.
        """
        self.diffwave = DiffWave(config=config)
        self.logsnr = LogSNR(internal=config.internal)
    def init(self,
             key: jnp.ndarray,
             signal: jnp.ndarray,
             aux: jnp.ndarray,
             mel: jnp.ndarray) -> flax.core.frozen_dict.FrozenDict:
        """Initialize model parameters.
        Args:
            signal: [float32; [B, T]], noise signal.
            aux: [float32; [B]], timestep for logsnr, logSNR for diffwave.
            mel: [float32; [B, T // H, M]], mel-spectrogram.
        Returns:
            model parameters.
        """
        lparam = self.logsnr.init(key, aux)
        dparam = self.diffwave.init(key, signal, aux, mel)
        return flax.core.freeze({'diffwave': dparam, 'logsnr': lparam})
    def snr(self, param: flax.core.frozen_dict.FrozenDict, time: jnp.ndarray) -> \
            Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
        """Compute SNR and alpha, sigma.
        Args:
            param: parameters of LogSNR.
            time: [float32; [B]], current timestep.
        Returns:
            [float32; [B]], logSNR, normalized -logSNR, square of alpha and sigma.
        """
        # [B], [B]
        logsnr, norm_nlogsnr = self.logsnr.apply(param, time)
        # [B]
        # alpha^2 = sigmoid(logSNR), sigma^2 = sigmoid(-logSNR), so
        # alpha^2 + sigma^2 = 1 (variance-preserving parameterization).
        alpha_sq, sigma_sq = nn.sigmoid(logsnr), nn.sigmoid(-logsnr)
        return logsnr, norm_nlogsnr, alpha_sq, sigma_sq
    def apply(self,
              param: flax.core.frozen_dict.FrozenDict,
              signal: jnp.ndarray,
              mel: jnp.ndarray,
              time: jnp.ndarray) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray, jnp.ndarray]]:
        """Denoise signal w.r.t timestep on mel-condition.
        Args:
            param: model parameters.
            signal: [float32; [B, T]], noised signal.
            mel: [float32; [B, T // H, M]], mel-spectrogram.
            timestep: [float32; [B]], current timestep.
        Returns:
            noise: [float32; [B, T]], estimated noise.
            alpha_sq, sigma_sq: [float32; [B]], signal, noise rates.
        """
        # [B] x 4
        _, norm_nlogsnr, alpha_sq, sigma_sq = self.snr(param['logsnr'], time)
        # [B, T]
        # DiffWave is conditioned on the normalized -logSNR rather than raw time.
        noise = self.diffwave.apply(param['diffwave'], signal, norm_nlogsnr, mel)
        return noise, (alpha_sq, sigma_sq)
    def denoise(self,
                param: flax.core.frozen_dict.FrozenDict,
                signal: jnp.ndarray,
                mel: jnp.ndarray,
                t: jnp.ndarray,
                s: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
        """Denoise process.
        Args:
            param: model parameters.
            signal: [float32; [B, T]], input signal.
            mel: [float32; [B, T // H, M]], mel-spectrogram.
            t: [float32; [B]], target time in range[0, 1].
            s: [float32; [B]], start time in range[0, 1], s < t.
        Returns:
            mean: [float32; [B, T]], denoised signal mean.
            std: [float32; [B]], standard deviation.
        """
        # [B, T], [B], [B]
        noise, (alpha_sq_t, sigma_sq_t) = self.apply(param, signal, mel, t)
        # [B] x 2
        _, _, alpha_sq_s, sigma_sq_s = self.snr(param['logsnr'], s)
        # [B]
        # Transition rates of q(z_t | z_s): alpha_{t|s}^2 and sigma_{t|s}^2.
        alpha_sq_tbars = alpha_sq_t / alpha_sq_s
        sigma_sq_tbars = sigma_sq_t - alpha_sq_tbars * sigma_sq_s
        # [B]
        std = jnp.sqrt(sigma_sq_tbars * sigma_sq_s / sigma_sq_t)
        # [B, T]
        mean = 1 / jnp.sqrt(alpha_sq_tbars[:, None]) * (
            signal - sigma_sq_tbars[:, None] / jnp.sqrt(sigma_sq_t[:, None]) * noise)
        # [B, T], [B]
        return mean, std
    def diffusion(self,
                  param: flax.core.frozen_dict.FrozenDict,
                  signal: jnp.ndarray,
                  noise: jnp.ndarray,
                  s: jnp.ndarray,
                  t: Optional[jnp.ndarray] = None) -> \
            Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
        """Add noise to signal.
        Args:
            param: model parameters.
            signal: [float32; [B, T]], input signal.
            noise: [float32; [B, T]], gaussian noise.
            s: [float32; [B]], start time in range[0, 1].
            t: [float32; [B]], target time in range[0, 1], s < t.
                if t is None, compute q(z_t|x), otherwise, q(z_t|z_s).
        Returns:
            alpha, sigma: [float32; [B]], signal, noise ratio.
            noised: [float32; [B, T]], noised signal.
        """
        # B
        bsize = s.shape[0]
        # [B']
        # Evaluate LogSNR for s (and t, when given) in a single batched call.
        time = s if t is None else jnp.concatenate([s, t], axis=0)
        # [B'] x 4
        _, _, alpha_sq, sigma_sq = self.snr(param['logsnr'], time)
        if t is not None:
            # [B]
            alpha_sq_s, alpha_sq_t = alpha_sq[:bsize], alpha_sq[bsize:]
            sigma_sq_s, sigma_sq_t = sigma_sq[:bsize], sigma_sq[bsize:]
            # [B]
            alpha_sq_tbars = alpha_sq_t / alpha_sq_s
            sigma_sq_tbars = sigma_sq_t - alpha_sq_tbars * sigma_sq_s
            # [B]
            alpha_sq, sigma_sq = alpha_sq_tbars, sigma_sq_tbars
        # [B]
        # Clamp at 1e-5 before sqrt for numerical stability of the gradient.
        alpha = jnp.sqrt(jnp.maximum(alpha_sq, 1e-5))
        sigma = jnp.sqrt(jnp.maximum(sigma_sq, 1e-5))
        # [B, T]
        noised = alpha[:, None] * signal + sigma[:, None] * noise
        # [B], [B], [B, T]
        return alpha, sigma, noised
| [
"revsic99@gmail.com"
] | revsic99@gmail.com |
5bdf7a1ae800e202382e3d7d0b820fccc7907387 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/docopt-docopts/allPythonContent.py | 27cda325177a0b927e2fbf1d1044a93b41b333f8 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | __FILENAME__ = language_agnostic_tester
#! /usr/bin/env python2
# NOTE: Python 2 script (print statements, bytes/str pipes); do not run under 3.
# Runs every docopt testcase against an external testee executable, feeding
# the usage doc on stdin and comparing the JSON it prints with the expectation.
import sys, json, re, os
from subprocess import Popen, PIPE, STDOUT
fixtures = open(os.path.join(os.path.dirname(__file__), 'testcases.docopt'), 'r').read()
# remove comments
fixtures = re.sub('#.*$', '', fixtures, flags=re.M)
testee = (sys.argv[1] if len(sys.argv) >= 2 else
          exit('Usage: language_agnostic_tester.py ./path/to/executable/testee [ID ...]'))
# Optional testcase IDs restrict which cases run; None means "run all".
ids = [int(x) for x in sys.argv[2:]] if len(sys.argv) > 2 else None
summary = ''
index = 0
# Each fixture is: a usage doc inside r"""...""" followed by "$ prog ..." cases.
for fixture in fixtures.split('r"""'):
    doc, _, body = fixture.partition('"""')
    for case in body.split('$')[1:]:
        index += 1
        if ids is not None and index not in ids:
            continue
        argv, _, expect = case.strip().partition('\n')
        prog, _, argv = argv.strip().partition(' ')
        assert prog == 'prog', repr(prog)
        p = Popen(testee + ' ' + argv,
                  stdout=PIPE, stdin=PIPE, stderr=STDOUT, shell=True)
        result = p.communicate(input=doc)[0]
        try:
            py_result = json.loads(result)
            py_expect = json.loads(expect)
        except:
            # 'J' marks a case whose output (or expectation) was not valid JSON.
            summary += 'J'
            print (' %d: BAD JSON ' % index).center(79, '=')
            print 'result>', result
            print 'expect>', expect
            continue
        if py_result == py_expect:
            summary += '.'
        else:
            print (' %d: FAILED ' % index).center(79, '=')
            print 'r"""%s"""' % doc
            print '$ prog %s\n' % argv
            print 'result>', result
            print 'expect>', expect
            summary += 'F'
print (' %d / %d ' % (summary.count('.'), len(summary))).center(79, '=')
print summary
########NEW FILE########
| [
"dyangUCI@github.com"
] | dyangUCI@github.com |
768adacbe51e4ff930528aa640c60e9bac3172af | eee480b75e1454832a07bd02ab4ff5058aa8033e | /db/models.py | 8f68f729ef071c222eeabe329b133a153718b6b2 | [] | no_license | 545314690/wechat-spider | 92dac44f8559ab0347b681945541f0b42be994d5 | fe5cbac4aaedf32d974a01ab5fbb3fa33332c91d | refs/heads/master | 2021-01-01T17:45:21.451910 | 2017-07-27T09:40:44 | 2017-07-27T09:40:44 | 98,148,453 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | # -*-coding:utf-8 -*-
from sqlalchemy import Column, Integer, String, Boolean, INTEGER, DateTime
from db.basic_db import Base, engine
from db.tables import *
class LoginInfo(Base):
    # Login account table: login_info (crawler login credentials).
    __tablename__ = 'login_info'
    id = Column(INTEGER, primary_key=True, autoincrement=True)
    username = Column(String(255), nullable=False)
    password = Column(String(50), nullable=False)
    status = Column(INTEGER)
    enable = Column(Boolean, default=1, server_default='1')
class User(Base):
    # WeChat public-account table: wechat_user.
    __tablename__ = 'wechat_user'
    id = Column(Integer, primary_key=True, autoincrement=True)
    nickname = Column(String(100), default='', server_default='')
    alias = Column(String(100), default='', server_default='')
    service_type = Column("service_type", INTEGER, default=1, server_default='1')
    fakeid = Column("fakeid", String(100), default='', server_default='')
    description = Column("description", String(500), default='', server_default='')
    round_head_img = Column("round_head_img", String(500), default='', server_default='')
    is_crawled = Column("is_crawled", INTEGER, default=0, server_default='0')
    is_monitored = Column("is_monitored", INTEGER, default=0, server_default='0')
    enable = Column("enable", INTEGER, default=1, server_default='1')
    # Defaults are required here: otherwise empty fields may be stored as
    # None and trigger uncaught exceptions downstream.
class KeyWords(Base):
    # Keyword search table: keywords.
    __tablename__ = 'keywords'
    id = Column(INTEGER, primary_key=True, autoincrement=True)
    keyword = Column("keyword", String(200), unique=True)
    enable = Column("enable", INTEGER, default=1, server_default='1')
class WeChatData(Base):
    # Scraped article data table: wechat_data.
    # (The original comment said "weibo_data" — a copy-paste leftover; the
    # actual table name below is wechat_data.)
    __tablename__ = 'wechat_data'
    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column("title", String(100), default='', server_default='')
    content = Column("content", String(6000), default='', server_default='')
    like_num = Column("like_num", INTEGER, default=0, server_default='0')
    read_num = Column("read_num", INTEGER, default=0, server_default='0')
    uname = Column("uname", String(20))
    url = Column("url", String(300))
    head_img = Column("head_img", String(500), default='', server_default='')
    pub_time = Column("pub_time", DateTime)
# Create all tables declared above at import time (no-op for existing tables).
Base.metadata.create_all(engine)# create tables
#Base.metadata.drop_all(engine) # drop tables (manual use only)
| [
"“545314690@qq.com”"
] | “545314690@qq.com” |
216a2b17f45760c653834956d1a4df19aa73e94e | f9aa857868a2027eb6738e16318e84e2320c2947 | /cookiecutter_django_test/contrib/sites/migrations/0003_set_site_domain_and_name.py | a36c0ace8376e9f56496ffe0c5d8e0ca91082f3e | [
"MIT"
] | permissive | imsure/cookiecutter-django-test | 44ecda7cb9449dcf78dc878a73f8eb9ba9d89f1b | 853a46e6410fc9814cadbef828987f2c5b24fe4d | refs/heads/master | 2020-03-12T18:55:37.943646 | 2018-04-24T00:29:23 | 2018-04-24T00:29:23 | 130,773,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
    """Set site domain and name."""
    # Use the historical model from the migration state, not the live import.
    Site = apps.get_model("sites", "Site")
    Site.objects.update_or_create(
        id=settings.SITE_ID,
        defaults={
            "domain": "www.metropia.com",
            "name": "Cookiecutter Django Test",
        },
    )
def update_site_backward(apps, schema_editor):
    """Revert site domain and name to default."""
    # Restores django.contrib.sites' stock example.com entry on migrate-back.
    Site = apps.get_model("sites", "Site")
    Site.objects.update_or_create(
        id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
    )
class Migration(migrations.Migration):
    """Data migration: set this project's Site row (reversible)."""
    dependencies = [("sites", "0002_alter_domain_unique")]
    operations = [migrations.RunPython(update_site_forward, update_site_backward)]
| [
"imsure95@gmail.com"
] | imsure95@gmail.com |
9929059b4cc7d12192264b8307b4f37220bc83da | a1728a475b8c4e64a3a629d9c1f43294999eb769 | /oTree/__temp_migrations/slider_post/0011_auto_20210123_2211.py | e7115dbec75e88987bb50c95d1e413510375e0ba | [] | no_license | Tslilon/experimental_instruments_labor_project | 2fff71db7453b1d455c9f2984e899f6b3f92e5f2 | 489b85eacf145eec03999bd416c4394b4a4bbafa | refs/heads/master | 2023-03-19T02:21:06.279317 | 2021-02-23T11:49:38 | 2021-02-23T11:49:38 | 325,325,614 | 0 | 0 | null | 2021-02-23T11:49:39 | 2020-12-29T15:46:15 | Python | UTF-8 | Python | false | false | 538 | py | # Generated by Django 2.2.12 on 2021-01-24 06:11
from django.db import migrations
import otree.db.models
class Migration(migrations.Migration):
    # Auto-generated by Django/oTree; the float default on an IntegerField is
    # what oTree emitted — presumably a randomized sample default; do not edit.
    dependencies = [
        ('slider_post', '0010_auto_20210123_2210'),
    ]
    operations = [
        migrations.AlterField(
            model_name='player',
            name='self_rating',
            field=otree.db.models.IntegerField(default=6.966836560555607, null=True, verbose_name='How well do you think you did in the task? (0 = Very Badly, 10 = Very Well)'),
        ),
    ]
| [
"chenxjiang@berkeley.edu"
] | chenxjiang@berkeley.edu |
82ae6ef5b855de4f23fc4565b954b99b47490386 | 26bc83ba9481257be8ec47ea032c554feb6cb2ba | /setup.py | 99a83a781060b2cdaa43e2d333a0ee84779b7231 | [] | no_license | irachex/zkpython | e8121e20b63db07dcc6e5ed2fcde94a0a1213a9b | a9a87747df9d316c2b26035f928900b72b9deb41 | refs/heads/master | 2021-01-23T12:16:43.143916 | 2014-10-26T14:22:04 | 2014-10-26T14:22:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.core import setup, Extension
# C extension wrapping the multi-threaded ZooKeeper client (libzookeeper_mt).
zookeepermodule = Extension("zookeeper",
                            sources=["zookeeper.c"],
                            include_dirs=["/usr/include/c-client-src", "/usr/local/include/c-client-src", "/usr/local/include/zookeeper"],
                            libraries=["zookeeper_mt"],
                            )
setup( name="zkpython",
       version = "0.4",
       description = "ZooKeeper Python bindings",
       ext_modules=[zookeepermodule] )
| [
"irachex@gmail.com"
] | irachex@gmail.com |
6f9b320f93d5f90a21da0f426a8d2714b87c4352 | 70433b90af33ed71fc9ab6c7ba15fe1b1ec52a90 | /tests/validation/operation/test_produces_validation.py | 5c5b9effd418860017904483a3b07bee22867320 | [
"MIT"
] | permissive | dhilton/flex | f6b54ae2c4c28e760fdcc02e00b7a4df420fba99 | e23e74e246f4ad89c8d8971b029dcdd2fa642526 | refs/heads/master | 2020-04-05T23:10:53.512712 | 2014-11-10T04:35:48 | 2014-11-10T04:35:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | import pytest
from flex.validation.operation import (
construct_operation_validators,
validate_operation,
)
from tests.factories import (
ResponseFactory,
SchemaFactory,
)
#
# produces mimetype validation.
#
def test_produces_validation_valid_mimetype_from_global_definition():
    """
    Test that a response content_type that is in the global api produces
    definitions is valid.
    """
    response = ResponseFactory(content_type='application/json')
    schema = SchemaFactory(
        produces=['application/json'],
        paths={
            '/get': {'get': {'responses': {200: {'description': 'Success'}}}},
        },
    )
    validators = construct_operation_validators(
        '/get', schema['paths']['/get'], 'get', schema,
    )
    # Should not raise: content type matches the global produces list.
    validate_operation(response, validators)
def test_produces_validation_invalid_mimetype_from_global_definition():
    """
    Test that a response content_type that is NOT in the global api produces
    definitions raises a ValidationError.
    (Original docstring was a copy-paste of the "valid" case.)
    """
    from django.core.exceptions import ValidationError
    response = ResponseFactory(content_type='application/json')
    schema = SchemaFactory(
        produces=['application/xml'],
        paths={
            '/get': {'get': {'responses': {200: {'description': 'Success'}}}},
        },
    )
    validators = construct_operation_validators(
        '/get', schema['paths']['/get'], 'get', schema,
    )
    with pytest.raises(ValidationError):
        validate_operation(response, validators, inner=True)
def test_produces_validation_for_valid_mimetype_from_operation_definition():
    """
    Test that when `produces` is defined in an operation definition, that the
    local value is used in place of any global `produces` definition.
    """
    response = ResponseFactory(content_type='application/json')
    schema = SchemaFactory(
        produces=['application/xml'],
        paths={
            '/get': {'get': {
                'responses': {200: {'description': 'Success'}},
                # Operation-level produces overrides the global one above.
                'produces': ['application/json'],
            }},
        },
    )
    validators = construct_operation_validators(
        '/get', schema['paths']['/get'], 'get', schema,
    )
    validate_operation(response, validators)
def test_produces_validation_for_invalid_mimetype_from_operation_definition():
    """
    Test the situation when the operation definition has overridden the global
    allowed mimetypes, that that the local value is used for validation.
    """
    from django.core.exceptions import ValidationError
    # Response matches the *global* produces but not the operation override.
    response = ResponseFactory(content_type='application/xml')
    schema = SchemaFactory(
        produces=['application/xml'],
        paths={
            '/get': {'get': {
                'responses': {200: {'description': 'Success'}},
                'produces': ['application/json'],
            }},
        },
    )
    validators = construct_operation_validators(
        '/get', schema['paths']['/get'], 'get', schema,
    )
    with pytest.raises(ValidationError):
        validate_operation(response, validators, inner=True)
| [
"piper@simpleenergy.com"
] | piper@simpleenergy.com |
02883ec23d9f5f21ace7da9c6f6be38f2867bfea | e5889e31869f5c00335b6fefb48da4e79bea1801 | /week_4/01_02_delete_max_heap.py | 1a584705e1ab3e6092ff5f929e884704a774cd48 | [] | no_license | HoChangSUNG/sparta_algorithm | 2f4d9767eec1c9e3c17e420be0bc905713bbdb8c | 1572027a6278c5ba6b96f426c904d8739ae3649d | refs/heads/main | 2023-02-02T22:47:22.449648 | 2020-12-21T06:56:20 | 2020-12-21T06:56:20 | 323,255,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,767 | py | class MaxHeap:
    def __init__(self):
        # Index 0 is a sentinel; the heap root lives at index 1 so that
        # parent/child indices are i//2, 2i and 2i+1.
        self.items = [None]
def insert(self, value):
self.items.append(value)
cur_index = len(self.items) - 1
while cur_index > 1: # cur_index 가 1이 되면 정상을 찍은거라 다른 것과 비교 안하셔도 됩니다!
parent_index = cur_index // 2
if self.items[parent_index] < self.items[cur_index]:
self.items[parent_index], self.items[cur_index] = self.items[cur_index], self.items[parent_index]
cur_index = parent_index
else:
break
def delete(self):
self.items[1], self.items[-1] = self.items[-1], self.items[1]
prev_max = self.items.pop()
cur_index = 1
while cur_index <= len(self.items) - 1:
left_child_index = cur_index * 2
right_child_index = cur_index * 2 + 1
max_index = cur_index
if left_child_index <= len(self.items) - 1 and self.items[left_child_index] > self.items[max_index]:
max_index = left_child_index
if right_child_index <= len(self.items) - 1 and self.items[right_child_index] > self.items[max_index]:
max_index = right_child_index
if max_index == cur_index:
break
self.items[cur_index], self.items[max_index] = self.items[max_index], self.items[cur_index]
cur_index = max_index
return prev_max
# Demo: build a heap, then delete the maximum once.
max_heap = MaxHeap()
max_heap.insert(8)
max_heap.insert(7)
max_heap.insert(6)
max_heap.insert(2)
max_heap.insert(5)
max_heap.insert(4)
print(max_heap.items) # [None, 8, 7, 6, 2, 5, 4]
print(max_heap.delete()) # must return 8
print(max_heap.items) # [None, 7, 5, 6, 2, 4]
| [
"love47024702@naver.com"
] | love47024702@naver.com |
c38888fe571c3fb10c56219cfac5fd6893db8fc7 | 5e9576c368e98927e2965bd2fb23bd35d9993d69 | /featuretools/primitives/standard/aggregation/max_min_delta.py | 74dfcaafb3e71b4d1466672426d6f42b7b5f9afa | [
"BSD-3-Clause"
] | permissive | alteryx/featuretools | c6e319e063e8e84e7684bf232376f95dc5272160 | c284c2d27a95b81e0bae913ac90df2b02c8f3b37 | refs/heads/main | 2023-08-25T12:21:33.945418 | 2023-08-23T16:30:25 | 2023-08-23T16:30:25 | 102,908,804 | 1,783 | 201 | BSD-3-Clause | 2023-09-07T18:53:19 | 2017-09-08T22:15:17 | Python | UTF-8 | Python | false | false | 1,126 | py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import AggregationPrimitive
class MaxMinDelta(AggregationPrimitive):
    """Determines the difference between the max and min value.
    Args:
        skipna (bool): Determines if to use NA/null values.
            Defaults to True to skip NA/null.
    Examples:
        >>> max_min_delta = MaxMinDelta()
        >>> max_min_delta([7, 2, 5, 3, 10])
        8
        You can optionally specify how to handle NaN values
        >>> max_min_delta_skipna = MaxMinDelta(skipna=False)
        >>> max_min_delta_skipna([7, 2, None, 3, 10])
        nan
    """
    name = "max_min_delta"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    stack_on_self = False
    # Value used when the aggregation has no rows to operate on.
    default_value = 0
    def __init__(self, skipna=True):
        self.skipna = skipna
    def get_function(self):
        def max_min_delta(x):
            # With skipna=False, any NaN propagates through max/min, so the
            # delta itself becomes NaN (see class docstring example).
            max_val = x.max(skipna=self.skipna)
            min_val = x.min(skipna=self.skipna)
            return max_val - min_val
        return max_min_delta
| [
"noreply@github.com"
] | alteryx.noreply@github.com |
abc51ee3da4656a2513b1a39da9a2afc1f616f46 | 349893015430768ac3ad0f8ca6fa8007c2e88f65 | /test_appium/cases/test_contact.py | 4501706e8731a6ca095ba7bf88622cfeffc3cc02 | [] | no_license | github653224/HogwartsLG7 | 226aa5640d1862b26f2ddc938c84b50a9e49c6a3 | 17b20d72bab376ce85d76d7ee0cab03f4e176a01 | refs/heads/main | 2023-06-07T08:33:47.293126 | 2021-06-29T14:07:41 | 2021-06-29T14:07:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | """
__author__ = 'hogwarts_xixi'
__time__ = '2021/4/16 8:48 下午'
"""
from time import sleep
import sys
sys.path.append('..')
from test_appium.pages.app import App
class TestContact:
    """Appium GUI tests: add members to the contact list manually."""
    def setup_class(self):
        # Launch the app once for the whole class.
        self.app = App()
    def setup(self):
        # Before each test: (re)start the app and navigate to the main page.
        self.main = self.app.start().goto_main()
    def teardown_class(self):
        self.app.stop()
    def test_addcontact(self):
        username = "hogwarts05"
        phonenum = "13911111115"
        self.main.goto_contactlist().goto_addmemberpage() \
            .addmember_bymenual().edit_member(username, phonenum).verify_ok()
    def test_addcontact1(self):
        username = "hogwarts04"
        phonenum = "13911111114"
        self.main.goto_contactlist().goto_addmemberpage() \
            .addmember_bymenual().edit_member(username, phonenum).verify_ok()
| [
"xujuan418974188@163.com"
] | xujuan418974188@163.com |
855f056f1316e0015b0e1b25a1ec3771972c8647 | a46e23e8266f7569e059f4df0cc22f3752965c77 | /slack/leetcode-challenge-alarm.py | 1c5b4c28f593f1572146cae2130200e124d8e0ef | [] | no_license | jhy979/all-solved-club | 758ae94f8b144deb8af0eabfd8445445ea43f182 | cc1494b85bc7600cf242cf6ab1a7f5e78698a126 | refs/heads/main | 2023-07-15T22:19:11.084766 | 2021-08-24T03:15:58 | 2021-08-24T03:15:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | import os
from slack_sdk.webhook import WebhookClient
# Post a LeetCode-challenge notification to Slack via an incoming webhook.
# The webhook URL comes from the environment; may be None if unset (the
# client will then fail at send time).
url = os.environ.get("SLACK_WEBHOOK_URL")
webhook = WebhookClient(url)
response = webhook.send(text="LeetCode에서 새로운 챌린지 문제가 출제되었습니다. - https://leetcode.com")
# Slack answers HTTP 200 with body "ok" on success.
assert response.status_code == 200
assert response.body == "ok"
| [
"jwp0530@gmail.com"
] | jwp0530@gmail.com |
16d50188284e895addc4ad7bb1afa19b64421511 | b09ea1e78e1a1f5d053817af35599c9464e755fe | /filter_utterances.py | cce497ee940a03394c4e60d4ae149f586b515e8e | [] | no_license | aeoling/reddit_tools | 46f0e321bf72889c82b100eae5418ac7a71b9701 | f7fa9f32e547ddb529d55017a7784d00542c3ca0 | refs/heads/master | 2021-06-13T17:57:40.606586 | 2017-03-10T01:34:43 | 2017-03-10T01:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | from argparse import ArgumentParser
from os import path, makedirs, listdir
from collections import defaultdict
import numpy as np
from nltk import word_tokenize
MIN_CONTENT_WORD_FREQUENCY = 10
def load_questions(in_stream):
    """Read one question per line, normalized (stripped, lowercased), as a set."""
    return {line.strip().lower() for line in in_stream}
def build_freq_dictionary(in_questions_set):
    """Count token frequencies across all questions (NLTK tokenization)."""
    dictionary = defaultdict(lambda: 0)
    for question in in_questions_set:
        for token in word_tokenize(question):
            dictionary[token] += 1
    return dictionary
def filter_dictionary(in_freq_dict):
    """Keep words whose frequency lies within ~2 std deviations of the mean.

    The lower bound is additionally clamped to MIN_CONTENT_WORD_FREQUENCY.
    NOTE: Python 2 code (dict.iteritems); ``variance`` actually holds the
    standard deviation (np.std) — the name is misleading.
    """
    frequencies = in_freq_dict.values()
    mean, variance = np.mean(frequencies), np.std(frequencies)
    min_frequency = max(MIN_CONTENT_WORD_FREQUENCY, mean - 2 * variance)
    max_frequency = mean + 2 * variance
    filtered_dictionary = {
        word: frequency
        for word, frequency in in_freq_dict.iteritems()
        if min_frequency <= frequency <= max_frequency
    }
    return filtered_dictionary
def build_argument_parser():
    """Build the CLI parser: one positional argument, the questions root dir."""
    argument_parser = ArgumentParser()
    argument_parser.add_argument('src_root')
    return argument_parser
def main(in_text_root):
    """Return questions that contain at least one mid-frequency content word.

    Loads every file under *in_text_root*, builds a token frequency table,
    filters it to "content" words, and keeps a question as soon as one of
    its tokens survives the filter.
    """
    all_questions = set([])
    for questions_file in listdir(in_text_root):
        with open(path.join(in_text_root, questions_file)) as questions_in:
            questions = load_questions(questions_in)
            all_questions.update(questions)
    freq_dict = build_freq_dictionary(all_questions)
    filtered_dictionary = filter_dictionary(freq_dict)
    filtered_questions = []
    for question in all_questions:
        tokens = word_tokenize(question)
        for token in tokens:
            if token in filtered_dictionary:
                filtered_questions.append(question)
                break  # one qualifying token is enough
    return filtered_questions
if __name__ == '__main__':
    parser = build_argument_parser()
    args = parser.parse_args()
    for question in main(args.src_root):
        # NOTE: Python 2 print statement — this file targets Python 2.
        print question
| [
"ishalyminov@gmail.com"
] | ishalyminov@gmail.com |
9884cd78a96910ea51c7b7436511c56069946ac6 | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/message/attachment/fields.py | a18f011507b4f126fe1cfce5f33d927754dca5ce | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 3,683 | py | __all__ = ()
from ...field_parsers import (
bool_parser_factory, entity_id_parser_factory, flag_parser_factory, float_parser_factory,
force_string_parser_factory, int_parser_factory, nullable_string_parser_factory
)
from ...field_putters import (
bool_optional_putter_factory, entity_id_putter_factory, flag_optional_putter_factory, float_optional_putter_factory,
force_string_putter_factory, int_putter_factory, nullable_string_optional_putter_factory,
nullable_string_putter_factory, url_optional_putter_factory
)
from ...field_validators import (
bool_validator_factory, entity_id_validator_factory, flag_validator_factory, float_conditional_validator_factory,
force_string_validator_factory, int_conditional_validator_factory, nullable_string_validator_factory,
url_optional_validator_factory, url_required_validator_factory
)
from .constants import DESCRIPTION_LENGTH_MAX
from .flags import AttachmentFlag
# content_type
parse_content_type = nullable_string_parser_factory('content_type')
put_content_type_into = nullable_string_putter_factory('content_type')
validate_content_type = nullable_string_validator_factory('content_type', 0, 1024)
# description
parse_description = nullable_string_parser_factory('description')
put_description_into = nullable_string_putter_factory('description')
validate_description = nullable_string_validator_factory('description', 0, DESCRIPTION_LENGTH_MAX)
# duration
parse_duration = float_parser_factory('duration_sec', 0.0)
put_duration_into = float_optional_putter_factory('duration_sec', 0.0)
validate_duration = float_conditional_validator_factory(
'duration',
0.0,
lambda duration : duration >= 0.0,
'>= 0.0',
)
# flags
parse_flags = flag_parser_factory('flags', AttachmentFlag)
put_flags_into = flag_optional_putter_factory('flags', AttachmentFlag())
validate_flags = flag_validator_factory('flags', AttachmentFlag)
# height
parse_height = int_parser_factory('height', 0)
put_height_into = int_putter_factory('height')
validate_height = int_conditional_validator_factory(
'height',
0,
lambda height : height >= 0,
'>= 0',
)
# id
parse_id = entity_id_parser_factory('id')
put_id_into = entity_id_putter_factory('id')
validate_id = entity_id_validator_factory('id')
# name
parse_name = force_string_parser_factory('filename')
put_name_into = force_string_putter_factory('filename')
validate_name = force_string_validator_factory('name', 0, 1024)
# proxy_url
parse_proxy_url = nullable_string_parser_factory('proxy_url')
put_proxy_url_into = url_optional_putter_factory('proxy_url')
validate_proxy_url = url_optional_validator_factory('proxy_url')
# size
parse_size = int_parser_factory('size', 0)
put_size_into = int_putter_factory('size')
validate_size = int_conditional_validator_factory(
'size',
0,
lambda size : size >= 0,
'>= 0',
)
# temporary
parse_temporary = bool_parser_factory('ephemeral', False)
put_temporary_into = bool_optional_putter_factory('ephemeral', False)
validate_temporary = bool_validator_factory('temporary', False)
# url
parse_url = force_string_parser_factory('url')
put_url_into = url_optional_putter_factory('url')
validate_url = url_required_validator_factory('url')
# waveform
parse_waveform = nullable_string_parser_factory('waveform')
put_waveform_into = nullable_string_optional_putter_factory('waveform')
validate_waveform = nullable_string_validator_factory('waveform', 0, 4096)
# width
parse_width = int_parser_factory('width', 0)
put_width_into = int_putter_factory('width')
validate_width = int_conditional_validator_factory(
'width',
0,
lambda width : width >= 0,
'>= 0',
)
| [
"re.ism.tm@gmail.com"
] | re.ism.tm@gmail.com |
13f67ec412e8d337686f43866268cea66429a4fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03634/s109235916.py | 458b74a4f790d735865b22558bf7c8d196e50401 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | from collections import deque
def bfs(start, g, visited):
q = deque([])
k_neibs = g[start]
visited[start] = (0,start)
for neib, dist in k_neibs:
q.append((neib, neib)) # node, root parent (k neib)
visited[neib] = (dist, neib) # dist, root parent (k neib)
while q:
curr_node, root_pare = q.popleft()
for next_node, dist in g[curr_node]:
if visited[next_node] != (-1,-1): continue
curr_dist = visited[curr_node][0]
visited[next_node] = (curr_dist+dist, root_pare)
q.append((next_node, root_pare))
def main():
n = int(input())
g = [ [] for _ in range(n+1)]
for _ in range(n-1):
a, b, c = map(int, input().split())
g[a].append((b,c))
g[b].append((a,c))
q,k = map(int, input().split())
visited = [(-1,-1)] * (n+1) # dist, root parent (k neib)
bfs(k, g, visited)
ansl = []
for _ in range(q):
x,y = map(int, input().split())
xd, xp = visited[x]
yd, yp = visited[y]
# めっちゃ無駄なことしてたーーーーーーーー!
if xp == yp:
ans = xd+yd
ansl.append(ans)
else:
ans = xd+yd
ansl.append(ans)
for a in ansl: print(a)
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c2fc1b0031863d92f615f5df5b863e07c4964440 | 0b2e54f19875e51519950c06c6110a8144d85cb4 | /python_textbook_questions/Graph/graph_dfs__adjList_defaultdictlist.py | 71c4e40cbb2c117d9fd4200802711d1817811a91 | [] | no_license | hemangbehl/Data-Structures-Algorithms_practice | b1f5d5995d4e02de2d3807e18ac0639b900d35af | e125ebd42dd4083701b13a319c368f4a622ca669 | refs/heads/master | 2020-08-06T09:03:42.253388 | 2020-07-10T23:55:39 | 2020-07-10T23:55:39 | 212,916,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | from collections import defaultdict
class Graph:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.V = vertices
def add_edge(self, src, dest):
self.graph[src].append(dest)
def printAllEdges(self):
print("all edges:")
for i in self.graph:
print("vertex #", i, ": ", end='')
for j in self.graph[i]:
print(j, end='-> ')
print("")
def DFS(self, v):
stack = list()
stack.append(v)
visited = set()
visited.add(v)
while stack:
vertex = stack.pop()
print(vertex, end=' ')
for adjVertex in self.graph[vertex]:
if adjVertex not in visited:
visited.add(adjVertex)
stack.append(adjVertex)
print("")
#driver code
g = Graph(5)
g.add_edge(1, 0)
g.add_edge(2, 1)
g.add_edge(4, 0)
g.add_edge(3, 4)
g.add_edge(0, 3)
g.add_edge(0, 2)
g.DFS(0)
g.printAllEdges()
| [
"44247931+hemangbehl@users.noreply.github.com"
] | 44247931+hemangbehl@users.noreply.github.com |
a1961d38a64e8fefe13f6f62bcd6d0ee1dd43572 | 101b0ae046501b9eb0990be2d86e5b5538aacd7a | /src/pcg/pcg/grids/square.py | f0f4c09fd1f7fa1e8b04ab8a2b3e8923c0ed3d3c | [
"BSD-3-Clause"
] | permissive | Tiendil/pcg-python | ebd12dd7abba43d758c746dad8057130f61ac720 | 02b54932bdec4a13c179ebeee76fc8ed1b8eba34 | refs/heads/master | 2022-12-15T05:32:02.970053 | 2020-09-15T10:47:18 | 2020-09-15T10:47:18 | 263,430,688 | 2 | 0 | null | 2020-09-15T10:41:29 | 2020-05-12T19:20:30 | Python | UTF-8 | Python | false | false | 3,575 | py | import math
import dataclasses
from PIL import Image
from pcg import colors
from pcg import drawer
from pcg.topologies import BaseArea
from pcg.geometry import Point, BoundingBox
@dataclasses.dataclass(frozen=True, order=True)
class Cell:
__slots__ = ('x', 'y')
x: int
y: int
def __add__(self, cell: 'Cell'):
return Cell(self.x + cell.x,
self.y + cell.y)
def __sub__(self, cell: 'Cell'):
return Cell(self.x - cell.y,
self.y - cell.y)
def scale(self, scale: float):
return Cell(self.x * scale,
self.y * scale)
def cells_rectangle(width, height):
for y in range(height):
for x in range(width):
yield Cell(x, y)
def cell_center(cell):
return Point(cell.x + 0.5, cell.y + 0.5)
def cell_bounding_box(cell):
return BoundingBox(x_min=cell.x,
y_min=cell.y,
x_max=cell.x + 1,
y_max=cell.y + 1)
def cells_bounding_box(cells):
box = cell_bounding_box(cells[0])
for cell in cells[1:]:
box += cell_bounding_box(cell)
return box
def area_template(min_distance, max_distance, distance):
area = []
for dx in range(-max_distance, max_distance + 1):
for dy in range(-max_distance, max_distance + 1):
cell = Cell(dx, dy)
if min_distance <= distance(cell) <= max_distance:
area.append(cell)
return area
def area(topology, distance, min_distance, max_distance):
cache = [None] * topology.size()
template = area_template(min_distance, max_distance, distance)
for center, index in topology.indexes.items():
points = [center + point for point in template]
cache[index] = topology.area_indexes(points)
return cache
class Euclidean(BaseArea):
__slots__ = ()
def connectome(self, topology, min_distance, max_distance):
return area(topology, self.distance, min_distance, max_distance)
def distance(self, a, b=Cell(0, 0)):
return math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2)
class Manhattan(BaseArea):
__slots__ = ()
def connectome(self, topology, min_distance, max_distance):
return area(topology, self.distance, min_distance, max_distance)
def distance(self, a, b=Cell(0, 0)):
return abs(a.x-b.x) + abs(a.y-b.y)
class Ring(BaseArea):
__slots__ = ()
def connectome(self, topology, min_distance, max_distance):
return area(topology, self.distance, min_distance, max_distance)
def distance(self, a, b=Cell(0, 0)):
return max(abs(a.x-b.x), abs(a.y-b.y))
################################
# drawers
################################
@dataclasses.dataclass
class Sprite:
color: colors.Color = colors.BLACK
image: Image = dataclasses.field(default=None, init=False, compare=False)
def prepair(self, cell_size):
self.image = Image.new('RGBA', cell_size.xy, self.color.ints)
class Drawer(drawer.Drawer):
__slots__ = ('cell_size',)
def __init__(self, cell_size, **kwargs):
super().__init__(**kwargs)
self.cell_size = cell_size
def prepair_sprite(self, sprite):
sprite.prepair(self.cell_size)
def node_position(self, node, canvas_size):
return cell_center(node.coordinates) * self.cell_size - self.cell_size / 2
def calculate_canvas_size(self, nodes):
coordinates = [node.coordinates for node in nodes]
return (cells_bounding_box(coordinates).size * self.cell_size).round_up()
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
b8d13be6eb295ce9d73ae66e5400dde099535a2c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03946/s886596927.py | 8ddaedd848e91074fcbb5e84cb91d83065d340d1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | n, t = map(int, input().split())
ns = list(map(int, input().split()))
min_n = 10 ** 10
max_n = 0
max_l = []
for i in ns[::-1]:
max_n = max(max_n, i)
max_l.append(max_n)
max_l.reverse()
l = []
for i in range(n):
l.append(ns[i]-min_n)
min_n = min(min_n, ns[i])
print(l.count(max(l))) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9e4820dd94bd2272bd3c98ea0ed4ad3f032a4622 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=25/params.py | fff0f4ebe57c15d0241b34e8b819f2e1e8e6ba77 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.602405',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.8',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 25,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
11d569aa09d84966613f28d3980d5e2949f5b07c | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/justdark_dml/dml-master/dml/CLUSTER/kmeans.py | 67e0ce8d2b50cfe1a715e5a08df442b1f2ffb503 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,095 | py | from __future__ import division
import numpy as np
import scipy as sp
import pylab as py
from scipy.cluster.vq import kmeans2,whiten
'''
the scipy contain a kmeans,so there is no need to write one
bu for the convenience of using,I pack it with my code
I don't know how to translate the input space to whitened space
so If you need please add white
'''
class KMEANSC:
def __init__(self,X,K):
self.X=np.array(X)
self.K=K
self.labels=[]
self.centroids=[]
pass
def train(self,white=False):
'''
each train change everything
'''
if (white):
self.centroids,self.labels=kmeans2(whiten(self.X),self.K,minit='random', missing='warn')
else:
self.centroids,self.labels=kmeans2(self.X,self.K,minit='random', missing='warn')
def result(self):
return self.centroids,self.labels
def bfWhiteCen(self):
''' if you use whiten on self.X in train,you need this to get the real controids
'''
Wcentroid=self.centroids
print Wcentroid
for i in range(self.K):
Wcentroid[i]=np.sum(self.X[self.labels==i],axis=0)/list(self.labels).count(i)
return Wcentroid | [
"659338505@qq.com"
] | 659338505@qq.com |
2fc943c950de622134f2630688a4ea910d09ef57 | 918963bcb425328076ce17400966378bd66f5e7e | /python-openshift-release/openshift_release/models/com_github_vfreex_release_apiserver_pkg_apis_art_v1alpha1_build_list.py | 9de3643f43abfbf8f53738b297933a91c21d3ece | [] | no_license | vfreex/release-apiserver | 884d5ce6c1ef936fc746b668884e8f97a27305d8 | a6e1df25a38f808fc194397fcd63628a999fed95 | refs/heads/master | 2023-03-09T19:05:38.011434 | 2020-05-21T09:25:47 | 2020-05-21T09:25:47 | 247,671,819 | 2 | 0 | null | 2023-02-24T23:14:38 | 2020-03-16T10:20:42 | Go | UTF-8 | Python | false | false | 7,822 | py | # coding: utf-8
"""
Api
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openshift_release.configuration import Configuration
class ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1Build]',
'kind': 'str',
'metadata': 'IoK8sApimachineryPkgApisMetaV1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:return: The items of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:rtype: list[ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1Build]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList.
:param items: The items of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:type: list[ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1Build]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:return: The metadata of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:rtype: IoK8sApimachineryPkgApisMetaV1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList.
:param metadata: The metadata of this ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList. # noqa: E501
:type: IoK8sApimachineryPkgApisMetaV1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ComGithubVfreexReleaseApiserverPkgApisArtV1alpha1BuildList):
return True
return self.to_dict() != other.to_dict()
| [
"yuxzhu@redhat.com"
] | yuxzhu@redhat.com |
c6444febe09bdfdf0db44534477486ada7928f63 | 286c7b7dd9bd48c73fd94f8e89bde99a8d3f74c5 | /modelscript/interfaces/modelc/execution.py | ac1e9752664b2e49d14a9f0f73e4a059fc6b1343 | [
"MIT"
] | permissive | ScribesZone/ModelScript | e7738471eff24a74ee59ec88d8b66a81aae16cdc | a36be1047283f2e470dc2dd4353f2a714377bb7d | refs/heads/master | 2023-03-18T02:43:57.953318 | 2021-03-08T15:26:40 | 2021-03-08T15:26:40 | 31,960,218 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,599 | py | # coding=utf-8
""""""
__all__ = (
'ExecutionContext'
)
import os
from typing import List, Any, Dict, Optional, ClassVar
from collections import OrderedDict
import argparse
# initialize the megamodel with metamodels and scripts
from modelscript.base.files import filesInTree
from modelscript.interfaces.modelc.options import getOptions
from modelscript.megamodels import Megamodel
from modelscript.base.issues import WithIssueList, OrderedIssueBoxList
class ExecutionContext(WithIssueList):
"""Execution context of modelscript session."""
args: ClassVar[List[str]]
"""The list of command line arguments"""
options: argparse.Namespace
"""The options derived from args."""
sourceMap: ClassVar[Dict[str, Optional['SourceFile']]]
"""For each source file name, the corresponding SourceFile
or None if there was an error
"""
issueBoxList: OrderedIssueBoxList
def __init__(self, args):
super(ExecutionContext, self).__init__()
assert args is not None
# extract the command options from the command line arguments
self.args = args
self.options = getOptions(args)
# self.hasManySourceFiles=len(self.options.sources)>=2
self.sourceMap = OrderedDict()
self._execute()
self.issueBoxList = OrderedIssueBoxList(
self.allSourceFileList
+ [self])
def _displayVersion(self):
print(('ModelScript - version %s' % Megamodel.model.version))
def _processSource(self, path):
"""Process a given source file or a given directory.
If a directory is given, then get all source files in
this directory recursively."""
if os.path.isdir(path):
# A directory is given: process all nested source files.
extensions = Megamodel.model.metamodelExtensions()
filenames = filesInTree(path, suffix=extensions)
if self.options.verbose:
print(('%s/ %i model files found.'
% (path, len(filenames))))
print((' '+'\n '.join(filenames)))
for filename in filenames:
self._processSource(filename)
else:
# Load a given source file
source = Megamodel.loadFile(path, self)
self.sourceMap[path] = source
def _execute(self):
# --- deal with --version -----------------------------------------
if self.options.version:
self._displayVersion()
# --- deal with --mode --------------------------------------------
print((
{'justAST': 'Checking syntax',
'justASTDep': 'Checking syntax and dependencies',
'full': 'Checking models'}
[self.options.mode]))
Megamodel.analysisLevel = self.options.mode
# --- deal with source files or source dir
for path in self.options.sources:
self._processSource(path)
@property
def validSourceFiles(self):
return (
s for s in list(self.sourceMap.values())
if s is not None)
@property
def nbIssues(self):
return self.issueBoxList.nbIssues
@property
def allSourceFileList(self):
"""
The list of all source files involved in this build,
directly or not. The list is in a topological order.
"""
return Megamodel.sourceFileList(
origins=self.validSourceFiles)
def label(self):
return 'executionContext'
def display(self, styled=True):
print((self.issueBoxList.str(styled=styled)))
# displayIssueBoxContainers(
# self.allSourceFileList+[self]
# )
# for source in self.allSourceFileList:
# print(source.issues.str(
# summary=False,
# styled=True,
# pattern='{origin}:{level}:{line}:{message}'))
# if self.hasIssues:
# print(self.issues.str(
# summary=False,
# styled=True,
# pattern='{origin}:{level}:{line}:{message}'
# ))
# # TODO:3 move this to issue.py
# def displayIssueBoxContainers(containerList):
# for container in containerList:
# if container.hasIssues:
# print(container.issues.str(
# summary=False,
# styled=True,
# pattern='{origin}:{level}:{line}:{message}'))
# else:
# if not isinstance(container, ExecutionContext):
# cprint(container.label+':'+'OK', 'green')
| [
"escribis@users.noreply.github.com"
] | escribis@users.noreply.github.com |
a0b3f3473e96a829362eb4a6e2795c317ea237ae | ae7ba9c83692cfcb39e95483d84610715930fe9e | /bmw9t/nltk/ch_three/34.py | 887616cb690d8b5f25ba28819605a36c6f85e6a4 | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | # ◑ Write code to convert nationality adjectives like Canadian and Australian to their corresponding nouns Canada and Australia (see http://en.wikipedia.org/wiki/List_of_adjectival_forms_of_place_names).
import re
import nltk
import pycountry
countries = [country.name for country in pycountry.countries]
def convert(word):
"""converts an adjectival nationality to its corresponding noun form."""
# list of regex things to check
patterns = ['ese', 'ian', 'an', 'ean', 'n', 'ic', 'ern']
#list of suffixes for appending to country names that get damaged when they are split.
suffixes = ['a', 'o']
# for every potential way of forming a nationality adjective, test them.
for pattern in patterns:
tup = re.findall(r'^(.*)(' + pattern + ')', word)
#if the regex finds a pattern, set the country to the stem of the word.
if tup:
country = tup[0][0]
# check to see if the country is in the list of countries returned by pycountry. If it is, return it.
if country in countries:
return country
# if the stem is not a country, try adding suffixes to it to see if you can pull out a real country.
else:
for suffix in suffixes:
new_country = country + suffix
if new_country in countries:
return new_country
print(convert('Mexican'))
| [
"xenron@outlook.com"
] | xenron@outlook.com |
a87c4be49b0e91e03f99718dedf309a616007fbe | c1d5ca6194e4cf1bf3d3a3dfe758a95dfb87a8bc | /Python-Itertools-Itertools_Combinations_with_replacement.py | ee8e460ebbc0f7edab0aae542397cea4e79f5c83 | [] | no_license | maheshkrishnagopal/HackerRankSolutions | 6924b9ea92f86cab0885016482e08e5b4f801dad | ca16aa8fe726e4a9dac149d407cfde3620cdf96a | refs/heads/master | 2020-08-02T10:51:46.568749 | 2019-06-21T06:15:15 | 2019-06-21T06:15:15 | 211,324,575 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # itertools.combinations_with_replacement(iterable, r)
# This tool returns length subsequences of elements from the input iterable allowing individual elements to be repeated more than once.
# Combinations are emitted in lexicographic sorted order. So, if the input iterable is sorted, the combination tuples will be produced in sorted order.
# Task
# You are given a string .
# Your task is to print all possible size replacement combinations of the string in lexicographic sorted order.
# Input Format
# A single line containing the string and integer value separated by a space.
# Constraints
# The string contains only UPPERCASE characters.
# Output Format
# Print the combinations with their replacements of string on separate lines.
# Sample Input
# HACK 2
# Sample Output
# AA
# AC
# AH
# AK
# CC
# CH
# CK
# HH
# HK
# KK
"""
-----------------------------------------------------------------------------------------------------------
"""
from itertools import combinations_with_replacement
string, num = input().split()
for i in combinations_with_replacement(sorted(string),int(num)):
print(''.join(i))
| [
"noreply@github.com"
] | maheshkrishnagopal.noreply@github.com |
ae2420c44c43955f3134b17593b131ef1ca3bda9 | 9522d6962376dccf073f59e447ce20c993dff5f2 | /src/api/views/articles.py | a66c0bcc5d9f03bb49410ea4f65f1accfc9292bf | [] | no_license | bolekhan1508/django_intro | 6f51085905306989f3fc86c72e01b752d1917d4e | 147bbeef2dc3f3c93771f757b44689a4103bf618 | refs/heads/master | 2022-10-30T09:06:46.476738 | 2020-06-18T10:02:21 | 2020-06-18T10:02:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics, mixins
from api.serializers.v1.article import ArticleSerializer, ArticleModelSerializer
from apps.articles.models import Article
class ArticleApiView(APIView):
def get(self, request, **kwargs):
serializer = ArticleModelSerializer(instance=Article.objects.last())
return Response(serializer.data)
def post(self, request, **kwargs):
serializer = ArticleModelSerializer(data=request.data)
if serializer.is_valid():
article = serializer.create(serializer.validated_data)
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ArticleGenericView(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):
queryset = Article.objects.all()
serializer_class = ArticleModelSerializer
| [
"odarchenko@ex.ua"
] | odarchenko@ex.ua |
209938778c9851b71332d8689c820720afece9d7 | 0add7953d3e3ce2df9e8265102be39b758579753 | /built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/smnas_trainer_callback.py | 8cfff6490cdcbc92ef01f25108da398c4f8e260f | [
"Apache-2.0",
"MIT"
] | permissive | Huawei-Ascend/modelzoo | ae161c0b4e581f8b62c77251e9204d958c4cf6c4 | df51ed9c1d6dbde1deef63f2a037a369f8554406 | refs/heads/master | 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 | Apache-2.0 | 2023-03-24T22:22:00 | 2020-12-07T06:01:32 | Python | UTF-8 | Python | false | false | 5,125 | py | """Trainer for SMNas."""
import logging
import os
import mmcv
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from vega.core.trainer.callbacks import Callback
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.core.common.file_ops import FileOps
@ClassFactory.register(ClassType.CALLBACK)
class SMNasTrainerCallback(Callback):
"""Trainer for SMNas."""
disable_callbacks = ["ModelStatistics", "MetricsEvaluator", "ModelCheckpoint", "PerformanceSaver",
"LearningRateScheduler", "ProgressLogger", "ReportCallback"]
def __init__(self):
super(SMNasTrainerCallback, self).__init__()
self.alg_policy = None
def set_trainer(self, trainer):
"""Set trainer object for current callback."""
self.trainer = trainer
self.trainer._train_loop = self._train_process
self.cfg = self.trainer.config
self._worker_id = self.trainer._worker_id
self.gpus = self.cfg.gpus
if hasattr(self.cfg, "kwargs") and "smnas_sample" in self.cfg.kwargs:
self.sample_result = self.cfg.kwargs["smnas_sample"]
self.local_worker_path = self.trainer.get_local_worker_path()
self.output_path = self.trainer.local_output_path
config_path = os.path.join(self.local_worker_path, 'config.py')
with open(config_path, 'w') as f:
f.write(self.trainer.model.desc)
self.config_path = config_path
self.cost_value = self.trainer.model.cost if self.trainer.model is not None else 0.0
dir_path = os.path.dirname(os.path.abspath(__file__))
self._train_script = os.path.join(dir_path, 'tools/dist_train.sh')
self._eval_script = os.path.join(dir_path, 'tools/dist_test.sh')
self.epochs = self.cfg.epochs
def _train_process(self):
"""Process of train and test."""
logging.info("start training")
self._train()
torch.cuda.empty_cache()
logging.info("start evaluation")
performance = self._valid()
performance.append(self.cost_value)
self.save_performance(performance)
def _train(self):
"""Train the network."""
cmd = ['bash', self._train_script, self.config_path, str(self.gpus),
'--total_epochs', str(self.epochs),
'--work_dir', self.local_worker_path]
cmd_str = ''
for item in cmd:
cmd_str += (item + ' ')
logging.info(cmd_str)
os.system(cmd_str)
def _valid(self):
"""Get performance on validate dataset."""
checkpoint_path = os.path.join(self.local_worker_path, 'latest.pth')
eval_prefix = os.path.join(self.local_worker_path, 'eval.pkl')
cmd = ['bash', self._eval_script, self.config_path, checkpoint_path,
str(self.gpus),
'--out', eval_prefix, '--eval', 'bbox']
cmd_str = ''
for item in cmd:
cmd_str += (item + ' ')
logging.info(cmd_str)
os.system(cmd_str)
eval_file = os.path.join(self.local_worker_path, 'eval.pkl.bbox.json')
model_desc = mmcv.Config.fromfile(self.config_path)
try:
performance = self.coco_eval(
eval_file, model_desc.data.test.anno_file)
except BaseException:
performance = 0.0
return [performance]
def save_performance(self, performance):
"""Save performance results."""
if isinstance(performance, int) or isinstance(performance, float):
performance_dir = os.path.join(self.local_worker_path,
'performance')
if not os.path.exists(performance_dir):
FileOps.make_dir(performance_dir)
with open(os.path.join(performance_dir, 'performance.txt'),
'w') as f:
f.write("{}".format(performance))
elif isinstance(performance, list):
performance_dir = os.path.join(self.local_worker_path,
'performance')
if not os.path.exists(performance_dir):
FileOps.make_dir(performance_dir)
with open(os.path.join(performance_dir, 'performance.txt'),
'w') as f:
for p in performance:
if not isinstance(p, int) and not isinstance(p, float):
logging.error("performance must be int or float!")
return
f.write("{}\n".format(p))
def coco_eval(self, result_file, coco):
"""Eval result_file by coco."""
if mmcv.is_str(coco):
coco = COCO(coco)
assert isinstance(coco, COCO)
assert result_file.endswith('.json')
coco_dets = coco.loadRes(result_file)
img_ids = coco.getImgIds()
cocoEval = COCOeval(coco, coco_dets, 'bbox')
cocoEval.params.imgIds = img_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
return cocoEval.stats[0] * 100
| [
"1571856591@qq.com"
] | 1571856591@qq.com |
443d05667fa143590ce4377290bb9f55e10a6f94 | 70433b90af33ed71fc9ab6c7ba15fe1b1ec52a90 | /tests/validation/response/test_request_parameter_validation.py | 4a1f85474beb93ce6a2064d0611a4370fa5a897c | [
"MIT"
] | permissive | dhilton/flex | f6b54ae2c4c28e760fdcc02e00b7a4df420fba99 | e23e74e246f4ad89c8d8971b029dcdd2fa642526 | refs/heads/master | 2020-04-05T23:10:53.512712 | 2014-11-10T04:35:48 | 2014-11-10T04:35:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | import pytest
from flex.validation.response import (
validate_response,
)
from flex.error_messages import MESSAGES
from flex.constants import (
PATH,
QUERY,
STRING,
INTEGER,
)
from tests.factories import (
SchemaFactory,
ResponseFactory,
)
from tests.utils import assert_error_message_equal
def test_response_parameter_validation():
"""
Test that request validation does parameter validation. This is largely a
smoke test to ensure that parameter validation is wired into request
validation correctly.
"""
from django.core.exceptions import ValidationError
schema = SchemaFactory(
paths={
'/get/{id}/': {
'parameters': [
{
'name': 'id',
'in': PATH,
'description': 'id',
'required': True,
'type': STRING,
'format': 'uuid',
},
{
'name': 'page',
'in': QUERY,
'type': INTEGER,
},
],
'get': {
'responses': {200: {'description': "Success"}},
},
},
},
)
response = ResponseFactory(url='http://www.example.com/get/32/?page=abcd')
with pytest.raises(ValidationError) as err:
validate_response(
response,
paths=schema['paths'],
base_path=schema.get('base_path', ''),
context=schema,
inner=True,
)
assert 'request' in err.value.messages[0]
assert 'parameters' in err.value.messages[0]['request'][0][0]
assert 'path' in err.value.messages[0]['request'][0][0]['parameters'][0]
assert 'id' in err.value.messages[0]['request'][0][0]['parameters'][0]['path'][0]
assert 'format' in err.value.messages[0]['request'][0][0]['parameters'][0]['path'][0]['id'][0]
assert_error_message_equal(
err.value.messages[0]['request'][0][0]['parameters'][0]['path'][0]['id'][0]['format'][0],
MESSAGES['format']['invalid_uuid'],
)
assert 'query' in err.value.messages[0]['request'][0][0]['parameters'][0]
assert 'page' in err.value.messages[0]['request'][0][0]['parameters'][0]['query'][0]
assert 'type' in err.value.messages[0]['request'][0][0]['parameters'][0]['query'][0]['page'][0]
assert_error_message_equal(
err.value.messages[0]['request'][0][0]['parameters'][0]['query'][0]['page'][0]['type'][0],
MESSAGES['type']['invalid'],
)
| [
"piper@simpleenergy.com"
] | piper@simpleenergy.com |
b3b02cd1d0710b21962d9239ab72e90a5965cb4a | fdc0b72a3782a06952df4d723783dfa1bae65753 | /query_deluxe/models/query_deluxe.py | 4719ada08e5ea3a5d8cde3dbb94ce8c223b2cde6 | [] | no_license | Denbho/vendor_portal | 0878ad82bf3c40d38f6e123f6b25a358bfebce4f | 341a7ca77cbd310f3835d4b43de5012354a307c5 | refs/heads/main | 2023-04-19T21:26:56.115346 | 2021-05-17T04:16:53 | 2021-05-17T04:16:53 | 364,744,567 | 2 | 0 | null | 2021-05-06T04:34:51 | 2021-05-06T00:52:09 | Python | UTF-8 | Python | false | false | 3,605 | py | from odoo import api, fields, models, _
from odoo.exceptions import UserError
class QueryDeluxe(models.Model):
    """Run raw PostgreSQL queries from the Odoo UI and render the result.

    NOTE(security): this model executes arbitrary SQL typed by the user *by
    design*; access must stay restricted to trusted administrators. Every
    executed query is logged in the record's chatter for auditing.
    """

    _name = "querydeluxe"
    _description = "Postgres queries from Odoo interface"
    _inherit = ['mail.thread', 'mail.activity.mixin']

    tips = fields.Many2one('tipsqueries', string="Examples")
    tips_description = fields.Text(related='tips.description')
    rowcount = fields.Text(string='Rowcount')
    html = fields.Html(string='HTML')
    name = fields.Char(string='Type a query : ')
    valid_query_name = fields.Char()
    show_raw_output = fields.Boolean(string='Show the raw output of the query')
    raw_output = fields.Text(string='Raw output')

    def print_result(self):
        """Open the wizard that asks for the PDF orientation before printing
        the last successfully executed query (``valid_query_name``)."""
        return {
            'name': _("Select orientation of the PDF's result"),
            'view_mode': 'form',
            'res_model': 'pdforientation',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'context': {
                'default_query_name': self.valid_query_name
            },
        }

    def copy_query(self):
        """Copy the selected example query into the editable query field."""
        if self.tips:
            self.name = self.tips.name

    def execute(self):
        """Execute the SQL in ``name`` and render the result as an HTML table.

        Data-modifying statements (update/delete/create/insert/alter/drop)
        are executed but not fetched; other statements populate ``html``
        and ``raw_output``.

        :raises UserError: with the database error when the query fails.
        """
        # Reset the previous run's output.
        self.show_raw_output = False
        self.raw_output = ''
        self.rowcount = ''
        self.html = '<br></br>'
        self.valid_query_name = ''
        if self.name:
            self.tips = False
            # Keep an audit trail of every executed query in the chatter.
            self.message_post(body=str(self.name))
            headers = []
            datas = []
            try:
                self.env.cr.execute(self.name)
            except Exception as e:
                raise UserError(e)
            try:
                # Statements that modify data return no result set, so only
                # fetch when none of these verbs appears at the very start
                # of the query.
                no_fetching = ['update', 'delete', 'create', 'insert', 'alter', 'drop']
                # Bug fix: the original used len(max(no_fetching)), i.e. the
                # length of the *lexicographically* greatest keyword, which
                # matched the longest length only by coincidence.
                max_n = max(len(keyword) for keyword in no_fetching)
                prefix = self.name.lower().strip()[:max_n]
                if not any(keyword in prefix for keyword in no_fetching):
                    headers = [d[0] for d in self.env.cr.description]
                    datas = self.env.cr.fetchall()
            except Exception as e:
                raise UserError(e)
            rowcount = self.env.cr.rowcount
            self.rowcount = "{0} row{1} processed".format(rowcount, 's' if 1 < rowcount else '')
            if headers and datas:
                self.valid_query_name = self.name
                self.raw_output = datas
                # Header row; the leading empty <th> aligns with the
                # row-number column added to each body row below.
                header_html = "".join(["<th style='border: 1px solid'>"+str(header)+"</th>" for header in headers])
                header_html = "<tr>"+"<th style='background-color:white !important'/>"+header_html+"</tr>"
                # NOTE(review): cell values are interpolated into HTML without
                # escaping; consider markupsafe/odoo.tools.html_escape if the
                # data may contain markup.
                body_html = ""
                i = 0
                for data in datas:
                    i += 1
                    body_line = "<tr>"+"<td style='border-right: 3px double; border-bottom: 1px solid; background-color: yellow'>{0}</td>".format(i)
                    for value in data:
                        body_line += "<td style='border: 1px solid; background-color: {0}'>{1}</td>".format('cyan' if i%2 == 0 else 'white', str(value) if (value is not None) else '')
                    body_line += "</tr>"
                    body_html += body_line
                self.html = """
                <table style="text-align: center">
                    <thead style="background-color: lightgrey">
                        {0}
                    </thead>
                    <tbody>
                        {1}
                    </tbody>
                </table>
                """.format(header_html, body_html)
class TipsQueries(models.Model):
    # Catalogue of example queries surfaced as "Examples" on the query form
    # (see QueryDeluxe.tips / copy_query in this module).
    _name = 'tipsqueries'
    _description = "Tips for queries"
    _order = 'create_date desc, id'  # newest examples first
    name = fields.Char(string='Query', required=True)  # the SQL text itself
    description = fields.Text(string="Description")  # human-readable explanation shown next to the example
| [
"dennisboysilva@gmail.com"
] | dennisboysilva@gmail.com |
0f9430ff53318e3f34a0143b0e232380dcbd2abf | eb722922339781fa6bd9937e69383fcd06256738 | /day1/kapua-python-client/swagger_client/models/access_permission.py | 10bb5782a236f4f03f16b8c20aed316153d4307e | [
"MIT"
] | permissive | mrsrinivas/diec | 6a0c5da26ff23170b71217bfbc810bb98a897a83 | ae9a5203b506d5cc18cb381666351bf9ce6b9b6c | refs/heads/master | 2021-01-05T05:41:19.394898 | 2020-01-15T06:24:33 | 2020-01-15T06:24:33 | 240,901,175 | 1 | 0 | MIT | 2020-02-16T13:59:53 | 2020-02-16T13:59:52 | null | UTF-8 | Python | false | false | 7,327 | py | # coding: utf-8
"""
Eclipse Kapua REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.permission import Permission # noqa: F401,E501
class AccessPermission(object):
    """Swagger model describing a single Kapua access-permission entry.

    Mirrors the swagger-codegen interface exactly: plain attributes exposed
    through property accessors backed by ``_``-prefixed storage slots.
    """

    # attribute name -> swagger type of the attribute
    swagger_types = {
        'access_info_id': 'str',
        'type': 'str',
        'permission': 'Permission',
        'scope_id': 'str',
        'id': 'str',
        'created_on': 'datetime',
        'created_by': 'str'
    }

    # attribute name -> key used in the JSON wire representation
    attribute_map = {
        'access_info_id': 'accessInfoId',
        'type': 'type',
        'permission': 'permission',
        'scope_id': 'scopeId',
        'id': 'id',
        'created_on': 'createdOn',
        'created_by': 'createdBy'
    }

    def __init__(self, access_info_id=None, type=None, permission=None,
                 scope_id=None, id=None, created_on=None, created_by=None):
        """AccessPermission - a model defined in Swagger"""
        # Every backing slot starts out as None; only non-None constructor
        # arguments are routed through the property setters below.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        provided = {
            'access_info_id': access_info_id,
            'type': type,
            'permission': permission,
            'scope_id': scope_id,
            'id': id,
            'created_on': created_on,
            'created_by': created_by,
        }
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    def _model_property(attr):
        # Class-body helper (deleted below): builds a trivial get/set
        # property over the '_<attr>' backing slot.
        key = '_' + attr

        def _get(self):
            return getattr(self, key)

        def _set(self, value):
            setattr(self, key, value)

        return property(_get, _set,
                        doc="The %s of this AccessPermission." % attr)

    access_info_id = _model_property('access_info_id')
    type = _model_property('type')
    permission = _model_property('permission')
    scope_id = _model_property('scope_id')
    id = _model_property('id')
    created_on = _model_property('created_on')
    created_by = _model_property('created_by')

    del _model_property

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # One level deep, matching the generated code: convert
                # nested models, pass everything else through unchanged.
                result[attr] = [item.to_dict() if hasattr(item, 'to_dict') else item
                                for item in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, 'to_dict') else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        # Generated models may optionally subclass dict; mirror that here.
        if issubclass(AccessPermission, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, AccessPermission) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | mrsrinivas.noreply@github.com |
0da4a4157e589feceb1276afdc4affd5eff7eb9c | 58ca273d2a9ee7f75b4bde3990513f74441143f0 | /Python/Cellular Automata and FSMs/Turing Machine 1.0.py | 253213abdb96bfc82a197a43c610dfd0947ce9bc | [] | no_license | Al153/Programming | 6db4b3c9d34747e1248aed2b0ee7fb7a35cef8d2 | 26de0b5607c6329bed2d6454090f00b098bc837f | refs/heads/master | 2021-04-12T04:13:00.945021 | 2019-07-12T17:47:08 | 2019-07-12T17:47:08 | 12,439,227 | 2 | 2 | null | 2014-08-13T19:21:46 | 2013-08-28T16:47:08 | Game Maker Language | UTF-8 | Python | false | false | 1,414 | py | #Turing machine I
def str_list(a):
    """Return a new list with every element of *a* converted via str()."""
    return [str(element) for element in a]
# Head-movement aliases: R/r move right (+1), L/l move left (-1).
R,r = 1,1
L,l = -1,-1
# Halt marker accepted in any capitalisation used by the program table.
HALT, Halt, halt = 'halt','halt','halt'
# The machine's tape of binary symbols; it grows on demand at either end.
tape = [0,0,1,0,0,0,1,1,1,0,1,0,1,1,0,1,0,0,0]
# Transition table: program[state][read_symbol] -> [move, write, next_state].
# Index 0 of each pair handles reading a 0, index 1 handles reading a 1.
program = (
    ([R,0,0],[R,1,1]),
    ([R,0,1],[L,0,2]),
    ([R,1,3],[R,1,3]),
    ([R,0,4],[L,0,2]),
    ([R,0,5],[L,0,2]),
    ([halt,0,0],[L,0,2]),
    )
done = 0     # flag set to 1 once a halt transition fires
state = 0    # current machine state (row index into program)
pointer = 0  # head position on the tape
while done == 0:
    data = tape[pointer]
    stateTemp = program[state][data]  # [move, write, next_state] for this step
    if stateTemp[0] == 'halt': #Halt sequence
        print 'HALT'
        done = 1
    else:
        printList = str_list(tape)
        state = stateTemp[2]         # advance to the next state
        tape[pointer] = stateTemp[1] # write the new symbol under the head
        # Bracket the just-written cell so the head position is visible
        # in the printed tape, e.g. ...010[1]10...
        stateTemp2 = str(stateTemp[1])
        stateTemp2 = '[' + stateTemp2 + ']'
        printList[pointer] = stateTemp2
        pointer = pointer + stateTemp[0]
        if pointer == -1: #Extensision of tape
            # Head ran off the left edge: grow the tape by one cell and
            # shift every symbol one position right, then blank cell 0.
            tape.append(0)
            for i in range (len(tape)-1):
                t = len(tape)-(i+1)
                tape[t] = tape[t-1]
            pointer = 0
            tape[0] = 0
        if pointer > (len(tape) -1):
            # Head ran off the right edge: append a blank cell.
            tape.append(0)
        go = raw_input()  # pause: advance one step per Enter key press
        printList = ''.join(printList)
        print printList
| [
"al.taylor1105@gmail.com"
] | al.taylor1105@gmail.com |
4d76334adfcd5e688e9261bdf384c7c71abf7550 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/pointcloud/hoverlabel/font/_size.py | 0f201eeb18dd469693d4636f5f0b526e02c1f9b0 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 509 | py | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``size`` property of
    ``pointcloud.hoverlabel.font``."""

    def __init__(
        self, plotly_name="size", parent_name="pointcloud.hoverlabel.font", **kwargs
    ):
        # Install this validator's defaults unless the caller overrode them,
        # then hand everything to the generic NumberValidator.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("min", 1)
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
f7f6381a89dd15a2e03160b3ce89d1620f1846dd | 38382e23bf57eab86a4114b1c1096d0fc554f255 | /hazelcast/protocol/codec/map_remove_codec.py | 61850d484dc922697b6864594cb5d06d9be6f29c | [
"Apache-2.0"
] | permissive | carbonblack/hazelcast-python-client | e303c98dc724233376ab54270832bfd916426cea | b39bfaad138478e9a25c8a07f56626d542854d0c | refs/heads/gevent-3.12.3.1 | 2023-04-13T09:43:30.626269 | 2020-09-18T17:37:17 | 2020-09-18T17:37:17 | 110,181,474 | 3 | 1 | Apache-2.0 | 2020-12-01T17:45:42 | 2017-11-10T00:21:55 | Python | UTF-8 | Python | false | false | 1,196 | py | from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.codec.map_message_type import *
# Message-type identifiers for the map.remove operation in the Hazelcast
# open binary client protocol.
REQUEST_TYPE = MAP_REMOVE
RESPONSE_TYPE = 105  # response frame carries a nullable serialized Data value
RETRYABLE = False  # remove mutates the map, so it must not be blindly retried
def calculate_size(name, key, thread_id):
    """ Calculates the request payload size"""
    # Length-prefixed UTF-8 map name + serialized key Data + fixed-width
    # long for the owning thread id.
    return (calculate_size_str(name)
            + calculate_size_data(key)
            + LONG_SIZE_IN_BYTES)
def encode_request(name, key, thread_id):
    """ Encode request into client_message"""
    # The frame is pre-sized from calculate_size so all appends fit.
    client_message = ClientMessage(payload_size=calculate_size(name, key, thread_id))
    client_message.set_message_type(REQUEST_TYPE)
    client_message.set_retryable(RETRYABLE)
    # Field order must match the server-side decoder: name, key, thread id.
    client_message.append_str(name)
    client_message.append_data(key)
    client_message.append_long(thread_id)
    client_message.update_frame_length()
    return client_message
def decode_response(client_message, to_object=None):
    """ Decode response from client message"""
    # A leading boolean flags a null payload; otherwise the removed value's
    # serialized Data follows and is deserialized via to_object.
    response = None
    if not client_message.read_bool():
        response = to_object(client_message.read_data())
    return {'response': response}
| [
"arslanasim@gmail.com"
] | arslanasim@gmail.com |
e46b45ed01d05ceb5c23f7d30e315dacbc49ecb7 | 0c6990136391d72d3768d5a8a4a6919bd2f6ce6a | /0x16-api_advanced/2-recurse.py | 3de8da02f221e1d898e945b3e379747e81ed5132 | [] | no_license | s0m35h1t/holberton-system_engineering-devops | eef99bcf0297938a20470a72b12e9d603c6153ab | 3aea10e71c49b2a97c7ed02bfae2231fcede1a92 | refs/heads/master | 2020-07-24T02:51:02.279869 | 2020-05-28T22:33:31 | 2020-05-28T22:33:31 | 207,779,574 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | #!/usr/bin/python3
"""
Define: recurse function
"""
import requests
def recurse(subreddit, hot_list=None, after=None):
    """Get a list of all hot post titles for a subreddit, recursively
    paging through Reddit's listing API.

    Arguments:
        subreddit: subreddit name to query (must be a str).
        hot_list: accumulator of titles collected on previous pages.
        after: pagination token returned by the previous page.
    Returns:
        (list) of titles, or None for an invalid/empty subreddit.
    """
    # Bug fix: the original used a mutable default (hot_list=[]), which is
    # created once and shared across top-level calls, so titles accumulated
    # between invocations. Use the None sentinel and build a fresh list.
    if hot_list is None:
        hot_list = []
    if subreddit is None or type(subreddit) is not str:
        return None
    url = "http://www.reddit.com/r/{}/hot.json".format(subreddit)
    headers = {
        'User-Agent': 'Python/requests:api.advanced:v1.0.0 (by /u/aleix)'}
    params = {"after": after, "limit": 100}
    # NOTE(review): no timeout is set; consider requests.get(..., timeout=...).
    req = requests.get(url, headers=headers, params=params).json()
    after = req.get('data', {}).get('after', None)
    posts = req.get('data', {}).get('children', None)
    if posts is None or (len(posts) > 0 and posts[0].get('kind') != 't3'):
        # Invalid subreddit (or unexpected payload): only report failure
        # when nothing was collected on earlier pages.
        if len(hot_list) == 0:
            return None
        return hot_list
    for post in posts:
        hot_list.append(post.get('data', {}).get('title', None))
    if after is None:
        # Last page reached.
        if len(hot_list) == 0:
            return None
        return hot_list
    return recurse(subreddit, hot_list, after)
| [
"adib.grouz@gmail.com"
] | adib.grouz@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.