blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09e4f881f8f065510e9a5f1375a9ab2d0bc39cd9
|
1aa99ce0775508c3f2bbe321874c24957da5e880
|
/python/pyspark/sql/tests/test_group.py
|
6de1b8ea0b3cea5cfba394a2910164a781d09d46
|
[
"CDDL-1.1",
"CC0-1.0",
"Apache-2.0",
"BSD-3-Clause",
"MPL-1.1",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"Python-2.0",
"CDDL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-other-copyleft",
"CPL-1.0",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"CC-BY-SA-3.0",
"CC-PDDC",
"LicenseRef-scancode-unicode",
"LicenseRef-scancode-generic-cla",
"GPL-2.0-only",
"LicenseRef-scancode-free-unknown",
"LGPL-2.0-or-later",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception-3.1",
"NAIST-2003",
"LicenseRef-scancode-unknown"
] |
permissive
|
zzvara/spark-dynamic
|
c06a8b885646d9e611cdca3591824fcf0fa0ccc2
|
00b4a8644ca89789af1fa47fa6ed871ad902154e
|
refs/heads/master
| 2022-12-14T12:57:53.236482
| 2019-05-24T13:40:12
| 2019-05-27T15:31:03
| 96,672,852
| 3
| 0
|
Apache-2.0
| 2022-11-16T11:37:30
| 2017-07-09T09:03:45
|
Scala
|
UTF-8
|
Python
| false
| false
| 1,818
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import Row
from pyspark.testing.sqlutils import ReusedSQLTestCase
class GroupTests(ReusedSQLTestCase):
    """Tests for DataFrame grouping/aggregation (``groupBy``/``agg``)."""

    def test_aggregator(self):
        """Exercise global (no-column) groupBy with several agg flavors.

        Covers: dict-style agg specs, ``mean()``, ``functions.first/last``,
        ``approx_count_distinct`` and ``countDistinct``.
        """
        # self.df is provided by ReusedSQLTestCase; assumed to hold 100 rows
        # with key 0..99 and string values -- TODO confirm against the fixture.
        df = self.df
        g = df.groupBy()  # no grouping columns => one global aggregation group
        self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
        self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
        from pyspark.sql import functions
        self.assertEqual((0, u'99'),
                         tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
        # approx_count_distinct is an estimate, hence the loose lower bound.
        self.assertTrue(95 < g.agg(functions.approx_count_distinct(df.key)).first()[0])
        self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
if __name__ == "__main__":
    import unittest
    from pyspark.sql.tests.test_group import *

    # Prefer JUnit-style XML reports when xmlrunner is available; otherwise
    # fall back to the default text test runner.
    try:
        import xmlrunner
    except ImportError:
        testRunner = None
    else:
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    unittest.main(testRunner=testRunner, verbosity=2)
|
[
"gurwls223@apache.org"
] |
gurwls223@apache.org
|
86a788c889be2a5e5552a4c61cb4670d20fc02dd
|
07c1d8eb58b34a8c17c6a9deef73094b6e077b4d
|
/action/action_buddy_contact_progress.py
|
5720d3f3b06bc1d21dbf391caf352666d52bc449
|
[] |
no_license
|
robot-nan/GameLogParse
|
061f8d0448c5945bec61b55380d9f2cd883defcf
|
151f5dd167b106640cd178373a59b2458e43d80e
|
refs/heads/master
| 2021-11-07T21:27:22.354060
| 2015-09-23T15:32:32
| 2015-09-23T15:32:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
# -*- coding:utf-8 -*-
"""
好友 与好友通讯进度奖励
"""
from action import action_base
from util import game_define
def log(user, add_free_draw, item_str):
    """Build one '$$'-delimited log line for the buddy-contact reward event.

    (Original docstring: 输出日志 -- "emit log".)

    Args:
        user: player object; ``user.player.get_free_draw_material()`` supplies
            the post-reward free-draw count.
        add_free_draw: number of free draws granted by this reward.
        item_str: item payload, appended verbatim (stringified).

    Returns:
        str: base fields from ``action_base.log_base`` followed by
        action id, added draws, current draws and the item string.
    """
    action = game_define.EVENT_ACTION_REWARD_BUDDY_CONTACT
    cur_free_draw = user.player.get_free_draw_material()
    log_lst = action_base.log_base(user)
    log_lst.append(str(action))
    log_lst.append(str(add_free_draw))
    log_lst.append(str(cur_free_draw))
    log_lst.append(str(item_str))
    # '$$' is the record-field separator used by the companion parse().
    log_str = '$$'.join(log_lst)
    return log_str
def parse(log_part_lst):
    """Parse one buddy-contact reward record into a result dict.

    (Original docstring: 解析 -- "parse".)

    Args:
        log_part_lst: field list, positions 0-3 being action id, added
            free draws, current free draws and the item-list string.

    Returns:
        dict with action, add/cur/old free-draw counts and the parsed
        item list.
    """
    action_id, added, current = (int(part) for part in log_part_lst[:3])
    result = {
        'action': action_id,
        'add_free_draw': added,
        'cur_free_draw': current,
        'add_item_list': action_base.list_parse(log_part_lst[3]),
    }
    # Reconstruct the pre-reward count from the logged delta.
    result['old_free_draw'] = current - added
    return result
|
[
"a.robot.n@gmail.com"
] |
a.robot.n@gmail.com
|
033abe55ea3aeb335fead36f85e46744aa5f66d3
|
87119ec9cea61be175f2a1f16f0e37d060cde9af
|
/django/django_celery/myproject/celery_config/apps.py
|
d21110a7e22dfd38681d3edc22dc2af5737e15e9
|
[] |
no_license
|
atkins126/sample_nullpobug
|
bce9c1bf2a31921ac665a18dc2a62be3bdef493e
|
b2ba65f42f717f0ceb2cf14fe28e90c460bfde87
|
refs/heads/master
| 2023-02-16T11:37:05.290069
| 2021-01-18T14:43:40
| 2021-01-18T14:43:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
from django.apps import AppConfig
class CeleryConfig(AppConfig):
    """Django app config that instantiates the Celery app at startup."""

    # App label / dotted path this configuration applies to.
    name = 'celery_config'

    def ready(self):
        """Create the Celery app once the Django app registry is ready."""
        # Imported here (not at module top) -- presumably to avoid importing
        # project modules before Django is fully configured; confirm.
        from myproject.celery_app import get_celery_app
        get_celery_app()
|
[
"tokibito@gmail.com"
] |
tokibito@gmail.com
|
a7b7f440ee7dbc2c11a1270096228f3417cc1960
|
35dbd536a17d7127a1dd1c70a2903ea0a94a84c2
|
/src/sentry/services/hybrid_cloud/lost_password_hash/impl.py
|
e4a57fc261e32c357c600b8110580aea07c1721a
|
[
"Apache-2.0",
"BUSL-1.1"
] |
permissive
|
nagyist/sentry
|
efb3ef642bd0431990ca08c8296217dabf86a3bf
|
d9dd4f382f96b5c4576b64cbf015db651556c18b
|
refs/heads/master
| 2023-09-04T02:55:37.223029
| 2023-01-09T15:09:44
| 2023-01-09T15:09:44
| 48,165,782
| 0
| 0
|
BSD-3-Clause
| 2022-12-16T19:13:54
| 2015-12-17T09:42:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
import datetime
from sentry.models import LostPasswordHash
from sentry.services.hybrid_cloud.lost_password_hash import (
APILostPasswordHash,
LostPasswordHashService,
)
class DatabaseLostPasswordHashService(LostPasswordHashService):
    """Database-backed implementation of ``LostPasswordHashService``."""

    def get_or_create(
        self,
        user_id: int,
    ) -> APILostPasswordHash:
        """Return a serialized, currently-valid reset hash for ``user_id``.

        Reuses the existing row when present; refreshes the hash and
        timestamp in place when the stored hash has expired.
        """
        # NOTE(mattrobenolt): Some security people suggest we invalidate
        # existing password hashes, but this opens up the possibility
        # of a DoS vector where then password resets are continually
        # requested, thus preventing someone from actually resetting
        # their password.
        # See: https://github.com/getsentry/sentry/pull/17299
        # `created` is intentionally unused; get_or_create is used only to
        # guarantee a row exists per user.
        password_hash, created = LostPasswordHash.objects.get_or_create(user_id=user_id)
        if not password_hash.is_valid():
            password_hash.date_added = datetime.datetime.now()
            password_hash.set_hash()
            password_hash.save()
        return self.serialize_lostpasswordhash(password_hash)

    def close(self) -> None:
        """No resources to release for the database-backed service."""
        pass
|
[
"noreply@github.com"
] |
nagyist.noreply@github.com
|
1d300458ff0cf33fd7c879e31a0162efaa72ae1c
|
e1efc8e0b0e4629dea61504fbc816c0527691bd9
|
/6.redis/redis7-数据类型内部编码规则.py
|
d206eee2aea9fb4c23d71e8adf6c329ce0a4e930
|
[] |
no_license
|
xiongmengmeng/xmind-technology
|
2bb67a0bf92cfd660cac01f8ab3a2454423ccba5
|
e2fdb6987ef805a65f0a4feb52d84383853f4b77
|
refs/heads/main
| 2023-07-31T07:10:29.868120
| 2021-09-11T08:18:17
| 2021-09-11T08:18:17
| 307,636,242
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
import os,sys
# Make the repository root importable so the local `xmind` package resolves.
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="redis"
# NOTE(review): "\\" path separator is Windows-only; os.path.join would be
# portable -- left as-is to preserve behavior.
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
# New sheet titled "hash internal encoding rules" (title text is runtime data).
s2=w.createSheet()
s2.setTitle("hash内部编码规则")
r2=s2.getRootTopic()
r2.setTitle("hash内部编码规则")
# Mind-map content tree: topic title -> nested lists/dicts of bullet text.
# The Chinese leaf strings are runtime data rendered into the xmind sheet
# and are preserved verbatim.
content={
'内部编码方式':[
'ziplist',
'dict'
],
'ziplist':[
'紧凑的编码格式,牺牲了部分读取性能以换取极高的空间利用率,适合在元素少时使用',
{'结构':[
{'zlbytes':[
'4字节',
'记录压缩列表总共占用的字节数'
]},
{'zltail':[
'4字节',
'定位list的末尾节点',
'最后一项(entry)在ziplist中的偏移字节数',
'方便尾端快速地执行push或pop操作'
]},
{'zllen':[
'2字节',
'记录ziplist中数据项(entry)的个数'
]},
{'zlentry':[
'存放真实数据,长度不定',
# NOTE(review): the heading says "4 parts" but only 3 entries follow --
# one bullet may be missing from the source data; verify.
{'4部分':[
'prerawlen:前一个entry的数据长度',
'len:entry中数据的长度)',
'data:真实数据存储',
]}
]},
{'zlend':[
'1字节',
'结束标记,值固定为255'
]}
]}
],
'dict':[
'散列表,redis中哈希表称为dict',
'O(1)时间复杂度的赋值取值',
{'dict':[
{'type':[
]},
{'privdata':[
]},
{'ht[2]':[
'采用双哈希表,用来扩容',
{'dictht[0]':[
'table:数组,数组的节点为dictEntry',
'size:数组长度',
'sizemask:数组长度-1',
'used:已存节点'
]}
]},
{'rehashidx':[
]}
]},
{'dictEntry':[
'key',
'value',
'next'
]}
],
'ziplist转为dict的条件':[
{'hash-max-ziplist-entries=512':[
'ziplist元素个数超过512'
]},
{'hash-max-ziplist-value=64':[
'单个元素大小超过64byte'
]}
]
}
# Build the xmind topic tree from the content dict (was: 构建xmind).
xmind.build(content,r2)
# Save the workbook back to the same .xmind file (was: 保存xmind).
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
|
[
"xiongmengmeng@qipeipu.com"
] |
xiongmengmeng@qipeipu.com
|
e3dcddf28d9cfb6ce8a4fe41d148715f88134be0
|
ffc3cf8a1ed64a86a8e92f738a2bf4b10afc63ac
|
/chainercv/transforms/image/random_rotate.py
|
64977ab8c4718cf763aefa775923ffee291960c9
|
[
"MIT"
] |
permissive
|
lichnak/chainercv
|
04be206b5bda2e601a86b6e7a8684ba1b03698ee
|
845a600dd8722788233f2b8e5085a44790bb2be4
|
refs/heads/master
| 2020-05-23T04:47:12.531295
| 2017-03-12T11:36:58
| 2017-03-12T11:36:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
import numpy as np
def random_rotate(img, return_rotation=False):
    """Rotate an image by a randomly chosen multiple of 90 degrees.

    Args:
        img (numpy.ndarray): image array; axis 0 is treated as the channel
            axis (CHW layout -- assumed from the transposes, confirm).
        return_rotation (bool): if True, also return the rotation count.

    Returns:
        The rotated array, or ``(array, k)`` when ``return_rotation`` is
        True, where ``k`` in {0, 1, 2, 3} is the number of 90-degree
        counter-clockwise turns applied.
    """
    times = np.random.randint(4)
    # np.rot90 rotates in the leading two axes, so work in HWC order and
    # move the channel axis back afterwards.
    rotated = np.transpose(np.rot90(np.transpose(img, (1, 2, 0)), times), (2, 0, 1))
    return (rotated, times) if return_rotation else rotated
|
[
"yuyuniitani@gmail.com"
] |
yuyuniitani@gmail.com
|
c932bfbb92d2084d65f33c7bad9db63a939d9d64
|
81db4221ab007659d4f117c9320c28c00b0902a7
|
/python_repos.py
|
02755831cd6da4502e4b411b6b87f57e56f6fc1d
|
[] |
no_license
|
thewchan/python_data_viz
|
69f8c0a502a9ec5ee44b78e1d2db19c8f4fdf9ba
|
3226bd133a72fc11e8ee09775665a509b6a5efba
|
refs/heads/master
| 2022-04-16T13:48:55.610209
| 2020-04-14T16:15:13
| 2020-04-14T16:15:13
| 255,659,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
import requests

# Query the GitHub search API for the most-starred Python repositories.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
headers = {'Accept': 'application/vnd.github.v3+json'}
response = requests.get(url, headers=headers)
print(f"Status code: {response.status_code}")

# Decode the JSON payload into a dict.
response_dict = response.json()
print(f"Total repositories: {response_dict['total_count']}")

# Summarize each repository on the first page of results.
repo_dicts = response_dict['items']
print(f"Repositories returned {len(repo_dicts)}")
print("\nSelected information about each repository:")
for repo in repo_dicts:
    print(f"Name: {repo['name']}")
    print(f"Owner {repo['owner']['login']}")
    print(f"Stars: {repo['stargazers_count']}")
    print(f"Repository: {repo['html_url']}")
    print(f"Description: {repo['description']}")
|
[
"thewchan@gmail.com"
] |
thewchan@gmail.com
|
86053fa3163ebbc4322bb8e014523f01fd9291b8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_266/ch59_2019_03_29_21_49_10_038358.py
|
8cb109d5a7ec70aed36067d83e642eb2d9a75f9e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
def conta_a(palavra):
    """Return the number of lowercase 'a' characters in ``palavra``.

    The original index-based while loop re-implemented ``str.count``;
    the built-in is equivalent (case-sensitive, single pass) and clearer.
    """
    return palavra.count('a')
|
[
"you@example.com"
] |
you@example.com
|
8ebfb829b1d0def60e4c43777cb8f3bf68601fa8
|
4c458854c0b5672a098b496d871a2b4f1d8e828d
|
/tools/py_bulit_in/getattr_module.py
|
4c2d67c27b0d03499119524cbf097660dd38e9c8
|
[] |
no_license
|
claire1234995/code
|
ae918aebd0fb87f50d1ac0ee434e4976e8682b23
|
1bb9f5aaad1ac801e912cd13537de2ebfe9dcb1c
|
refs/heads/master
| 2022-02-20T05:28:40.303611
| 2019-10-11T05:28:11
| 2019-10-11T05:28:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from importlib import import_module
# Dynamically import the module named "default". ('.'.join on a one-element
# list is just "default"; the join form suggests the dotted path was meant
# to be assembled from parts.)
defa = import_module('.'.join(["default"]))
# getattr(defa, 'info') is a function (translated from the original comment).
attr = getattr(defa, 'info')
# Call it and print the result.
print(attr())
|
[
"mxxhcm@gmail.com"
] |
mxxhcm@gmail.com
|
4b3f4ce7d3466af21161b49b5de4bb3c3dae016e
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/p38a_input/L2Z/2Z-2Y_wat_20Abox/set_1ns_equi.py
|
caf34fdac8c7fd631e0581c3c13b6dd1db8f0e62
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
import os
# Root of the thermodynamic-integration run for the 2Z->2Y transform.
dir = '/mnt/scratch/songlin3/run/p38a/L2Z/wat_20Abox/ti_one-step/2Z_2Y/'
filesdir = dir + 'files/'
# Template equilibration input and PBS job script copied per lambda window.
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
# 12 lambda-window values; presumably a quadrature schedule -- TODO confirm.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
	# Recreate a clean per-lambda working directory named by the value.
	os.system("rm -r %6.5f" %(j))
	os.system("mkdir %6.5f" %(j))
	os.chdir("%6.5f" %(j))
	os.system("rm *")
	workdir = dir + "%6.5f" %(j) + '/'
	#equiin: copy the template and substitute the lambda value for XXX.
	eqin = workdir + "%6.5f_equi.in" %(j)
	os.system("cp %s %s" %(temp_equiin, eqin))
	os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
	#PBS: same copy-and-substitute for the job script.
	pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
	os.system("cp %s %s" %(temp_pbs, pbs))
	os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
	#top: topology and restart files come from the parent directory.
	os.system("cp ../2Z-2Y_merged.prmtop .")
	os.system("cp ../0.5_equi_0.rst .")
	#submit pbs, then return to the run root for the next window.
	os.system("qsub %s" %(pbs))
	os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
178cb057717e1ae8d262ad395a5abd1a2506036e
|
71ad2a6587cc7c0a7149465287b2659d81f406e7
|
/morpheus_chair_pkg/scripts/simple_firmata.py
|
d282b902a57547d624cac95b58488394e34f9c8c
|
[] |
no_license
|
ArifSohaib/morpheus_chair_arduino
|
389a091ad00535992260ed8eeb6d897d33c08010
|
be4e4892a87f09cc86c8832a12b2ccc06172756f
|
refs/heads/master
| 2020-04-29T09:02:15.593714
| 2019-12-07T05:35:10
| 2019-12-07T05:35:10
| 176,008,958
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,531
|
py
|
from pyfirmata import Arduino, util
import os
if os.name == 'nt':
from pynput import keyboard
import time
"""defining pins"""
#define ENB 5
#define IN1 7
#define IN2 8
#define IN3 9
#define IN4 11
#define ENA 6
ENB = 5
IN1 = 7
IN2 = 8
IN3 = 9
IN4 = 11
ENA = 6
def forward():
    """Drive forward: both enables on, IN1/IN4 high, IN2/IN3 low.

    Port of the original Arduino ``forward()`` routine (same pins, same
    write order) via pyfirmata.
    """
    for pin, level in ((ENA, 1), (ENB, 1), (IN1, 1), (IN2, 0), (IN3, 0), (IN4, 1)):
        board.digital[pin].write(level)
def back():
    """Drive backward: both enables on, IN2/IN3 high, IN1/IN4 low.

    Port of the original Arduino ``back()`` routine (same pins, same
    write order) via pyfirmata.
    """
    for pin, level in ((ENA, 1), (ENB, 1), (IN1, 0), (IN2, 1), (IN3, 1), (IN4, 0)):
        board.digital[pin].write(level)
def left():
    """Turn left: both enables on, IN2/IN4 high, IN1/IN3 low.

    Port of the original Arduino ``left()`` routine (same pins, same
    write order) via pyfirmata.
    """
    for pin, level in ((ENA, 1), (ENB, 1), (IN1, 0), (IN2, 1), (IN3, 0), (IN4, 1)):
        board.digital[pin].write(level)
def right():
    """Turn right: both enables on, IN1/IN3 high, IN2/IN4 low.

    Port of the original Arduino ``right()`` routine (same pins, same
    write order) via pyfirmata.
    """
    for pin, level in ((ENA, 1), (ENB, 1), (IN1, 1), (IN2, 0), (IN3, 1), (IN4, 0)):
        board.digital[pin].write(level)
def stop():
    """Stop both motors by driving every control pin low."""
    for pin in (ENA, ENB, IN1, IN2, IN3, IN4):
        board.digital[pin].write(0)
def on_press(key):
    """pynput key-press handler mapping WASD to drive commands.

    'w'/'s'/'a'/'d' drive forward/back/left/right; 'x' stops the motors,
    disconnects the board and returns False, which makes the pynput
    listener terminate. Special keys (no ``.char`` attribute) raise
    AttributeError and fall into the except branch.

    Fixes vs. original: stray ``;`` after stop() removed; the inner bare
    ``except:`` narrowed to ``except Exception`` so SystemExit and
    KeyboardInterrupt are no longer swallowed.
    """
    try:
        print('alphanumeric key {0} pressed'.format(key.char))
        if key.char == 'w':
            forward()
        elif key.char == 's':
            back()
        elif key.char == 'a':
            left()
        elif key.char == 'd':
            right()
        elif key.char == 'x':
            stop()
            board.exit()
            return False
    except AttributeError:
        # NOTE(review): any special key (shift, arrows, ...) disconnects the
        # board here -- preserved from the original, but verify it is intended.
        try:
            board.exit()
        except Exception:
            print("board not connected")
        print('special key {0} pressed'.format(
            key))
def on_release(key):
    """pynput key-release handler; Esc terminates the listener."""
    print('{0} released'.format(key))
    if key == keyboard.Key.esc:
        # Stop listener (returning False ends the pynput Listener loop).
        return False
# Module-level globals: connect to the Arduino, trying the Windows COM port
# first and falling back to the usual Linux USB device path.
try:
    board = Arduino("COM9")
except Exception:  # narrowed from a bare `except:`; fallback is still best-effort
    board = Arduino("/dev/ttyACM0")
# Background iterator thread keeps pin state synchronized with the board.
iterator = util.Iterator(board)
iterator.start()
def main():
    """Run the keyboard-controlled drive loop (Windows/pynput path).

    Blocks until a handler returns False ('x' in on_press or Esc in
    on_release). The large commented-out alternative implementation using
    the `keyboard` module was dead code and has been removed.
    """
    with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
        listener.join()
def main_simple():
    """Scripted demo (no keyboard): forward 10 s, back 10 s, then stop."""
    import time
    for command in (forward, back):
        command()
        time.sleep(10)
    stop()
if __name__ == "__main__":
if os.name != 'nt':
main_simple()
else:
main()
|
[
"arif_sohaib@outlook.com"
] |
arif_sohaib@outlook.com
|
90aab359ae9197e2d3629214164cb1cc90ca7031
|
b86608b6de44642ed29cd88bba4acbbdd31a0b04
|
/examples/bq_file_load_benchmark/tests/test_parquet_util.py
|
55fff7d7e6872a3077acaea7ee3a1e2d5624fa6b
|
[
"Apache-2.0"
] |
permissive
|
MCRen88/professional-services
|
a514a926dd23e3c4ac6dadb656faed22c3d91d5d
|
d7bc3b194159ffdb149c9507890bb1fbae7a8d88
|
refs/heads/master
| 2020-12-15T16:38:17.860940
| 2020-01-06T19:29:47
| 2020-01-06T19:29:47
| 235,181,173
| 1
| 0
|
Apache-2.0
| 2020-01-20T19:26:15
| 2020-01-20T19:26:14
| null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pyarrow as pa
import unittest
from google.cloud import bigquery
from bq_file_load_benchmark.generic_benchmark_tools import parquet_util
class TestParquetUtil(unittest.TestCase):
    """Tests functionality of load_benchmark_tools.parquet_util.ParquetUtil.

    Attributes:
        parquet_util(load_benchmark_tools.ParquetUtil): parquet utility class
            under test, built around a fixed two-field BigQuery schema.
    """
    def setUp(self):
        """Sets up resources for tests.

        Builds a BigQuery schema with one REQUIRED STRING and one REQUIRED
        NUMERIC field and wraps it in a ParquetUtil instance.
        """
        bq_schema = [
            bigquery.SchemaField('string1', 'STRING', 'REQUIRED'),
            bigquery.SchemaField('numeric1', 'NUMERIC', 'REQUIRED')
        ]
        self.parquet_util = parquet_util.ParquetUtil(
            bq_schema=bq_schema
        )
    def test_get_parquet_translated_schema(self):
        """Tests ParquetUtil.get_pa_translated_schema().

        Tests ParquetUtil's ability to translate a BigQuery schema to a
        PyArrow schema for parquet.
        """
        parquet_translated_schema = self.parquet_util.get_pa_translated_schema()
        # NOTE(review): NUMERIC -> pa.int64() is what the utility is expected
        # to produce here; confirm that mapping is intentional (BigQuery
        # NUMERIC is a decimal type).
        expected_pa_schema = pa.schema([
            pa.field('string1', pa.string()),
            pa.field('numeric1', pa.int64())
        ])
        assert parquet_translated_schema == expected_pa_schema
|
[
"jferriero@google.com"
] |
jferriero@google.com
|
b5ce1f91b4d8d62e404127000eb05f3225a5afd7
|
04c343a4b6ba0cee3873a17833ac910e930e27ce
|
/goals/migrations/0053_add_field_theme_to_sector.py
|
16cdfaaa0b865ae88c90451028af51503525f230
|
[
"Unlicense"
] |
permissive
|
tehamalab/dgs
|
f10f45440494aa3404da068cfef69ad2f7385033
|
46de3cdaced5e4afef46fa46c7a3303d53df0da0
|
refs/heads/master
| 2021-03-16T05:13:57.548503
| 2017-11-24T05:45:49
| 2017-11-24T05:46:08
| 93,390,835
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-28 05:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many ``themes`` field to the ``Sector`` model."""

    # Must run after the migration that added the caption field on Plan.
    dependencies = [
        ('goals', '0052_add_field_caption_on_plan'),
    ]

    operations = [
        migrations.AddField(
            model_name='sector',
            name='themes',
            field=models.ManyToManyField(related_name='sectors', to='goals.Theme', verbose_name='Themes'),
        ),
    ]
|
[
"bmachaku@gmail.com"
] |
bmachaku@gmail.com
|
125065bc258175c73d7cc32c947d987f6a4b70e0
|
e5de11874e3d68ebd48f22d75d0c5b37ed058ae9
|
/src/env/dm_control/dm_control/suite/common/settings.py
|
b6ace94933a01f6f632e88350d11796f7e6ec6e9
|
[
"Apache-2.0"
] |
permissive
|
xiaolonw/policy-adaptation-during-deployment
|
4436e568439e3e7df48b7ed529304e9d11e979f0
|
e3f98eef83608bb78a55a63405e17106e4dee814
|
refs/heads/master
| 2022-11-17T19:09:00.967055
| 2020-07-09T00:34:10
| 2020-07-09T00:34:10
| 279,119,345
| 1
| 0
| null | 2020-07-12T17:58:50
| 2020-07-12T17:58:50
| null |
UTF-8
|
Python
| false
| false
| 5,146
|
py
|
import os
import numpy as np
from dm_control.suite import common
from dm_control.utils import io as resources
import xmltodict
# Directory of the dm_control suite package (parent of this module's dir).
_SUITE_DIR = os.path.dirname(os.path.dirname(__file__))
# Shared visual asset files loaded for every model and exposed for editing.
_FILENAMES = [
    "./common/materials.xml",
    "./common/skybox.xml",
    "./common/visual.xml",
]
def get_model_and_assets_from_setting_kwargs(model_fname, setting_kwargs=None):
    """Returns a tuple containing the model XML string and a dict of assets.

    (Fixed a typo: the original docstring opened with four quotes.)

    Args:
        model_fname: name of the MuJoCo model XML file to read.
        setting_kwargs: optional dict of visual overrides. Recognized keys:
            'light_pos', 'cam_pos', 'distractor_pos' (3-component sequences),
            'grid_rgb1', 'grid_rgb2', 'grid_texrepeat', 'self_rgb',
            'skybox_rgb', 'skybox_rgb2', 'skybox_markrgb' (sequences) and
            'grid_reflectance' (scalar). When None, the model and assets are
            returned unmodified.

    Raises:
        NotImplementedError: if a light/camera override is requested but the
            model XML contains no such entity in the expected locations.
    """
    assets = {filename: resources.GetResource(os.path.join(_SUITE_DIR, filename))
              for filename in _FILENAMES}
    if setting_kwargs is None:
        return common.read_model(model_fname), assets
    # Convert XML to dicts
    model = xmltodict.parse(common.read_model(model_fname))
    materials = xmltodict.parse(assets['./common/materials.xml'])
    skybox = xmltodict.parse(assets['./common/skybox.xml'])
    # Edit lighting
    if 'light_pos' in setting_kwargs:
        assert isinstance(setting_kwargs['light_pos'], (list, tuple, np.ndarray))
        light_pos = f'{setting_kwargs["light_pos"][0]} {setting_kwargs["light_pos"][1]} {setting_kwargs["light_pos"][2]}'
        # The light may live directly under worldbody or under a body element.
        if 'light' in model['mujoco']['worldbody']:
            model['mujoco']['worldbody']['light']['@pos'] = light_pos
        elif 'light' in model['mujoco']['worldbody']['body']:
            model['mujoco']['worldbody']['body']['light']['@pos'] = light_pos
        else:
            raise NotImplementedError('model xml does not contain entity light')
    # Edit camera
    if 'cam_pos' in setting_kwargs:
        assert isinstance(setting_kwargs['cam_pos'], (list, tuple, np.ndarray))
        cam_pos = f'{setting_kwargs["cam_pos"][0]} {setting_kwargs["cam_pos"][1]} {setting_kwargs["cam_pos"][2]}'
        # Index [0] targets the first camera; assumes at least one is defined.
        if 'camera' in model['mujoco']['worldbody']:
            model['mujoco']['worldbody']['camera'][0]['@pos'] = cam_pos
        elif 'camera' in model['mujoco']['worldbody']['body']:
            model['mujoco']['worldbody']['body']['camera'][0]['@pos'] = cam_pos
        else:
            raise NotImplementedError('model xml does not contain entity camera')
    # Edit distractor
    if 'distractor_pos' in setting_kwargs:
        assert isinstance(setting_kwargs['distractor_pos'], (list, tuple, np.ndarray))
        distractor_pos = f'{setting_kwargs["distractor_pos"][0]} {setting_kwargs["distractor_pos"][1]} {setting_kwargs["distractor_pos"][2]}'
        assert model['mujoco']['worldbody']['body'][-1]['@name'] == 'distractor', 'distractor must be in worldbody'
        model['mujoco']['worldbody']['body'][-1]['geom']['@pos'] = distractor_pos
    # Edit grid floor
    if 'grid_rgb1' in setting_kwargs:
        assert isinstance(setting_kwargs['grid_rgb1'], (list, tuple, np.ndarray))
        materials['mujoco']['asset']['texture']['@rgb1'] = \
            f'{setting_kwargs["grid_rgb1"][0]} {setting_kwargs["grid_rgb1"][1]} {setting_kwargs["grid_rgb1"][2]}'
    if 'grid_rgb2' in setting_kwargs:
        assert isinstance(setting_kwargs['grid_rgb2'], (list, tuple, np.ndarray))
        materials['mujoco']['asset']['texture']['@rgb2'] = \
            f'{setting_kwargs["grid_rgb2"][0]} {setting_kwargs["grid_rgb2"][1]} {setting_kwargs["grid_rgb2"][2]}'
    if 'grid_texrepeat' in setting_kwargs:
        assert isinstance(setting_kwargs['grid_texrepeat'], (list, tuple, np.ndarray))
        # material[0] is assumed to be the grid material -- see materials.xml.
        materials['mujoco']['asset']['material'][0]['@texrepeat'] = \
            f'{setting_kwargs["grid_texrepeat"][0]} {setting_kwargs["grid_texrepeat"][1]}'
    if 'grid_reflectance' in setting_kwargs:
        materials['mujoco']['asset']['material'][0]['@reflectance'] = \
            str(setting_kwargs["grid_reflectance"])
    # Edit self
    if 'self_rgb' in setting_kwargs:
        assert isinstance(setting_kwargs['self_rgb'], (list, tuple, np.ndarray))
        # material[1] is assumed to be the agent's own material; alpha fixed at 1.
        materials['mujoco']['asset']['material'][1]['@rgba'] = \
            f'{setting_kwargs["self_rgb"][0]} {setting_kwargs["self_rgb"][1]} {setting_kwargs["self_rgb"][2]} 1'
    # Edit skybox
    if 'skybox_rgb' in setting_kwargs:
        assert isinstance(setting_kwargs['skybox_rgb'], (list, tuple, np.ndarray))
        skybox['mujoco']['asset']['texture']['@rgb1'] = \
            f'{setting_kwargs["skybox_rgb"][0]} {setting_kwargs["skybox_rgb"][1]} {setting_kwargs["skybox_rgb"][2]}'
    if 'skybox_rgb2' in setting_kwargs:
        assert isinstance(setting_kwargs['skybox_rgb2'], (list, tuple, np.ndarray))
        skybox['mujoco']['asset']['texture']['@rgb2'] = \
            f'{setting_kwargs["skybox_rgb2"][0]} {setting_kwargs["skybox_rgb2"][1]} {setting_kwargs["skybox_rgb2"][2]}'
    if 'skybox_markrgb' in setting_kwargs:
        assert isinstance(setting_kwargs['skybox_markrgb'], (list, tuple, np.ndarray))
        skybox['mujoco']['asset']['texture']['@markrgb'] = \
            f'{setting_kwargs["skybox_markrgb"][0]} {setting_kwargs["skybox_markrgb"][1]} {setting_kwargs["skybox_markrgb"][2]}'
    # Convert back to XML
    model_xml = xmltodict.unparse(model)
    assets['./common/materials.xml'] = xmltodict.unparse(materials)
    assets['./common/skybox.xml'] = xmltodict.unparse(skybox)
    return model_xml, assets
|
[
"hello@nicklashansen.com"
] |
hello@nicklashansen.com
|
d90cd378e2025596ceacb07d965c298a1359589d
|
719da820d1aad1d352544badc022e0422f1f7588
|
/tools/demo.py
|
e8a5d14474411fbf6d0447c7a38e38b2c4bd3789
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
yuhan210/py-faster-rcnn
|
b0886628ba7c0f01fd4ccbd6b35b14835dfbd922
|
dbf36cc2a327d6d58b92ce4b973fdca45cf9d14e
|
refs/heads/master
| 2021-01-10T00:57:05.152454
| 2016-03-14T16:38:11
| 2016-03-14T16:38:11
| 53,615,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,090
|
py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
# Detection class labels; index 0 is the background class. These are the 20
# PASCAL VOC categories (matches the VOC label set).
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')

# Map of --net choice to (model directory name, caffemodel file name).
NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
                  'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes.

    Args:
        im: image array; channel order is reversed below (presumably
            BGR from cv2 -> RGB for matplotlib -- confirm with caller).
        class_name: label used in box captions and the figure title.
        dets: (n, 5) array; columns are x1, y1, x2, y2, score.
        thresh: minimum score for a detection to be drawn.
    """
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        # Nothing above threshold: draw nothing.
        return

    im = im[:, :, (2, 1, 0)]  # reverse channel order for display
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        # Red rectangle outline plus a blue caption box per detection.
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    # NOTE(review): hard-coded output file; each class overwrites the same
    # 'test.jpg' in the working directory.
    plt.savefig('test.jpg')
    plt.draw()
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals.

    Loads data/demo/<image_name>, runs im_detect once, then draws
    per-class detections after NMS. Python 2 print syntax below.
    """
    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1 # because we skipped background
        # boxes holds 4 regressed coordinates per class, laid out contiguously.
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
    """Parse the demo's command-line options (--gpu, --cpu, --net)."""
    arg_parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    arg_parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                            default=0, type=int)
    arg_parser.add_argument('--cpu', dest='cpu_mode',
                            help='Use CPU mode (overrides --gpu)',
                            action='store_true')
    arg_parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
                            choices=NETS.keys(), default='vgg16')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # NOTE: this file is Python 2 (print statements, xrange below).
    cfg.TEST.HAS_RPN = True # Use RPN for proposals
    args = parse_args()
    # Resolve model definition and weights for the chosen net (vgg16 or zf).
    prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
                            'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS[args.demo_net][1])
    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))
    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    #print prototxt, caffemodel
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    print '\n\nLoaded network {:s}'.format(caffemodel)
    # Warmup on a dummy image (two passes to stabilize timing/caches).
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _= im_detect(net, im)
    im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
                '001763.jpg', '004545.jpg']
    for im_name in im_names:
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for data/demo/{}'.format(im_name)
        demo(net, im_name)
    plt.show()
|
[
"yuhan210@gmail.com"
] |
yuhan210@gmail.com
|
c6e1e16d3f9cac75006e39850baa427f272dee11
|
05b3d499424e0ac49a1c7489e1455a48b02439de
|
/playhouse/dataset.py
|
09d63fb2688a5af414cd2d8be4bce1f936cda952
|
[
"MIT"
] |
permissive
|
manipuladordedados/peewee
|
c5d6e0debd33e8163bfbe41e1107003734be0d7f
|
82a71566b1f0d76430ac5efccb2bc09f491faedc
|
refs/heads/master
| 2020-12-11T05:56:20.679084
| 2014-11-05T09:17:16
| 2014-11-05T09:17:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,375
|
py
|
import csv
import datetime
from decimal import Decimal
import json
import operator
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import sys
from peewee import *
from playhouse.db_url import connect
from playhouse.migrate import migrate
from playhouse.migrate import SchemaMigrator
from playhouse.reflection import Introspector
if sys.version_info[0] == 3:
basestring = str
from functools import reduce
class DataSet(object):
    """High-level, schema-less wrapper around a peewee database.

    Connects to the database described by *url*, introspects the existing
    tables into peewee models, and exposes each table as a ``Table``
    object via ``dataset[table_name]``.
    """
    def __init__(self, url):
        self._url = url
        parse_result = urlparse(url)
        # Drop the leading "/" of the URL path to get the db name/path.
        self._database_path = parse_result.path[1:]
        # Connect to the database.
        self._database = connect(url)
        self._database.connect()
        # Introspect the database and generate models.
        self._introspector = Introspector.from_database(self._database)
        self._models = self._introspector.generate_models()
        self._migrator = SchemaMigrator.from_database(self._database)
        # Base class for models created on the fly, bound to this database.
        class BaseModel(Model):
            class Meta:
                database = self._database
        self._base_model = BaseModel
        self._export_formats = self.get_export_formats()
    def __repr__(self):
        return '<DataSet: %s>' % self._database_path
    def get_export_formats(self):
        """Map format names to Exporter classes; override to add formats."""
        return {
            'csv': CSVExporter,
            'json': JSONExporter}
    def __getitem__(self, table):
        # Table creates the underlying table on first access if missing.
        return Table(self, table, self._models.get(table))
    @property
    def tables(self):
        return self._database.get_tables()
    def __contains__(self, table):
        return table in self.tables
    def connect(self):
        self._database.connect()
    def close(self):
        self._database.close()
    def __enter__(self):
        # Context-manager support: ``with DataSet(url) as db: ...``
        self.connect()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self._database.is_closed():
            self.close()
    def query(self, sql, params=None, commit=True):
        """Execute raw SQL and return the database cursor."""
        return self._database.execute_sql(sql, params, commit)
    def transaction(self):
        # Nest via savepoints when already inside a transaction.
        if self._database.transaction_depth() == 0:
            return self._database.transaction()
        else:
            return self._database.savepoint()
    def freeze(self, query, format='csv', filename=None, file_obj=None,
               **kwargs):
        """Export *query* results using the named format to a file.

        Exactly one of *filename* or *file_obj* must be given.
        Raises ValueError for conflicting targets or unknown formats.
        """
        if filename and file_obj:
            raise ValueError('file is over-specified. Please use either '
                             'filename or file_obj, but not both.')
        if not filename and not file_obj:
            raise ValueError('A filename or file-like object must be '
                             'specified.')
        if format not in self._export_formats:
            valid_formats = ', '.join(sorted(self._export_formats.keys()))
            raise ValueError('Unsupported format "%s". Use one of %s.' % (
                format, valid_formats))
        if filename:
            file_obj = open(filename, 'w')
        exporter = self._export_formats[format](query)
        exporter.export(file_obj, **kwargs)
        if filename:
            # Only close handles we opened ourselves.
            file_obj.close()
class Table(object):
    """A single table of a DataSet with a dict-based CRUD interface.

    Columns are created lazily: writing a row with unknown keys migrates
    the model, adding one nullable column per new key.
    """
    def __init__(self, dataset, name, model_class):
        self.dataset = dataset
        self.name = name
        if model_class is None:
            # No introspected model: the table does not exist yet, so
            # build a fresh model class and create the table.
            model_class = self._create_model()
            model_class.create_table()
            self.dataset._models[name] = model_class
        self.model_class = model_class
    def __repr__(self):
        return '<Table: %s>' % self.name
    def __len__(self):
        return self.find().count()
    def __iter__(self):
        return iter(self.find().iterator())
    def _create_model(self):
        # Dynamically build an empty model bound to the dataset's database.
        return type(str(self.name), (self.dataset._base_model,), {})
    def create_index(self, columns, unique=False):
        self.dataset._database.create_index(
            self.model_class,
            columns,
            unique=unique)
    def _guess_field_type(self, value):
        """Pick a peewee field class from a sample Python value.

        Falls back to TextField for anything unrecognized.
        """
        if isinstance(value, basestring):
            return TextField
        if isinstance(value, (datetime.date, datetime.datetime)):
            return DateTimeField
        elif value is True or value is False:
            return BooleanField
        elif isinstance(value, int):
            return IntegerField
        elif isinstance(value, float):
            return FloatField
        elif isinstance(value, Decimal):
            return DecimalField
        return TextField
    @property
    def columns(self):
        return self.model_class._meta.get_field_names()
    def _migrate_new_columns(self, data):
        """Add a nullable column for each key of *data* unknown to the model."""
        new_keys = set(data) - set(self.model_class._meta.fields)
        if new_keys:
            operations = []
            for key in new_keys:
                field_class = self._guess_field_type(data[key])
                field = field_class(null=True)
                operations.append(
                    self.dataset._migrator.add_column(self.name, key, field))
                field.add_to_class(self.model_class, key)
            migrate(*operations)
    def insert(self, **data):
        self._migrate_new_columns(data)
        return self.model_class.insert(**data).execute()
    def _apply_where(self, query, filters, conjunction=None):
        # Combine column == value expressions, AND-ed by default.
        conjunction = conjunction or operator.and_
        if filters:
            expressions = [
                (self.model_class._meta.fields[column] == value)
                for column, value in filters.items()]
            query = query.where(reduce(conjunction, expressions))
        return query
    def update(self, columns=None, conjunction=None, **data):
        """Update rows; *columns* names the **data keys used as filters."""
        self._migrate_new_columns(data)
        filters = {}
        if columns:
            for column in columns:
                filters[column] = data.pop(column)
        return self._apply_where(
            self.model_class.update(**data),
            filters,
            conjunction).execute()
    def _query(self, **query):
        return self._apply_where(self.model_class.select(), query)
    def find(self, **query):
        """Return matching rows as dicts."""
        return self._query(**query).dicts()
    def find_one(self, **query):
        """Return the first matching row, or None when nothing matches."""
        try:
            return self.find(**query).get()
        except self.model_class.DoesNotExist:
            return None
    def all(self):
        return self.find()
    def delete(self, **query):
        return self._apply_where(self.model_class.delete(), query).execute()
class Exporter(object):
    """Abstract base for query exporters; subclasses implement export()."""

    def __init__(self, query):
        # The query (or any iterable of rows) to be serialized.
        self.query = query

    def export(self, file_obj):
        """Write the query results to *file_obj*; must be overridden."""
        raise NotImplementedError
class JSONExporter(Exporter):
    """Serialize query results as JSON, handling dates/times and Decimals."""

    @staticmethod
    def default(o):
        # Fallback serializer handed to json.dump for non-native types.
        if isinstance(o, (datetime.datetime, datetime.date, datetime.time)):
            return o.isoformat()
        if isinstance(o, Decimal):
            return str(o)
        raise TypeError('Unable to serialize %r as JSON.' % o)

    def export(self, file_obj, **kwargs):
        """Dump the materialized query rows to *file_obj* as a JSON array."""
        rows = list(self.query)
        json.dump(rows, file_obj, default=JSONExporter.default, **kwargs)
class CSVExporter(Exporter):
    """Serialize query results as CSV, optionally with a header row."""

    def export(self, file_obj, header=True, **kwargs):
        writer = csv.writer(file_obj, **kwargs)
        # Emit a header row when the query exposes its selected fields.
        if header and hasattr(self.query, '_select'):
            columns = [field.name for field in self.query._select]
            writer.writerow(columns)
        for record in self.query.tuples():
            writer.writerow(record)
|
[
"coleifer@gmail.com"
] |
coleifer@gmail.com
|
7ef62aaf8b814aafa9e209c301de90e59413b19d
|
84b81ad47af6a4f40c1f2fa7b513b9ede260d038
|
/MyDiary_Backend/test_user.py
|
93f3fd647bbf531a6f5a990eba6ee1d81235dd9f
|
[] |
no_license
|
michael-basweti/michael-basweti.github.io
|
ebc219f69943f55779a888ae7b54cae14a5b09de
|
34581d8560b2f7f60a0cf67d7e631e1ef9b89d7e
|
refs/heads/flask_api
| 2022-12-24T21:57:02.396825
| 2018-10-26T10:19:08
| 2018-10-26T10:19:08
| 140,665,411
| 0
| 1
| null | 2022-07-29T22:33:35
| 2018-07-12T05:32:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 993
|
py
|
"""
nose tests for the api
"""
from nose.tools import assert_true
import requests
def test_get_all():
    """Listing every diary entry should return a success response."""
    resp = requests.get('http://127.0.0.1:5000/mydiary/api/v1.0/entries/get')
    assert_true(resp.ok)
def test_post():
    """Creating an entry via POST should return a success response."""
    resp = requests.post('http://127.0.0.1:5000/mydiary/api/v1.0/entries/post')
    assert_true(resp.ok)
def test_get_one():
    """Fetching a single entry by id should return a success response."""
    resp = requests.get('http://127.0.0.1:5000/mydiary/api/v1.0/entries/get/1')
    assert_true(resp.ok)
def test_edit_one():
    """Editing an entry via PUT should return a success response."""
    resp = requests.put('http://127.0.0.1:5000/mydiary/api/v1.0/entries/edit/1')
    assert_true(resp.ok)
def test_delete_one():
    """Deleting an entry should return a success response."""
    resp = requests.delete('http://127.0.0.1:5000/mydiary/api/v1.0/entries/delete/1')
    assert_true(resp.ok)
|
[
"baswetima@gmail.com"
] |
baswetima@gmail.com
|
26b00c725a10bf052e14ac1f9e8a0e04d851fb2b
|
e3ce9a14ba58eaf7a684f2b6088a6172fa08bf41
|
/02 Estructuras de control/condicional_anidados.py
|
9528be679e9b51cafd89aa1cd194fdf697c8c464
|
[] |
no_license
|
edwinhrojasm/python_basico_20203
|
f2134170f9ffe7d42fad590f795a8201293771c7
|
54fa3da2d9df3684bd3c07c8b95118ad5f0b7684
|
refs/heads/master
| 2022-12-30T17:16:31.406874
| 2020-10-22T00:45:27
| 2020-10-22T00:45:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
# Read an integer and reduce it: divide by the first of 4, 2, 3 that
# divides it evenly, otherwise increment it by one.
x = int(input("Ingrese un valor entero: "))
print("Usted ingreso el: " + str(x))
if x % 4 == 0:
    x /= 4
elif x % 2 == 0:
    x /= 2
elif x % 3 == 0:
    x /= 3
else:
    x += 1
print("Ahora el valor es: " + str(x))
|
[
"apdaza@gmail.com"
] |
apdaza@gmail.com
|
8e729bdeca6812df6854b6e2437387c139da97d3
|
223fde0acac1b6100277e8ad2a7cb0233b4fbce7
|
/src/pyMission/multipoint_derivatives.py
|
172ace25b7fa21169998aa55a84d802e626152d2
|
[] |
no_license
|
hwangjt/pyMission-1
|
26cf5646bf79560a7d52d2db56ecde0b535bdc85
|
316c738afde8b29225586a6a13eea0b97aaa894d
|
refs/heads/master
| 2020-12-25T10:08:39.327912
| 2015-04-02T20:42:20
| 2015-04-02T20:42:20
| 30,985,983
| 0
| 0
| null | 2015-02-26T19:28:24
| 2015-02-18T20:45:25
|
OpenEdge ABL
|
UTF-8
|
Python
| false
| false
| 5,825
|
py
|
''' Analysis with multiple mission segments in parallel '''
import time
import numpy as np
from openmdao.main.api import set_as_top, Driver, Assembly
from openmdao.main.mpiwrap import MPI
from openmdao.main.test.simpledriver import SimpleDriver
from openmdao.lib.casehandlers.api import BSONCaseRecorder
from pyoptsparse_driver.pyoptsparse_driver import pyOptSparseDriver
from pyMission.segment import MissionSegment
# Same discretization for each segment for now.
num_elem = 250
num_cp = 50
model = set_as_top(Assembly())
#------------------------
# Mission Segment 1
#------------------------
x_range = 9000.0  # nautical miles
# define bounds for the flight path angle
gamma_lb = np.tan(-35.0 * (np.pi/180.0))/1e-1
gamma_ub = np.tan(35.0 * (np.pi/180.0))/1e-1
takeoff_speed = 83.3
landing_speed = 72.2
# NOTE(review): gamma_lb/gamma_ub, takeoff_speed and landing_speed are
# computed (here and in the other two segments) but never used below —
# presumably leftovers from the optimization script; confirm.
altitude = 10 * np.sin(np.pi * np.linspace(0,1,num_elem+1))
start = time.time()
x_range *= 1.852  # nautical miles -> km
# Cosine-spaced control points along the range (scaled units).
x_init = x_range * 1e3 * (1-np.cos(np.linspace(0, 1, num_cp)*np.pi))/2/1e6
M_init = np.ones(num_cp)*0.82
h_init = 10 * np.sin(np.pi * x_init / (x_range/1e3))
model.add('seg1', MissionSegment(num_elem=num_elem, num_cp=num_cp,
                                 x_pts=x_init, surr_file='crm_surr'))
# Initial value of the parameter
model.seg1.h_pt = h_init
model.seg1.M_pt = M_init
model.seg1.set_init_h_pt(altitude)
# Calculate velocity from the Mach we have specified.
model.seg1.SysSpeed.v_specified = False
# Initial design parameters
model.seg1.S = 427.8/1e2
model.seg1.ac_w = 210000*9.81/1e6
model.seg1.thrust_sl = 1020000.0/1e6
model.seg1.SFCSL = 8.951*9.81
model.seg1.AR = 8.68
model.seg1.oswald = 0.8
# Flag for making sure we run serial if we do an mpirun
model.seg1.driver.system_type = 'serial'
model.seg1.coupled_solver.system_type = 'serial'
#------------------------
# Mission Segment 2
#------------------------
x_range = 7000.0  # nautical miles
# define bounds for the flight path angle
gamma_lb = np.tan(-35.0 * (np.pi/180.0))/1e-1
gamma_ub = np.tan(35.0 * (np.pi/180.0))/1e-1
takeoff_speed = 83.3
landing_speed = 72.2
altitude = 10 * np.sin(np.pi * np.linspace(0,1,num_elem+1))
start = time.time()
x_range *= 1.852
x_init = x_range * 1e3 * (1-np.cos(np.linspace(0, 1, num_cp)*np.pi))/2/1e6
M_init = np.ones(num_cp)*0.82
h_init = 10 * np.sin(np.pi * x_init / (x_range/1e3))
model.add('seg2', MissionSegment(num_elem=num_elem, num_cp=num_cp,
                                 x_pts=x_init, surr_file='crm_surr'))
# Initial value of the parameter
model.seg2.h_pt = h_init
model.seg2.M_pt = M_init
model.seg2.set_init_h_pt(altitude)
# Calculate velocity from the Mach we have specified.
model.seg2.SysSpeed.v_specified = False
# Initial design parameters
model.seg2.S = 427.8/1e2
model.seg2.ac_w = 210000*9.81/1e6
model.seg2.thrust_sl = 1020000.0/1e6
model.seg2.SFCSL = 8.951*9.81
model.seg2.AR = 8.68
model.seg2.oswald = 0.8
# Flag for making sure we run serial if we do an mpirun
model.seg2.driver.system_type = 'serial'
model.seg2.coupled_solver.system_type = 'serial'
#------------------------
# Mission Segment 3
#------------------------
x_range = 5000.0  # nautical miles
# define bounds for the flight path angle
gamma_lb = np.tan(-35.0 * (np.pi/180.0))/1e-1
gamma_ub = np.tan(35.0 * (np.pi/180.0))/1e-1
takeoff_speed = 83.3
landing_speed = 72.2
altitude = 10 * np.sin(np.pi * np.linspace(0,1,num_elem+1))
start = time.time()
x_range *= 1.852
x_init = x_range * 1e3 * (1-np.cos(np.linspace(0, 1, num_cp)*np.pi))/2/1e6
M_init = np.ones(num_cp)*0.82
h_init = 10 * np.sin(np.pi * x_init / (x_range/1e3))
model.add('seg3', MissionSegment(num_elem=num_elem, num_cp=num_cp,
                                 x_pts=x_init, surr_file='crm_surr'))
# Initial value of the parameter
model.seg3.h_pt = h_init
model.seg3.M_pt = M_init
model.seg3.set_init_h_pt(altitude)
# Calculate velocity from the Mach we have specified.
model.seg3.SysSpeed.v_specified = False
# Initial design parameters
model.seg3.S = 427.8/1e2
model.seg3.ac_w = 210000*9.81/1e6
model.seg3.thrust_sl = 1020000.0/1e6
model.seg3.SFCSL = 8.951*9.81
model.seg3.AR = 8.68
model.seg3.oswald = 0.8
# Flag for making sure we run serial if we do an mpirun
model.seg3.driver.system_type = 'serial'
model.seg3.coupled_solver.system_type = 'serial'
#----------------------
# Prepare to Run
#----------------------
model.driver.workflow.add(['seg1', 'seg2', 'seg3'])
#model._setup()
#from openmdao.util.dotgraph import plot_system_tree
#plot_system_tree(model._system)
# SimpleDriver just records the problem formulation; we only need the
# derivatives (calc_gradient below), not an actual optimization run.
model.replace('driver', SimpleDriver())
model.driver.add_objective('seg1.fuelburn + seg2.fuelburn + seg3.fuelburn')
model.driver.add_constraint('seg1.h[0] = 0.0')
model.driver.add_constraint('seg2.h[0] = 0.0')
model.driver.add_constraint('seg3.h[0] = 0.0')
model.driver.add_constraint('seg1.h[-1] = 0.0')
model.driver.add_constraint('seg2.h[-1] = 0.0')
model.driver.add_constraint('seg3.h[-1] = 0.0')
model.driver.add_constraint('seg1.Tmin < 0.0')
model.driver.add_constraint('seg2.Tmin < 0.0')
model.driver.add_constraint('seg3.Tmin < 0.0')
model.driver.add_constraint('seg1.Tmax < 0.0')
model.driver.add_constraint('seg2.Tmax < 0.0')
model.driver.add_constraint('seg3.Tmax < 0.0')
model.driver.add_parameter('seg1.h_pt', low=0.0, high=14.1)
model.driver.add_parameter('seg2.h_pt', low=0.0, high=14.1)
model.driver.add_parameter('seg3.h_pt', low=0.0, high=14.1)
model.driver.gradient_options.iprint = 1
model.driver.gradient_options.lin_solver = 'linear_gs'
model.driver.gradient_options.maxiter = 1
#model.driver.gradient_options.lin_solver = 'petsc_ksp'
start = time.time()
model.run()
J = model.driver.workflow.calc_gradient(return_format='dict')
# NOTE(review): Python 2 print statements below; this script targets Py2.
print "."
if MPI:
    # Under MPI, gather the distributed Jacobian and print on rank 0 only.
    J = model.driver.workflow._system.get_combined_J(J)
    if MPI.COMM_WORLD.rank == 0:
        print J
else:
    print "J", J
print 'Simulation TIME:', time.time() - start
|
[
"kenneth.t.moore-1@nasa.gov"
] |
kenneth.t.moore-1@nasa.gov
|
d6ed7c947efd9d5100b44e9a063b25dac284ccc0
|
b68c2043016baec884e8fb7edcda53243a9807b8
|
/src/storage/storage.py
|
fd44d652b0d77394ca916177b3f979d69662dff8
|
[] |
no_license
|
INP-Group/SystemJ
|
7a1dcf4867591c663e96be0c41089151cc31887d
|
c48ada6386653402ed1b6a8895ae223846873158
|
refs/heads/master
| 2020-04-05T23:20:11.972621
| 2015-07-19T03:34:03
| 2015-07-19T03:34:03
| 39,321,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
# -*- encoding: utf-8 -*-
from project.settings import POSTGRESQL_DB, \
POSTGRESQL_HOST, POSTGRESQL_PASSWORD, POSTGRESQL_TABLE, \
POSTGRESQL_USER, ZEROMQ_HOST, ZEROMQ_PORT
from src.storage.berkeley import BerkeleyStorage
from src.storage.postgresql import PostgresqlStorage
from src.storage.zeromqserver import ZeroMQServer
class Storage(object):
    """Wires together the SQL backend, the Berkeley DB layer, and the
    ZeroMQ server front-end."""

    def __init__(self):
        # Relational backend first; the Berkeley layer sits on top of it,
        # and the ZeroMQ server serves from the Berkeley layer.
        sql = PostgresqlStorage(database=POSTGRESQL_DB,
                                user=POSTGRESQL_USER,
                                password=POSTGRESQL_PASSWORD,
                                tablename=POSTGRESQL_TABLE,
                                host=POSTGRESQL_HOST)
        self.sql_storage = sql
        self.berkeley_db = BerkeleyStorage(sql_storage=sql)
        self.zeromq = ZeroMQServer(host=ZEROMQ_HOST,
                                   port=ZEROMQ_PORT,
                                   berkeley_db=self.berkeley_db)

    def start(self):
        """Start serving requests over ZeroMQ."""
        self.zeromq.start()
|
[
"sapronov.alexander92@gmail.com"
] |
sapronov.alexander92@gmail.com
|
3731cf807cb614a2393e21e6bdc30eece5399cf1
|
690cdb4acc7becf3c18955e3c2732ec5b0735b2c
|
/python-demos/concurrent_parallel_demos/cpu_bound_parallel.py
|
b0e9e2b7975ba3b0740bc754929bc91af41d7692
|
[
"MIT"
] |
permissive
|
t4d-classes/advanced-python_04192021
|
30ba51cad9bc6af96fae2b9e2998fdb60244d6ba
|
a30cd924d918bf41c0775a1235eef849746a5f3d
|
refs/heads/master
| 2023-04-08T03:22:01.145375
| 2021-04-23T22:35:18
| 2021-04-23T22:35:18
| 357,016,064
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
""" cpu bound single demo """
from collections.abc import Generator
import itertools
import time
import multiprocessing as mp
def fibonacci() -> Generator[int, None, None]:
    """Yield the infinite Fibonacci sequence 0, 1, 1, 2, 3, 5, ..."""
    num_1, num_2 = 0, 1
    yield num_1
    yield num_2
    while True:
        num_1, num_2 = num_2, num_1 + num_2
        yield num_2


def calc_fib_total(p_results: list[int], count: int = 500000) -> None:
    """Sum the first *count* Fibonacci numbers and append the total.

    Args:
        p_results: list (or multiprocessing manager list proxy) to append
            the result to.
        count: how many terms of the sequence to sum. Defaults to the
            original hard-coded 500000 so the multiprocessing demo below
            behaves identically; smaller values make the function cheap
            to test and reuse.
    """
    total = sum(itertools.islice(fibonacci(), count))
    p_results.append(total)
if __name__ == "__main__":
    start_time = time.time()
    # A manager-backed list lets the worker processes share their results.
    with mp.Manager() as manager:
        results: list[int] = manager.list()
        processes: list[mp.Process] = []
        # Fan out eight CPU-bound workers to run in parallel.
        for _ in range(8):
            a_process = mp.Process(target=calc_fib_total, args=(results,))
            a_process.start()
            processes.append(a_process)
        # Wait for every worker before reading the shared list.
        for a_process in processes:
            a_process.join()
        time_elapsed = time.time() - start_time
        print(len(results))
        print(time_elapsed)
|
[
"eric@t4d.io"
] |
eric@t4d.io
|
c7050b95df428a9e2e0aba7c94567211af4cf38f
|
bb6ce2f4fc53dc9d2fc7a701cd4683b23ecf30b2
|
/tests/py/test_browsing.py
|
4897ee674d4f844978c35362aa7bff14e17d8d0c
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
liberapay/liberapay.com
|
2640b9f830efaadd17009e3aed72eadd19c0f94e
|
a02eea631f89b17ac8179bdd37e5fa89ac54ede8
|
refs/heads/master
| 2023-08-31T10:44:04.235477
| 2023-08-27T07:29:47
| 2023-08-27T07:29:47
| 36,075,352
| 1,586
| 282
| null | 2023-09-03T09:23:26
| 2015-05-22T14:03:52
|
Python
|
UTF-8
|
Python
| false
| false
| 4,611
|
py
|
import os
import re
from pando import Response
import pytest
from liberapay.billing.payday import Payday
from liberapay.testing import EUR, Harness
from liberapay.utils import find_files
overescaping_re = re.compile(r'&(#[0-9]+|#x[0-9a-f]+|[a-z0-9]+);')
class BrowseTestHarness(Harness):
    """Harness that derives concrete URLs from the .spt simplate files on
    disk and crawls them all, failing on 404s, server errors, and
    over-escaped HTML entities."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        i = len(cls.client.www_root)
        def f(spt):
            # Map a simplate path to its URL path: "index.*" files map to
            # their directory, other files just lose the ".spt" suffix.
            if spt[spt.rfind('/')+1:].startswith('index.'):
                return spt[i:spt.rfind('/')+1]
            return spt[i:-4]
        urls = {}
        for url in sorted(map(f, find_files(cls.client.www_root, '*.spt'))):
            # Substitute concrete, fixture-backed values for the
            # %placeholder path segments (see browse_setup below).
            url = url.replace('/%username/membership/', '/team/membership/') \
                     .replace('/team/membership/%action', '/team/membership/join') \
                     .replace('/%username/news/%action', '/%username/news/subscribe') \
                     .replace('/for/%name/', '/for/wonderland/') \
                     .replace('/for/wonderland/%action', '/for/wonderland/leave') \
                     .replace('/%platform', '/github') \
                     .replace('/%user_name/', '/liberapay/') \
                     .replace('/%redirect_to', '/giving') \
                     .replace('/%back_to', '/') \
                     .replace('/%provider', '/stripe') \
                     .replace('/%payment_id', '/') \
                     .replace('/%payin_id', '/') \
                     .replace('/payday/%id', '/payday/') \
                     .replace('/%type', '/receiving.js')
            # Crawl profile pages both as an individual and as a team.
            urls[url.replace('/%username/', '/david/')] = None
            urls[url.replace('/%username/', '/team/')] = None
        cls.urls = list(urls)
    def browse_setup(self):
        """Create the database fixtures the substituted URLs refer to."""
        self.david = self.make_participant('david')
        self.team = self.make_participant('team', kind='group')
        c = self.david.create_community('Wonderland')
        self.david.upsert_community_membership(True, c.id)
        self.team.add_member(self.david)
        self.org = self.make_participant('org', kind='organization')
        self.invoice_id = self.db.one("""
            INSERT INTO invoices
                        (sender, addressee, nature, amount, description, details, documents, status)
                 VALUES (%s, %s, 'expense', ('28.04','EUR'), 'badges and stickers', null, '{}'::jsonb, 'new')
          RETURNING id
        """, (self.david.id, self.org.id))
        Payday.start().run()
    def browse(self, **kw):
        """GET every URL in cls.urls, asserting no 404/5xx/over-escaping."""
        for url in self.urls:
            # Receipt and exchange pages need objects we don't create here.
            if url.endswith('/%exchange_id') or '/receipts/' in url:
                continue
            url = url.replace('/team/invoices/%invoice_id', '/org/invoices/%s' % self.invoice_id)
            url = url.replace('/%invoice_id', '/%s' % self.invoice_id)
            assert '/%' not in url
            try:
                r = self.client.GET(url, **kw)
            except Response as e:
                # 404 and 5xx fail the test; other status codes (redirects,
                # auth challenges, ...) are acceptable responses.
                if e.code == 404 or e.code >= 500:
                    raise
                r = e
            assert r.code != 404
            assert r.code < 500
            assert not overescaping_re.search(r.text)
class TestBrowsing(BrowseTestHarness):
    """Crawl the whole site as various kinds of visitors."""
    def test_anon_can_browse_in_french(self):
        self.browse_setup()
        self.browse(HTTP_ACCEPT_LANGUAGE=b'fr')
    def test_new_participant_can_browse(self):
        self.browse_setup()
        self.browse(auth_as=self.david)
    def test_active_participant_can_browse(self):
        self.browse_setup()
        # Give david both incoming and outgoing payment activity so the
        # money-related pages have data to render.
        self.add_payment_account(self.david, 'stripe')
        bob = self.make_participant('bob')
        self.add_payment_account(bob, 'paypal')
        bob.set_tip_to(self.david, EUR('1.00'))
        bob_card = self.upsert_route(bob, 'stripe-card')
        self.make_payin_and_transfer(bob_card, self.david, EUR('2.00'))
        self.david.set_tip_to(bob, EUR('0.50'))
        david_paypal = self.upsert_route(self.david, 'paypal')
        self.make_payin_and_transfer(david_paypal, bob, EUR('20.00'))
        self.browse(auth_as=self.david)
    def test_admin_can_browse(self):
        self.browse_setup()
        admin = self.make_participant('admin', privileges=1)
        self.browse(auth_as=admin)
@pytest.mark.skipif(
    os.environ.get('LIBERAPAY_I18N_TEST') != 'yes',
    reason="this is an expensive test, we don't want to run it every time",
)
class TestTranslations(BrowseTestHarness):
    """Opt-in, slow crawl of every page in every supported language."""
    def test_all_pages_in_all_supported_langs(self):
        self.browse_setup()
        for _, l in self.client.website.lang_list:
            self.browse(HTTP_ACCEPT_LANGUAGE=l.tag.encode('ascii'))
|
[
"changaco@changaco.oy.lc"
] |
changaco@changaco.oy.lc
|
f2b2b4d9515fcb5e791cf75aec0382ee54e71dfc
|
2d4b9ef6aa8c3e39999206cbfd1d1bb60e170077
|
/cup/version.py
|
ef9dfc6395747ccf22e0131a2940f38f9781ecab
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
IsaacjlWu/CUP
|
786ff784c7a22854d083e9cd041b605a1934072e
|
5c985cd33ee7dc6f1f052a491d1c7b8915670942
|
refs/heads/master
| 2023-05-11T23:31:51.975880
| 2018-07-16T08:54:59
| 2018-07-16T08:54:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*
# #############################################################################
#
# Copyright (c) Baidu.com, Inc. All Rights Reserved
#
# #############################################################################
"""
Version metadata for the cup package.

:author:
    Guannan Ma maguannan @mythmgn
"""
VERSION = '1.6.1'  # package release version (major.minor.patch)
AUTHOR = 'nfs-qa'  # owning team
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
[
"mythmgn@gmail.com"
] |
mythmgn@gmail.com
|
d58a13c2b21ea0a06e363f887cb465d9d2d70886
|
9680c27718346be69cf7695dba674e7a0ec662ca
|
/game-Python/Python Challenge-Math Game.py
|
9bfe4c62b34c305859a22614baca17bad957cb14
|
[] |
no_license
|
Md-Monirul-Islam/Python-code
|
5a2cdbe7cd3dae94aa63298b5b0ef7e0e31cd298
|
df98f37dd9d21784a65c8bb0e46d47a646259110
|
refs/heads/main
| 2023-01-19T05:15:04.963904
| 2020-11-19T06:10:09
| 2020-11-19T06:10:09
| 314,145,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
import random
import operator
score = 0
def random_problem():
    """Print a random arithmetic question and return its correct answer."""
    ops = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
    }
    left = random.randint(1, 10)
    right = random.randint(1, 10)
    symbol = random.choice(list(ops))
    print(f"What is {left} {symbol} {right}?")
    return ops[symbol](left, right)
def ask_question():
    """Pose one problem and return True iff the user's answer is correct."""
    expected = random_problem()
    return float(input()) == expected
def game():
    """Run a five-question quiz and report the final score."""
    print("How well do you math?\n")
    score = 0
    for _ in range(5):
        if ask_question():
            score += 1
            print("Correct")
        else:
            print("Incorrect")
    print(f"Your score is {score}")


game()
|
[
"61861844+Md-Monirul-Islam@users.noreply.github.com"
] |
61861844+Md-Monirul-Islam@users.noreply.github.com
|
459b4624310b53ddf1066b5c175112767f16b74e
|
60f7d711cb3f743f148ca4be4c507244a61d823d
|
/gaphor/diagram/classes/__init__.py
|
2c89e61845c0a50239c9e661f4a18d4de9e98b58
|
[
"Apache-2.0"
] |
permissive
|
paulopperman/gaphor
|
84ffd8c18ac4f015668fbd44662cbb3ae43b9016
|
6986c4f3469720a1618a9e8526cb6f826aea626a
|
refs/heads/master
| 2020-05-24T21:57:54.252307
| 2019-05-16T22:51:50
| 2019-05-16T22:51:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
from gaphor.diagram.classes.association import AssociationItem
from gaphor.diagram.classes.dependency import DependencyItem
from gaphor.diagram.classes.generalization import GeneralizationItem
from gaphor.diagram.classes.implementation import ImplementationItem
from gaphor.diagram.classes.interface import InterfaceItem
from gaphor.diagram.classes.klass import ClassItem
from gaphor.diagram.classes.package import PackageItem
def _load():
    """Import modules for their side effects: these register the diagram
    connectors and property pages for the class-diagram items."""
    from gaphor.diagram.classes import (
        classconnect,
        interfaceconnect,
        classespropertypages,
    )
_load()  # run the registrations at package import time
|
[
"gaphor@gmail.com"
] |
gaphor@gmail.com
|
74c35250f7819ac52063403afc93980734d0a8ca
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/dockerized-gists/781a8eb20c5b32f8e378353cde4daa51/snippet.py
|
1042a5ba0a6f94ea25c9ea17708bc9b21544c48c
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
# [filter size, stride, padding]
#Assume the two dimensions are the same
#Each kernel requires the following parameters:
# - k_i: kernel size
# - s_i: stride
# - p_i: padding (if padding is uneven, right padding will higher than left padding; "SAME" option in tensorflow)
#
#Each layer i requires the following parameters to be fully represented:
# - n_i: number of feature (data layer has n_1 = imagesize )
# - j_i: distance (projected to image pixel distance) between center of two adjacent features
# - r_i: receptive field of a feature in layer i
# - start_i: position of the first feature's receptive field in layer i (idx start from 0, negative means the center fall into padding)
import math
# AlexNet-style architecture: one entry per layer as [kernel, stride, padding].
convnet =   [[11,4,0],[3,2,0],[5,1,2],[3,2,0],[3,1,1],[3,1,1],[3,1,1],[3,2,0],[6,1,0], [1, 1, 0]]
layer_names = ['conv1','pool1','conv2','pool2','conv3','conv4','conv5','pool5','fc6-conv', 'fc7-conv']
imsize = 227  # input image is imsize x imsize pixels
def outFromIn(conv, layerIn):
    """Propagate layer statistics through one conv/pool layer.

    *conv* is [kernel, stride, padding]; *layerIn* is
    [n features, jump, receptive field, start position].
    Returns the same four quantities for the layer's output.
    """
    n_in, j_in, r_in, start_in = layerIn
    k, s, p = conv
    n_out = math.floor((n_in - k + 2 * p) / s) + 1
    # Padding actually applied; the left side gets the smaller half.
    actualP = (n_out - 1) * s - n_in + k
    pL = math.floor(actualP / 2)
    j_out = j_in * s
    r_out = r_in + (k - 1) * j_in
    start_out = start_in + ((k - 1) / 2 - pL) * j_in
    return n_out, j_out, r_out, start_out
def printLayer(layer, layer_name):
    """Pretty-print one layer's (n, jump, receptive size, start) values."""
    n, jump, receptive, start = layer
    print(layer_name + ":")
    print("\t n features: %s \n \t jump: %s \n \t receptive size: %s \t start: %s " % (n, jump, receptive, start))
layerInfos = []  # accumulated (n, jump, receptive size, start) per layer
if __name__ == '__main__':
    #first layer is the data layer (image) with n_0 = image size; j_0 = 1; r_0 = 1; and start_0 = 0.5
    print ("-------Net summary------")
    currentLayer = [imsize, 1, 1, 0.5]
    printLayer(currentLayer, "input image")
    # Propagate the statistics layer by layer through the network.
    for i in range(len(convnet)):
        currentLayer = outFromIn(convnet[i], currentLayer)
        layerInfos.append(currentLayer)
        printLayer(currentLayer, layer_names[i])
    print ("------------------------")
    # NOTE(review): raw_input implies this targets Python 2 (despite the
    # parenthesized prints); under Python 3 it would need input().
    layer_name = raw_input ("Layer name where the feature in: ")
    layer_idx = layer_names.index(layer_name)
    idx_x = int(raw_input ("index of the feature in x dimension (from 0)"))
    idx_y = int(raw_input ("index of the feature in y dimension (from 0)"))
    n = layerInfos[layer_idx][0]
    j = layerInfos[layer_idx][1]
    r = layerInfos[layer_idx][2]
    start = layerInfos[layer_idx][3]
    # The requested feature indices must lie within the layer's extent.
    assert(idx_x < n)
    assert(idx_y < n)
    print ("receptive field: (%s, %s)" % (r, r))
    print ("center: (%s, %s)" % (start+idx_x*j, start+idx_y*j))
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
789d91f20f5e4d64ac0bfbc79cc982f2bf8ddde4
|
20cef5de28d025d4d37eb86ba2b1f832d52c089d
|
/src/gallium/drivers/zink/zink_extensions.py
|
6d01052c39f122aec5bcd098d634e276e7171d12
|
[] |
no_license
|
martinmullins/mesa-emscripten
|
73da0a64901b7664468f951ef09fb9a462134660
|
b4225e327b67fd7eef411cc046c1c0fecb3900de
|
refs/heads/main
| 2023-03-27T03:38:19.037337
| 2021-03-29T19:17:22
| 2021-03-29T19:17:22
| 352,753,658
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,925
|
py
|
# Copyright © 2020 Hoe Hao Cheng
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class Version:
    """A Vulkan version: the full device version plus the (major, minor)
    pair used when naming core feature/property structs."""

    device_version : (1,0,0)
    struct_version : (1,0)

    def __init__(self, version, struct=()):
        self.device_version = version
        # Default the struct version to (major, minor) of the device version.
        self.struct_version = struct if struct else (version[0], version[1])

    def version(self):
        """Render as a VK_MAKE_VERSION(major,minor,patch) macro call."""
        major, minor, patch = self.device_version
        return "VK_MAKE_VERSION({},{},{})".format(major, minor, patch)

    def struct(self):
        """Concatenated struct-version digits, e.g. (1, 2) -> "12"."""
        return "{}{}".format(self.struct_version[0], self.struct_version[1])

    def stype(self, struct: str):
        """sType of a core struct, e.g. struct="FEATURES" at (1, 2) ->
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES."""
        return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_{}_{}_{}".format(
            self.struct_version[0], self.struct_version[1], struct)
class Extension:
    """Metadata describing a Vulkan extension, consumed by the zink
    code generators (zink_device_info.py and zink_instance.py)."""

    name           : str   = None
    alias          : str   = None
    is_required    : bool  = False
    enable_conds   : [str] = None
    # these are specific to zink_device_info.py:
    has_properties : bool  = False
    has_features   : bool  = False
    guard          : bool  = False
    # these are specific to zink_instance.py:
    core_since     : Version = None
    instance_funcs : [str]   = None

    def __init__(self, name, alias="", required=False, properties=False,
                 features=False, conditions=None, guard=False, core_since=None,
                 functions=None):
        # An alias is mandatory whenever feature/property structs are
        # generated, because it names the generated struct fields.
        if alias == "" and (properties == True or features == True):
            raise RuntimeError("alias must be available when properties and/or features are used")
        self.name = name
        self.alias = alias
        self.is_required = required
        self.has_properties = properties
        self.has_features = features
        self.enable_conds = conditions
        self.guard = guard
        self.core_since = core_since
        self.instance_funcs = functions

    def pure_name(self):
        """e.g. "VK_EXT_robustness2" -> "robustness2"."""
        return self.name.split('_', 2)[2]

    def name_with_vendor(self):
        """e.g. "VK_EXT_robustness2" -> "EXT_robustness2"."""
        return self.name[3:]

    def name_in_camel_case(self):
        """e.g. "VK_EXT_robustness2" -> "Robustness2"."""
        return "".join(part.title() for part in self.name.split('_')[2:])

    def extension_name(self):
        """e.g. "VK_EXT_robustness2" -> "VK_EXT_ROBUSTNESS2_EXTENSION_NAME".
        Note: header naming is inconsistent (VK_EXT_ROBUSTNESS_2_... exists
        alongside VK_KHR_MAINTENANCE1_...), so verify the generated name."""
        return "{}_EXTENSION_NAME".format(self.name.upper())

    def extension_name_literal(self):
        """C string literal for the extension name."""
        return '"{}"'.format(self.name)

    def field(self, suffix: str):
        """zink_device_info field holding this extension's struct,
        e.g. rb2_<suffix> for VK_EXT_robustness2."""
        return "{}_{}".format(self.alias, suffix)

    def stype(self, struct: str):
        """sType of the extension's struct, e.g. struct="FEATURES" for
        VK_EXT_transform_feedback ->
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT."""
        return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_{}_{}_{}".format(
            self.pure_name().upper(), struct, self.vendor())

    def vendor(self):
        """Vendor tag, e.g. EXT in VK_EXT_robustness2."""
        return self.name.split('_')[1]


# Type aliases
Layer = Extension
|
[
"eric+marge@anholt.net"
] |
eric+marge@anholt.net
|
cf2b1104c97b1464770fe039124140845851493c
|
8b40a2959b8d2a2faca09f017e529bb6e02c0030
|
/backend/manga/asgi.py
|
29cb53e8774243095ab48245d7c364e42a77d799
|
[
"MIT"
] |
permissive
|
linea-it/manga
|
80d0982ec83590abcec10a4d09510425d39c289d
|
26add95475345d6c7f34465848d4d33a7a6b63b7
|
refs/heads/master
| 2023-09-05T20:05:50.471757
| 2023-09-04T15:45:18
| 2023-09-04T15:45:18
| 212,671,416
| 0
| 0
|
MIT
| 2023-09-12T19:31:22
| 2019-10-03T20:15:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
ASGI config for manga project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'manga.settings')
application = get_asgi_application()
|
[
"glauber.vila.verde@gmail.com"
] |
glauber.vila.verde@gmail.com
|
5be29987b306b8975af2a9ab57afca0a1aa21f8a
|
3b99e2863db4bcd3707e6f13641ddd9156289bc6
|
/tilemap/part 13/settings.py
|
e9b72d78360aa52426696357aa16353739911721
|
[
"MIT"
] |
permissive
|
m-julian/pygame_tutorials
|
500a5be3b4fad86fad577a7ea5493ac09ca41168
|
be57d865de4ac0c18148e1785443c05445159779
|
refs/heads/master
| 2022-07-18T00:15:01.335459
| 2020-05-12T15:39:45
| 2020-05-12T15:39:45
| 263,055,700
| 0
| 0
|
MIT
| 2020-05-11T13:48:49
| 2020-05-11T13:48:48
| null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
import pygame as pg

# Shorthand for pygame's 2D vector type.
vec = pg.math.Vector2

# define some colors (R, G, B)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DARKGREY = (40, 40, 40)
LIGHTGREY = (100, 100, 100)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
BROWN = (106, 55, 5)
CYAN = (0, 255, 255)

# game settings
WIDTH = 1024   # 16 * 64 or 32 * 32 or 64 * 16
HEIGHT = 768  # 16 * 48 or 32 * 24 or 64 * 12
FPS = 60
TITLE = "Tilemap Demo"
BGCOLOR = BROWN

# Size of one map tile in pixels; grid dimensions are derived from the window.
TILESIZE = 64
GRIDWIDTH = WIDTH / TILESIZE
GRIDHEIGHT = HEIGHT / TILESIZE

WALL_IMG = 'tileGreen_39.png'

# Player settings
PLAYER_HEALTH = 100
PLAYER_SPEED = 280
PLAYER_ROT_SPEED = 200  # rotation speed; presumably degrees/second — confirm in sprite code
PLAYER_IMG = 'manBlue_gun.png'
PLAYER_HIT_RECT = pg.Rect(0, 0, 35, 35)  # collision rect, smaller than the sprite image
BARREL_OFFSET = vec(30, 10)  # gun muzzle offset from the player center

# Gun settings
BULLET_IMG = 'bullet.png'
BULLET_SPEED = 500
BULLET_LIFETIME = 1000  # presumably milliseconds — confirm against usage
BULLET_RATE = 150  # presumably ms between shots — confirm against usage
KICKBACK = 200
GUN_SPREAD = 5
BULLET_DAMAGE = 10

# Mob settings
MOB_IMG = 'zombie1_hold.png'
MOB_SPEED = 150
MOB_HIT_RECT = pg.Rect(0, 0, 30, 30)
MOB_HEALTH = 100
MOB_DAMAGE = 10
MOB_KNOCKBACK = 20
[
"chris@kidscancode.org"
] |
chris@kidscancode.org
|
232a9fa2ac5e25b2aad3b20924a51dcc73d3f9b1
|
349f39b27a7c3157a1f3db65f35b96bcdb2f5919
|
/03/xx/07-sklearn/16-deep-learning/cnn.py
|
12a3bc7c6707c150b18a6aa9e0a89c549f22e8bb
|
[] |
no_license
|
microgenios/cod
|
5f870c9cefbb80d18690909baa4c9d8b9be463c2
|
0805609cc780244c640963dc4c70052e3df57b4e
|
refs/heads/master
| 2022-12-08T20:10:11.742940
| 2020-02-29T10:37:10
| 2020-02-29T10:37:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,855
|
py
|
#!/usr/bin/python
import argparse
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def build_arg_parser():
    """Return an argparse parser for the MNIST CNN classifier options."""
    arg_parser = argparse.ArgumentParser(
        description="Build a CNN classifier using MNIST data")
    arg_parser.add_argument(
        "--input-dir",
        dest="input_dir",
        type=str,
        default="./mnist_data",
        help="Directory for storing data",
    )
    return arg_parser
def get_weights(shape):
    """Return a tf.Variable initialized from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def get_biases(shape):
    """Return a tf.Variable of constant 0.1 bias values with the given shape."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def create_layer(shape):
    """Return (weights, biases) for a layer; biases are sized to the last dim."""
    weights = get_weights(shape)
    biases = get_biases([shape[-1]])
    return weights, biases
def convolution_2d(x, W):
    # SAME padding + unit strides: output spatial size equals input size.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
def max_pooling(x):
    # 2x2 max pooling with stride 2: halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
if __name__ == "__main__":
args = build_arg_parser().parse_args()
mnist = input_data.read_data_sets(args.input_dir, one_hot=True) # Get the MNIST data
x = tf.placeholder(tf.float32, [None, 784]) # with 784 neurons (28x28=784) # The images are 28x28, so create the input layer
x_image = tf.reshape(x, [-1, 28, 28, 1]) # Reshape 'x' into a 4D tensor
W_conv1, b_conv1 = create_layer([5, 5, 1, 32]) # Define the first convolutional layer
h_conv1 = tf.nn.relu(convolution_2d(x_image, W_conv1) + b_conv1) # bias, and then apply the ReLU function # Convolve the image with weight tensor, add the
h_pool1 = max_pooling(h_conv1) # Apply the max pooling operator
W_conv2, b_conv2 = create_layer([5, 5, 32, 64]) # Define the second convolutional layer
h_conv2 = tf.nn.relu(convolution_2d(h_pool1, W_conv2) + b_conv2) # the ReLU function # weight tensor, add the bias, and then apply # Convolve the output of previous layer with the
h_pool2 = max_pooling(h_conv2) # Apply the max pooling operator
W_fc1, b_fc1 = create_layer([7 * 7 * 64, 1024]) # Define the fully connected layer
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) # Reshape the output of the previous layer
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # the ReLU function # weight tensor, add the bias, and then apply # Multiply the output of previous layer by the
keep_prob = tf.placeholder(tf.float32) # for all the neurons # Define the dropout layer using a probability placeholder
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2, b_fc2 = create_layer([1024, 10]) # Define the readout layer (output layer)
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_loss = tf.placeholder(tf.float32, [None, 10]) # Define the entropy loss and the optimizer
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_loss))
optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)
predicted = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_loss, 1)) # Define the accuracy computation
accuracy = tf.reduce_mean(tf.cast(predicted, tf.float32))
sess = tf.InteractiveSession() # Create and run a session
init = tf.initialize_all_variables()
sess.run(init)
num_iterations = 21000 # Start training
batch_size = 75
print("\nTraining the model....")
for i in range(num_iterations):
batch = mnist.train.next_batch(batch_size) # Get the next batch of images
if i % 50 == 0: # Print progress
cur_accuracy = accuracy.eval(feed_dict={x: batch[0], y_loss: batch[1], keep_prob: 1.0})
print("Iteration", i, ", Accuracy =", cur_accuracy)
optimizer.run(feed_dict={x: batch[0], y_loss: batch[1], keep_prob: 0.5}) # Train on the current batch
print("Test accuracy =", accuracy.eval(feed_dict={x: mnist.test.images, y_loss: mnist.test.labels, keep_prob: 1.0})) # Compute accuracy using test data
|
[
"githubfortyuds@gmail.com"
] |
githubfortyuds@gmail.com
|
ef444176e36f44038492a44d71ac8e6aca7a16c7
|
d1c352676563b2decacfad19120001959b043f05
|
/superset/migrations/versions/c5756bec8b47_time_grain_sqla.py
|
13eb8c9b65479c0103f6fddc7aee44249354651c
|
[
"CC-BY-4.0",
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] |
permissive
|
Affirm/incubator-superset
|
c9a09a10289b4ebf8a09284a483bca93725a4b51
|
421183d3f46c48215e88e9d7d285f2dc6c7ccfe6
|
refs/heads/master
| 2023-07-06T11:34:38.538178
| 2019-05-22T23:39:01
| 2019-05-22T23:39:01
| 128,005,001
| 1
| 3
|
Apache-2.0
| 2023-03-20T19:49:14
| 2018-04-04T04:02:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,190
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Time grain SQLA
Revision ID: c5756bec8b47
Revises: e502db2af7be
Create Date: 2018-06-04 11:12:59.878742
"""
# revision identifiers, used by Alembic.
revision = 'c5756bec8b47'
down_revision = 'e502db2af7be'
from alembic import op
import json
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text
from superset import db
Base = declarative_base()
class Slice(Base):
    """Minimal mapping of the ``slices`` table (only the columns this migration touches)."""
    __tablename__ = 'slices'

    id = Column(Integer, primary_key=True)
    params = Column(Text)  # JSON-encoded chart parameters
def upgrade():
    """Replace the legacy 'Time Column' time_grain_sqla value with None on every slice."""
    bind = op.get_bind()
    session = db.Session(bind=bind)

    for slc in session.query(Slice).all():
        try:
            params = json.loads(slc.params)
            if params.get('time_grain_sqla') == 'Time Column':
                params['time_grain_sqla'] = None
                slc.params = json.dumps(params, sort_keys=True)
        except Exception:
            # Best-effort migration: skip slices whose params are not valid JSON.
            pass

    session.commit()
    session.close()
def downgrade():
    """Restore the legacy 'Time Column' value on slices whose time_grain_sqla is None."""
    bind = op.get_bind()
    session = db.Session(bind=bind)

    for slc in session.query(Slice).all():
        try:
            params = json.loads(slc.params)
            if params.get('time_grain_sqla') is None:
                params['time_grain_sqla'] = 'Time Column'
                slc.params = json.dumps(params, sort_keys=True)
        except Exception:
            # Best-effort migration: skip slices whose params are not valid JSON.
            pass

    session.commit()
    session.close()
|
[
"noreply@github.com"
] |
Affirm.noreply@github.com
|
898ac4865128e40a00c0e6ebf4e83bf0a1cdff2e
|
43fd8b12dc1b6a2fc7cf4d9b8a80d3f1ae0fac66
|
/Visualization/11. 3D绘图函数-Points3d().py
|
b763a01b925d2dfae930f78b64abb6ea2a8b557b
|
[] |
no_license
|
gxiang666/python_file
|
e707f829b2c35e6126bea79e299333faabe76b19
|
2ee0f52d53892d193dc83c10564f7326e0bad0da
|
refs/heads/master
| 2022-12-07T04:16:29.166707
| 2019-10-25T02:59:26
| 2019-10-25T02:59:26
| 139,252,161
| 1
| 0
| null | 2022-11-22T02:38:40
| 2018-06-30T13:35:14
|
Python
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
import numpy as np
from mayavi import mlab

# Build the data: 20 evenly spaced samples on [0, 4*pi].
t = np.linspace(0, 4 * np.pi, 20)
x = np.sin(2 * t)
y = np.cos(t)
z = np.cos(2 * t)
s = 2 + np.sin(t)  # per-point scalar passed to points3d (drives glyph size/color)

# Visualize the data as 3D points and show the interactive window.
points = mlab.points3d(x, y, z, s, colormap="Greens", scale_factor=.25)
mlab.show()
|
[
"1528357474@qq.com"
] |
1528357474@qq.com
|
a48d2e017f789758b775bae34f5ef4309987a757
|
458b1133df5b38a017f3a690a624a54f0f43fda7
|
/PaperExperiments/XHExp076/parameters.py
|
6f436fa18bf3398fcd04dd8ad45ea24a61fd102c
|
[
"MIT"
] |
permissive
|
stefan-c-kremer/TE_World2
|
9c7eca30ee6200d371183c5ba32b3345a4cc04ee
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
refs/heads/master
| 2020-12-18T14:31:00.639003
| 2020-02-04T15:55:49
| 2020-02-04T15:55:49
| 235,413,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,604
|
py
|
# parameters.py
"""
Exp 76 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.15, 0, 0.55, 1, 0.30, 2', 'TE_Insertion_Distribution': 'Flat()', 'Carrying_capacity': '300', 'TE_excision_rate': '0.5', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Flat()', 'mutation_effect': '0.01', 'TE_death_rate': '0.005'}
"""

from TEUtil import *;

# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
    "SPLAT": False,
    "SPLAT FITNESS": False,
    "INITIALIZATION": False,
    "GENERATION": True,
    "HOST EXTINCTION": True,
    "TE EXTINCTION": True,
    "TRIAL NO": True,
    "GENE INIT": False,
    "TE INIT": False,
    };

TE_Insertion_Distribution = Flat();
Gene_Insertion_Distribution = Flat();
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability

Gene_length = 1000;     # use 1000?
TE_length = 1000;       # use 1000?
TE_death_rate = 0.005;
TE_excision_rate = 0.5; # set this to zero for retro transposons

# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.15, 0, 0.55, 1, 0.30, 2 );

Initial_genes = 5000;

Append_gene = True;     # True: when the intialization routine tries to place
                        # a gene inside another gene, it instead appends it
                        # at the end of the original gene (use this with small
                        # amounts of Junk_BP).
                        # False: when the intialization routine tries to place
                        # a gene inside another gene, try to place it somewhere
                        # else again (don't use theis option with samll amounts
                        # of Junk_BP).

Initial_TEs = 1;

MILLION = 1000000;
Junk_BP = 1.4 * MILLION;

Host_start_fitness = 1.0;
Host_mutation_rate = 0.30;

# Per-generation host fitness change distribution.
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
                                  0.30, lambda fit: fit - random.random()*0.01,
                                  0.15, lambda fit: fit,
                                  0.15, lambda fit: fit + random.random()*0.01
                                  );

# what happens when a TE hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
                                    0.20, lambda fit: fit - random.random()*0.01,
                                    0.30, lambda fit: fit,
                                    0.20, lambda fit: fit + random.random()*0.01
                                    );

Carrying_capacity = 300;
Host_reproduction_rate = 1;     # how many offspring each host has

Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual

Maximum_generations = 1500;
Terminate_no_TEs = True;        # end simulation if there are no TEs left

# seed = 0;
seed = None;    # if seed = None, the random number generator's initial state is
                # set "randomly"

save_frequency = 50;    # Frequency with which to save state of experiment

saved = None;   # if saved = None then we start a new simulation from scratch
                # if saves = string, then we open that file and resume a simulation
|
[
"stefan@kremer.ca"
] |
stefan@kremer.ca
|
df03d7cf506c83750e564807e2c690429a10ca18
|
e0b750751b22e2d3d93c72f0947b3dd3d173ce54
|
/runtests.py
|
b59af0a0dfe8c95546d7382b46d18af15e8bdf10
|
[
"MIT"
] |
permissive
|
CryptAxe/pinax-images
|
8deac776c534a2d3ab2d66df364ab654fb02b8df
|
d041993671cdec6ed3011f6d7ffa499591e7cc9a
|
refs/heads/master
| 2021-01-17T22:27:44.849972
| 2016-02-22T14:38:13
| 2016-02-22T14:38:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
# Minimal Django settings for running the pinax.images test suite
# against an in-memory SQLite database.
DEFAULT_SETTINGS = dict(
    INSTALLED_APPS=[
        "django.contrib.auth",
        "django.contrib.contenttypes",
        "django.contrib.sites",
        "pinax.images",
        "pinax.images.tests"
    ],
    MIDDLEWARE_CLASSES=[],
    DATABASES={
        "default": {
            "ENGINE": "django.db.backends.sqlite3",
            "NAME": ":memory:",
        }
    },
    SITE_ID=1,
    ROOT_URLCONF="pinax.images.tests.urls",
    SECRET_KEY="notasecret",
)
def runtests(*test_args):
    """Configure a minimal Django environment and run the package test suite.

    Exits the process with the number of test failures as the status code.
    """
    if not settings.configured:
        settings.configure(**DEFAULT_SETTINGS)

    django.setup()

    # Make the package importable regardless of the current working directory.
    sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

    # Prefer the modern discover runner; fall back for very old Django.
    try:
        from django.test.runner import DiscoverRunner as runner_class
        labels = ["pinax.images.tests"]
    except ImportError:
        from django.test.simple import DjangoTestSuiteRunner as runner_class
        labels = ["tests"]

    runner = runner_class(verbosity=1, interactive=True, failfast=False)
    sys.exit(runner.run_tests(labels))
if __name__ == "__main__":
runtests(*sys.argv[1:])
|
[
"paltman@gmail.com"
] |
paltman@gmail.com
|
ecacc3a564ffcaf31f442d38ff3d813c6c585b53
|
73d8089381b92b1965e2ac2a5f7c80d9293af2f5
|
/main.py
|
6a0a75f799a93f53747575c949df0934a27cee92
|
[] |
no_license
|
Fufuhu/python_movie_transcoder
|
b92e420bc0433676863f57c154cc2a7f3d939384
|
ddce7f230c52ff532a74b1e1f5b0a5d16fa7bbaf
|
refs/heads/master
| 2021-01-20T04:50:15.511316
| 2017-04-29T14:40:44
| 2017-04-29T14:40:44
| 89,738,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
import sys

from python_movie_transformer.ffmpeg.image_file import ImageFile
from python_movie_transformer.ffmpeg.options import ImageFileOptions
from python_movie_transformer.ffmpeg.manipulater import FFmpegManipulater

# Usage: main.py <input_file> <output_file>
in_file = sys.argv[1]
out_file = sys.argv[2]

# Scale the output to 640 px wide (height presumably derived by the
# transformer/ffmpeg — confirm in ImageFileOptions.set_scale).
out_file_options = ImageFileOptions()
out_file_options.set_scale(width=640)

in_file_image = ImageFile(file_name=in_file)
out_file_image = ImageFile(file_name=out_file, options=out_file_options)

# Run the ffmpeg transcode from the input file to the output file.
manipulater = FFmpegManipulater(input_file = in_file_image, output_file=out_file_image)
manipulater.manipulate()
|
[
"e238058r@hotmail.co.jp"
] |
e238058r@hotmail.co.jp
|
55bc19097227723308c5974a0b3429268e833458
|
03bf031efc1f171f0bb3cf8a565d7199ff073f96
|
/apps/splash/utils.py
|
55a7485240634ae9178da8eacef57efa57c85d31
|
[
"MIT"
] |
permissive
|
emilps/onlineweb4
|
a213175678ac76b1fbede9b0897c538c435a97e2
|
6f4aca2a4522698366ecdc6ab63c807ce5df2a96
|
refs/heads/develop
| 2020-03-30T01:11:46.941170
| 2019-05-10T19:49:21
| 2019-05-10T19:49:21
| 150,564,330
| 0
| 0
|
MIT
| 2019-05-10T19:49:22
| 2018-09-27T09:43:32
|
Python
|
UTF-8
|
Python
| false
| false
| 699
|
py
|
import icalendar
from django.utils import timezone
from apps.events.utils import Calendar
from apps.splash.models import SplashEvent
class SplashCalendar(Calendar):
    """iCal feed of splash events for the current year."""

    def add_event(self, event):
        """Append *event* to the underlying calendar as an icalendar component."""
        entry = icalendar.Event()
        for key, value in (
            ('dtstart', event.start_time),
            ('dtend', event.end_time),
            ('summary', event.title),
            ('description', event.content),
            # Stable UID so feed consumers can deduplicate across refreshes.
            ('uid', 'splash-' + str(event.id) + '@online.ntnu.no'),
        ):
            entry.add(key, value)
        self.cal.add_component(entry)

    def events(self):
        """Load this year's splash events and set the download filename."""
        this_year = timezone.now().year
        self.add_events(SplashEvent.objects.filter(start_time__year=this_year))
        self.filename = 'events'
|
[
"hlsolbjorg@gmail.com"
] |
hlsolbjorg@gmail.com
|
bbb5385859b86e8e0d16e6fb2b6b59981333724f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_binaries.py
|
57ef5dd6379ebd43cd7d9aa73601d47f274634a6
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
#calss header
class _BINARIES():
def __init__(self,):
self.name = "BINARIES"
self.definitions = binary
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['binary']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
c39122b83132e528602a47b61c9c2da7e5a93bbb
|
302442c32bacca6cde69184d3f2d7529361e4f3c
|
/servidores/servidor_7-nao-comitados/code2py/migra_cass_1.py
|
483f945ca021c1c77c6c6c22f65b373840ff8c82
|
[] |
no_license
|
fucknoob/WebSemantic
|
580b85563072b1c9cc1fc8755f4b09dda5a14b03
|
f2b4584a994e00e76caccce167eb04ea61afa3e0
|
refs/heads/master
| 2021-01-19T09:41:59.135927
| 2015-02-07T02:11:23
| 2015-02-07T02:11:23
| 30,441,659
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,202
|
py
|
import pycassa
from pycassa.pool import ConnectionPool
from pycassa import index
from pycassa.columnfamily import ColumnFamily
pool1 = ConnectionPool('MINDNET', ['localhost:9160'],timeout=1000000)
pool2 = ConnectionPool('MINDNET', ['213.136.81.102:9160'],timeout=1000000)
def migr(tab1,tab2,tb):
    # Drain all rows from column family tab1 into tab2 (tb labels log lines).
    # Rows are copied in chunks of up to 500000 keys; each copied row is
    # removed from the source, so restarting resumes where it left off.
    #
    # NOTE(review): `ind` is initialized but never incremented, so the
    # progress lines below always print 0.
    ind=0
    while True:
        # Collect the next chunk of (key, columns) pairs from the source.
        cach=[]
        r1=tab1.get_range()
        for ky,col in r1:
            cach.append([ky,col])
            if len(cach) %1000==0:
                print 'collect(',tb,'):',len(cach)
            if len(cach) >= 500000:
                break
        # Source exhausted: migration complete.
        if len(cach) == 0: break

        # NOTE(review): mutation batches b1/b2 are created and flushed, but
        # the writes below go through tab2.insert/tab1.remove directly, so
        # the batches appear to buffer nothing — confirm intended.
        b1 = tab2.batch(55000)
        b2 = tab1.batch(55000)
        indc=0
        for ky,col in cach:
            tab2.insert(ky,col)
            tab1.remove(ky)
            indc+=1
            if indc % 50000==0:
                b1.send()
                b2.send()
                b1 = tab2.batch(55000)
                b2 = tab1.batch(55000)
                print tb,'->',ind
        b1.send()
        b2.send()
        print tb,'->',ind
# Column family handles on the source (pool1) and destination (pool2)
# clusters.  Only web_cache10 is migrated on this run; the remaining
# migr() calls are commented out (presumably already completed or deferred).
web_cache10_1 = pycassa.ColumnFamily(pool1, 'web_cache10')
web_cache10_2 = pycassa.ColumnFamily(pool2, 'web_cache10')
migr(web_cache10_1,web_cache10_2,'web_cache10')

fz_store_sufix_1 = pycassa.ColumnFamily(pool1, 'fz_store_sufix')
fz_store_sufix_2 = pycassa.ColumnFamily(pool2, 'fz_store_sufix')
#migr(fz_store_sufix_1,fz_store_sufix_2,'fz_store_sufix')

SEMANTIC_RELACTIONS_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_RELACTIONS')
SEMANTIC_RELACTIONS_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS')
#migr(SEMANTIC_RELACTIONS_1,SEMANTIC_RELACTIONS_2,'semantic_relactions')

SEMANTIC_OBJECT_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT')
SEMANTIC_OBJECT_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT')
#migr(SEMANTIC_OBJECT_1,SEMANTIC_OBJECT_2,'semantic_object')
#
fz_store_defs_1 = pycassa.ColumnFamily(pool1, 'fz_store_defs')
fz_store_defs_2 = pycassa.ColumnFamily(pool2, 'fz_store_defs')
#migr(fz_store_defs_1,fz_store_defs_2,'fz_store_defs')

SEMANTIC_RELACTIONS3_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_RELACTIONS3')
SEMANTIC_RELACTIONS3_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS3')
#migr(SEMANTIC_RELACTIONS3_1,SEMANTIC_RELACTIONS3_2,'semantic_relactions3')

knowledge_manager_1 = pycassa.ColumnFamily(pool1, 'knowledge_manager')
knowledge_manager_2 = pycassa.ColumnFamily(pool2, 'knowledge_manager')
#migr(knowledge_manager_1,knowledge_manager_2,'kwnolegde_manager')

SEMANTIC_OBJECT3_1_4_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT3_1_4')
SEMANTIC_OBJECT3_1_4_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3_1_4')
#migr(SEMANTIC_OBJECT3_1_4_1,SEMANTIC_OBJECT3_1_4_2,'semantic_object3_1_4')

web_cache3_1 = pycassa.ColumnFamily(pool1, 'web_cache3')
web_cache3_2 = pycassa.ColumnFamily(pool2, 'web_cache3')
#migr(web_cache3_1,web_cache3_2,'web_cache3')

fcb_users1_1 = pycassa.ColumnFamily(pool1, 'fcb_users1')
fcb_users1_2 = pycassa.ColumnFamily(pool2, 'fcb_users1')
#migr(fcb_users1_1,fcb_users1_2,'fcb_users1')

fz_store_refer_1 = pycassa.ColumnFamily(pool1, 'fz_store_refer')
fz_store_refer_2 = pycassa.ColumnFamily(pool2, 'fz_store_refer')
#migr(fz_store_refer_1,fz_store_refer_2,'fz_store_refer')

DATA_BEHAVIOUR_CODE_PY_1 = pycassa.ColumnFamily(pool1, 'DATA_BEHAVIOUR_CODE_PY')
DATA_BEHAVIOUR_CODE_PY_2 = pycassa.ColumnFamily(pool2, 'DATA_BEHAVIOUR_CODE_PY')
#migr(DATA_BEHAVIOUR_CODE_PY_1,DATA_BEHAVIOUR_CODE_PY_2,'data_behaviour_code_py')

SEMANTIC_OBJECT_DT_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT_DT')
SEMANTIC_OBJECT_DT_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT')
#migr(SEMANTIC_OBJECT_DT_1,SEMANTIC_OBJECT_DT_2,'semantic_object_dt')

SEMANTIC_OBJECT3_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT3')
SEMANTIC_OBJECT3_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3')
#migr(SEMANTIC_OBJECT3_1,SEMANTIC_OBJECT3_2,'semantic_object3')

to_posting_1 = pycassa.ColumnFamily(pool1, 'to_posting')
to_posting_2 = pycassa.ColumnFamily(pool2, 'to_posting')
#migr(to_posting_1,to_posting_2,'to_posting')

SEMANTIC_RELACTIONS3_1_4_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_RELACTIONS3_1_4')
SEMANTIC_RELACTIONS3_1_4_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS3_1_4')
#migr(SEMANTIC_RELACTIONS3_1_4_1,SEMANTIC_RELACTIONS3_1_4_2,'semantic_relactions3_1_4')

SEMANTIC_OBJECT_DT3_1_4_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT_DT3_1_4')
SEMANTIC_OBJECT_DT3_1_4_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3_1_4')
#migr(SEMANTIC_OBJECT_DT3_1_4_1,SEMANTIC_OBJECT_DT3_1_4_2,'semantic_object_dt3_1_4')

fuzzy_store_1 = pycassa.ColumnFamily(pool1, 'fuzzy_store')
fuzzy_store_2 = pycassa.ColumnFamily(pool2, 'fuzzy_store')
#migr(fuzzy_store_1,fuzzy_store_2,'fuzzy_store')

cache_products_1 = pycassa.ColumnFamily(pool1, 'cache_products')
cache_products_2 = pycassa.ColumnFamily(pool2, 'cache_products')
#migr(cache_products_1,cache_products_2,'cache_products')

cache_links_1 = pycassa.ColumnFamily(pool1, 'cache_links')
cache_links_2 = pycassa.ColumnFamily(pool2, 'cache_links')
#migr(cache_links_1,cache_links_2,'cache_links')

DATA_BEHAVIOUR_PY_1 = pycassa.ColumnFamily(pool1, 'DATA_BEHAVIOUR_PY')
DATA_BEHAVIOUR_PY_2 = pycassa.ColumnFamily(pool2, 'DATA_BEHAVIOUR_PY')
#migr(DATA_BEHAVIOUR_PY_1,DATA_BEHAVIOUR_PY_2,'data_behaviour_py')

SEMANTIC_OBJECT_DT3_1 = pycassa.ColumnFamily(pool1, 'SEMANTIC_OBJECT_DT3')
SEMANTIC_OBJECT_DT3_2 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3')
#migr(SEMANTIC_OBJECT_DT3_1,SEMANTIC_OBJECT_DT3_2,'semantic_object_dt3')

to_posting2_1 = pycassa.ColumnFamily(pool1, 'to_posting2')
to_posting2_2 = pycassa.ColumnFamily(pool2, 'to_posting2')
#migr(to_posting2_1,to_posting2_2,'to_posting2')

fz_store_pref_1 = pycassa.ColumnFamily(pool1, 'fz_store_pref')
fz_store_pref_2 = pycassa.ColumnFamily(pool2, 'fz_store_pref')
#migr(fz_store_pref_1,fz_store_pref_2,'fz_store_pref')

web_cache1_1 = pycassa.ColumnFamily(pool1, 'web_cache1')
web_cache1_2 = pycassa.ColumnFamily(pool2, 'web_cache1')
#migr(web_cache1_1,web_cache1_2,'web_cache1')

fz_arround_points_1 = pycassa.ColumnFamily(pool1, 'fz_arround_points')
fz_arround_points_2 = pycassa.ColumnFamily(pool2, 'fz_arround_points')
#migr(fz_arround_points_1,fz_arround_points_2,'fz_arround_points')
|
[
"learnfuzzy@gmail.com"
] |
learnfuzzy@gmail.com
|
41184d6766f09a040692ad47e6ee2ba8729e2760
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02842/s678047490.py
|
06867a1e60f0fc09c8a80429d6de00e18537b2ab
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
# Given a tax-included price N (8% tax, floored), search for an integer
# pre-tax price n with floor(n * 1.08) == N; print ":(" when none exists.
N = int(input())
n = int(N/1.08)  # lower-bound candidate for the pre-tax price
while int(n*1.08) <= N:
    if int(n*1.08) == N:
        print(n)
        break
    else:
        n += 1
else:
    # The while finished without break: no candidate produces exactly N.
    print(":(")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
54dcbee3deeff946bab62193875ecf7f2be7928b
|
eddb3dfb5e1a0a3e58254f285c3700b45dce76d9
|
/mountaintools/mlprocessors/registry.py
|
959fb39fb2b78e11dd4457f64337724e13bb1063
|
[
"Apache-2.0"
] |
permissive
|
tjd2002/spikeforest2
|
f2281a8d3103b3fbdd85829c176819a5e6d310d0
|
2e393564b858b2995aa2ccccd9bd73065681b5de
|
refs/heads/master
| 2020-04-25T07:55:19.997810
| 2019-02-26T01:19:23
| 2019-02-26T01:19:23
| 172,628,686
| 0
| 0
|
Apache-2.0
| 2019-02-26T03:11:27
| 2019-02-26T03:11:26
| null |
UTF-8
|
Python
| false
| false
| 4,238
|
py
|
#!/usr/bin/env python
import json
import os
import traceback
import argparse
from .execute import execute
import types
class ProcessorRegistry:
    """Registry of processor classes with a small CLI (spec / test / invoke).

    If *namespace* is given, every processor's NAME is prefixed with it
    at registration time.
    """

    def __init__(self, processors=None, namespace=None):
        # BUGFIX: the original used a mutable default ([]), which made every
        # ProcessorRegistry() share one processor list; default to a fresh list.
        self.processors = [] if processors is None else processors
        self.namespace = namespace
        if namespace:
            for proc in self.processors:
                proc.NAME = "{}.{}".format(namespace, proc.NAME)
                proc.NAMESPACE = namespace

    def spec(self):
        """Return a JSON-serializable spec of all registered processors."""
        s = {}
        s['processors'] = [cls.spec() for cls in self.processors]
        return s

    def find(self, **kwargs):
        """Return the first processor whose attributes match ALL kwargs, else None.

        BUGFIX: the original returned a processor as soon as any single
        attribute matched; with multiple kwargs that could return the
        wrong processor.  Single-kwarg behavior (the only usage in this
        module, via NAME=...) is unchanged.
        """
        for P in self.processors:
            if all(hasattr(P, key) and getattr(P, key) == kwargs[key]
                   for key in kwargs):
                return P
        return None

    def get_processor_by_name(self, name):
        """Look up a processor by its NAME attribute (None when absent)."""
        return self.find(NAME=name)

    def test(self, args, **kwargs):
        """Run the self-test of the processor named in args[0].

        Raises KeyError for an unknown processor and AttributeError when
        the processor defines no callable ``test``.  Pass trace=True to
        print tracebacks of failing tests.
        """
        procname = args[0]
        proc = self.find(NAME=procname)
        if not proc:
            raise KeyError("Unable to find processor %s" % procname)
        if not hasattr(proc, 'test') or not callable(proc.test):
            raise AttributeError("No test function defined for %s" % proc.NAME)
        print("----------------------------------------------")
        print("Testing", proc.NAME)
        try:
            result = proc.test()
            print("SUCCESS" if result else "FAILURE")
        except Exception as e:
            print("FAILURE:", e)
            if kwargs.get('trace', False):
                traceback.print_exc()
        finally:
            print("----------------------------------------------")

    def process(self, args):
        """CLI entry point; args[0] is the program name, the rest is argv."""
        parser = argparse.ArgumentParser(prog=args[0])
        subparsers = parser.add_subparsers(dest='command', help='main help')

        parser_spec = subparsers.add_parser(
            'spec', help='Print processor specs')
        parser_spec.add_argument('processor', nargs='?')

        parser_test = subparsers.add_parser('test', help='Run processor tests')
        parser_test.add_argument('processor')
        parser_test.add_argument('args', nargs=argparse.REMAINDER)

        # Let each processor register its own subcommand.
        for proc in self.processors:
            proc.invoke_parser(subparsers)

        opts = parser.parse_args(args[1:])
        opcode = opts.command
        if not opcode:
            parser.print_usage()
            return

        if opcode == 'spec':
            if opts.processor:
                try:
                    proc = self.get_processor_by_name(opts.processor)
                    print(json.dumps(proc.spec(), sort_keys=True, indent=4))
                except Exception:
                    # BUGFIX: narrowed from a bare except so SystemExit /
                    # KeyboardInterrupt are no longer swallowed.
                    print("Processor {} not found".format(opts.processor))
                return
            print(json.dumps(self.spec(), sort_keys=True, indent=4))
            return

        if opcode == 'test':
            try:
                self.test([opts.processor]+opts.args, trace=os.getenv('TRACEBACK',
                          False) not in ['0', 0, 'False', 'F', False])
            except KeyError as e:
                # taking __str__ from Base to prevent adding quotes to KeyError
                print(BaseException.__str__(e))
            except Exception as e:
                print(e)
            finally:
                return

        if opcode in [x.NAME for x in self.processors]:
            try:
                self.invoke(self.get_processor_by_name(opcode), args[2:])
            except Exception:
                # BUGFIX: narrowed from a bare except; still exit non-zero
                # when the processor invocation fails.
                import sys
                sys.exit(-1)
        else:
            print("Processor {} not found".format(opcode))

    def invoke(self, proc, args):
        """Invoke a processor with raw CLI args."""
        return proc.invoke(args)

    def register(self, proc):
        """Add *proc* to the registry, applying the namespace if not already set."""
        if self.namespace and not proc.NAMESPACE:
            proc.NAME = "{}.{}".format(self.namespace, proc.NAME)
            proc.NAMESPACE = self.namespace
        self.processors.append(proc)
# Decorator factory: wrap a class with mlprocessor() and add it to *registry*.
def register_processor(registry):
    def decor(cls):
        cls = mlprocessor(cls)
        registry.register(cls)
        return cls
    return decor
# Attach the shared execute() entry point to the processor class.
# NOTE(review): types.MethodType binds execute with the class object as its
# first argument, so Proc.execute(...) receives the class itself — confirm
# this is intended rather than an instance binding.
def mlprocessor(cls):
    cls.execute = types.MethodType(execute, cls)
    return cls

# Default module-level registry used by @register_processor consumers.
registry = ProcessorRegistry()
|
[
"jeremy.magland@gmail.com"
] |
jeremy.magland@gmail.com
|
9df227dc90f6ac68fe20815bb42614b23252771e
|
42cc27460f455808e251148cdbf672b04d468156
|
/maya/rbRrSubmit/rbRrSubmit.py
|
4d5208459d1a393a0bacb22e8a8eb8a5bdc18192
|
[] |
no_license
|
cgguo/rugbybugs
|
dc5f76a48bb0315ae336e8192cdad5d13b087f65
|
df66aa2e3a8e38a34224627a7222d6854aa1597f
|
refs/heads/master
| 2020-12-25T05:03:11.905437
| 2015-05-26T18:48:46
| 2015-05-26T18:48:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,597
|
py
|
#rbRrSubmit Module
#------------------------------------------------------------------
'''
Description:
Saves and submits the current scene to the rrSubmitter
'''
'''
ToDo:
'''
#Imports
#------------------------------------------------------------------
import os, sys
import maya.cmds as cmds
import pymel.core as pm
#RbRrSubmit class
#------------------------------------------------------------------
class RbRrSubmit():
    """Saves the current Maya scene and hands it to the Royal Render submitter."""

    #Constructor / Main Procedure
    def __init__(self):

        #Instance Vars
        #------------------------------------------------------------------
        # verbosity flag (not read by saveAndSubmitToRr in this file)
        self.verbose = True

    #Methods
    #------------------------------------------------------------------

    #saveAndSubmitToRr
    @staticmethod
    def saveAndSubmitToRr(*args, **kwargs):
        """Save the open scene and launch the platform's rrSubmitter on it.

        Requires the RR_Root environment variable to point at the Royal
        Render installation.  Any failure is reported via print only.
        """
        try:
            # Get the current scene name.
            curName, curExt = os.path.splitext(cmds.file(query=True, sceneName=True))

            # Determine the file type.
            # NOTE(review): curType stays unbound when the extension is
            # neither .ma nor .mb; the resulting NameError is swallowed
            # by the except below.
            if(curExt == ".ma"): curType = "mayaAscii"
            if(curExt == ".mb"): curType = "mayaBinary"

            #save file
            cmds.file(f=True, type=curType, save=True)

            #Check if animation in Render settings is on, otherwise print warning
            if(pm.getAttr('defaultRenderGlobals.animation') == 0):
                print('No Animation specified in Renderglobals. RRSubmitter will not open file to get settings')
                #print to output window
                sys.__stdout__.write('No Animation specified in Renderglobals. RRSubmitter will not open file to get settings \n')

            #get rrSubmiterDir
            rrSubmiterDir = os.environ['RR_Root']

            #get scenePath
            scenePath = cmds.file(q = True, sceneName = True)

            #Check if scene path true, if so start submit (per-platform launcher)
            if (scenePath):
                if ((sys.platform.lower() == "win32") or (sys.platform.lower() == "win64")):
                    os.system(rrSubmiterDir+"\\win__rrSubmitter.bat \""+scenePath+"\"")
                elif (sys.platform.lower() == "darwin"):
                    os.system(rrSubmiterDir+"/bin/mac/rrSubmitter.app/Contents/MacOS/rrSubmitter \""+scenePath+"\"")
                else:
                    os.system(rrSubmiterDir+"/lx__rrSubmitter.sh \""+scenePath+"\"")
                print('Successfully submited scene to RRSubmitter')

        except:
            # NOTE(review): bare except hides the actual failure reason.
            print('Error submitting scene to RRSubmitter')

#Shared Methods
#------------------------------------------------------------------

#Execute TMP
#------------------------------------------------------------------
'''
from rugbyBugs.maya.rbRrSubmit import rbRrSubmit
reload(rbRrSubmit)

rbRrSubmit.RbRrSubmit.saveAndSubmitToRr()

RbRrSubmitInstance = rbRrSubmit.RbRrSubmit()
RbRrSubmitInstance.saveAndSubmitToRr()
'''
|
[
"wagenertimm@gmail.com"
] |
wagenertimm@gmail.com
|
eb3ad9c31d3ebfe873ad9cae4a6722addf5dd306
|
946111147d7e3c2e9bc75f41e2c1fccaa365ae2d
|
/EdgeDetector.py
|
cfa70d7d305fe923db485f288522b479b83e557c
|
[
"MIT"
] |
permissive
|
Sid2697/Image-Processing
|
3c779c2be82e430f2f207ef2fc9d134dc0400196
|
d25628d9b90e238b1df0881ec55359c41692ebbb
|
refs/heads/master
| 2021-04-29T17:45:35.239800
| 2018-02-15T20:23:34
| 2018-02-15T20:23:34
| 121,676,811
| 2
| 0
|
MIT
| 2018-02-15T20:23:35
| 2018-02-15T20:06:34
|
Python
|
UTF-8
|
Python
| false
| false
| 284
|
py
|
import cv2
import numpy as np  # kept for compatibility with the original file

# Live Canny edge detection from the default webcam.
cap = cv2.VideoCapture(0)
while True:
    # Grab the next frame (return flag ignored; assumes the camera stays open).
    _, frame = cap.read()
    # Fixed: the original also computed a grayscale copy every frame but never
    # used it; cv2.Canny accepts the BGR frame directly, so the dead per-frame
    # conversion is removed.
    edges = cv2.Canny(frame, 100, 130)
    cv2.imshow('Edges', edges)
    # Exit when the space bar is pressed.
    if cv2.waitKey(2) & 0xFF == ord(' '):
        break
cv2.destroyAllWindows()
cap.release()
|
[
"noreply@github.com"
] |
Sid2697.noreply@github.com
|
f965f55a5bd74cc12296683f04052d1b179291c4
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_jostles.py
|
e3ce9b04d14de25bb110e8ff39d95f207397bd36
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
#calss header
class _JOSTLES():
def __init__(self,):
self.name = "JOSTLES"
self.definitions = jostle
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['jostle']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
61f5914638bc300b27e0e1bbddc6ba9a4dfdcc4a
|
967de4753954f8a7988446c9edc5fbb14e3013a5
|
/conf/wsgi.py
|
b98642582f06de1bf7b21bac19ed12f6c73a3ef8
|
[
"MIT"
] |
permissive
|
uktrade/directory-ui-verification
|
20e199c9f4da180d82328a26f306f382736f10e1
|
e95b0e51c23ac2b79c8fab8b40cbc30808e3ea47
|
refs/heads/master
| 2020-03-18T11:34:07.562385
| 2018-06-18T11:13:53
| 2018-06-18T11:13:53
| 134,679,321
| 0
| 0
|
MIT
| 2018-06-18T11:13:54
| 2018-05-24T07:39:17
|
Python
|
UTF-8
|
Python
| false
| false
| 494
|
py
|
"""
WSGI config for directory-verification project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conf.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[
"rikatee@gmail.com"
] |
rikatee@gmail.com
|
a2b3aac32895d4ae785523f896686c3d758b2889
|
9f9f4280a02f451776ea08365a3f119448025c25
|
/plans/hsppw/qcut_hsp-l_005_pwcc_linear_hs.py
|
77e7cf7c8225054e48851cd9a3b74fcb28338848
|
[
"BSD-2-Clause"
] |
permissive
|
dbis-uibk/hit-prediction-code
|
6b7effb2313d2499f49b2b14dd95ae7545299291
|
c95be2cdedfcd5d5c27d0186f4c801d9be475389
|
refs/heads/master
| 2023-02-04T16:07:24.118915
| 2022-09-22T12:49:50
| 2022-09-22T12:49:50
| 226,829,436
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,070
|
py
|
"""Plan using all features."""
import os.path
from dbispipeline.evaluators import CvEpochEvaluator
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import hit_prediction_code.common as common
from hit_prediction_code.dataloaders import ClassLoaderWrapper
from hit_prediction_code.dataloaders import EssentiaLoader
from hit_prediction_code.dataloaders import QcutLoaderWrapper
import hit_prediction_code.evaluations as evaluations
from hit_prediction_code.models.pairwise import PairwiseOrdinalModel
from hit_prediction_code.result_handlers import print_results_as_json
from hit_prediction_code.transformers.label import compute_hit_score_on_df
PATH_PREFIX = 'data/hit_song_prediction_msd_bb_lfm_ab/processed'
number_of_classes = 5
dataloader = ClassLoaderWrapper(
wrapped_loader=QcutLoaderWrapper(
wrapped_loader=EssentiaLoader(
dataset_path=os.path.join(
PATH_PREFIX,
'hsp-l_acousticbrainz.parquet',
),
features=[
*common.all_no_year_list(),
],
label='yang_hit_score',
nan_value=0,
data_modifier=lambda df: compute_hit_score_on_df(
df,
pc_column='lastfm_playcount',
lc_column='lastfm_listener_count',
hit_score_column='yang_hit_score',
),
),
number_of_bins=number_of_classes,
),
labels=list(range(number_of_classes)),
)
pipeline = Pipeline([
('scale', MinMaxScaler()),
('model',
PairwiseOrdinalModel(
wrapped_model=LinearRegression(),
pairs_factor=3.,
threshold_type='average',
pair_strategy='random',
pair_encoding='concat',
threshold_sample_training=False,
)),
])
evaluator = CvEpochEvaluator(
cv=evaluations.cv(),
scoring=evaluations.metrics.ordinal_classifier_scoring(),
scoring_step_size=1,
)
result_handlers = [
print_results_as_json,
]
|
[
"mikevo-uibk@famv.net"
] |
mikevo-uibk@famv.net
|
849c7944c6f42de4793f349e57f2d1419d86a881
|
7361493342853a2bd9a3225eb71819c3cfd39985
|
/python-numpy-to-cnn/Momentum.py
|
7989051e0e6a566f7ee4e8fd25fac6a1921d6eef
|
[] |
no_license
|
brightparagon/learn-machine-learning
|
234df2c1298f9d0a34b0db010d9f870f97f1b867
|
3e6fe095d416317b97827615dbb7aa538261d117
|
refs/heads/master
| 2021-05-01T10:45:55.661458
| 2018-05-26T15:54:45
| 2018-05-26T15:54:45
| 121,107,145
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import numpy as np
class Momentum:
    """SGD with classical momentum.

    Update rule per parameter:
        v <- momentum * v - lr * grad
        param <- param + v
    """

    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr              # learning rate
        self.momentum = momentum  # velocity decay factor
        self.v = None             # per-parameter velocities, allocated lazily

    def update(self, params, grads):
        """Update `params` in place from `grads` (dicts sharing the same keys)."""
        if self.v is None:
            # Lazily create zero velocities matching each parameter's shape.
            self.v = {key: np.zeros_like(val) for key, val in params.items()}

        # Fixed: the original iterated `params.key()`, which raises
        # AttributeError (dict has `keys()`), so update() never worked.
        for key in params.keys():
            self.v[key] = self.momentum * self.v[key] - self.lr * grads[key]
            params[key] += self.v[key]
|
[
"kyeongmo2@gmail.com"
] |
kyeongmo2@gmail.com
|
e91e8d0a0d428aa8bb747635a2c022adadbd95bc
|
9ac405635f3ac9332e02d0c7803df757417b7fee
|
/cotizaciones/migrations/0076_cotizacionpagoproyectadoacuerdopago_motivo.py
|
d72a7ebf5cd297d779fd188093e3930c7e98020e
|
[] |
no_license
|
odecsarrollo/07_intranet_proyectos
|
80af5de8da5faeb40807dd7df3a4f55f432ff4c0
|
524aeebb140bda9b1bf7a09b60e54a02f56fec9f
|
refs/heads/master
| 2023-01-08T04:59:57.617626
| 2020-09-25T18:01:09
| 2020-09-25T18:01:09
| 187,250,667
| 0
| 0
| null | 2022-12-30T09:36:37
| 2019-05-17T16:41:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 475
|
py
|
# Generated by Django 2.2.6 on 2020-07-29 19:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the required `motivo` (reason) CharField to
    CotizacionPagoProyectadoAcuerdoPago."""

    dependencies = [
        ('cotizaciones', '0075_auto_20200729_1345'),
    ]

    operations = [
        migrations.AddField(
            model_name='cotizacionpagoproyectadoacuerdopago',
            name='motivo',
            # 'Falloooo' only back-fills existing rows during this migration;
            # preserve_default=False removes it from the field afterwards.
            field=models.CharField(default='Falloooo', max_length=100),
            preserve_default=False,
        ),
    ]
|
[
"fabio.garcia.sanchez@gmail.com"
] |
fabio.garcia.sanchez@gmail.com
|
def10769c95cfd6a081ef74f1f9c3574746eb34b
|
a4830a0189c325c35c9021479a5958ec870a2e8b
|
/routing/signals.py
|
c84cfbc1a8c76acc691ca275dec3a812c4b8c014
|
[] |
no_license
|
solutionprovider9174/steward
|
044c7d299a625108824c854839ac41f51d2ca3fd
|
fd681593a9d2d339aab0f6f3688412d71cd2ae32
|
refs/heads/master
| 2022-12-11T06:45:04.544838
| 2020-08-21T02:56:55
| 2020-08-21T02:56:55
| 289,162,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
# Python
import datetime
# Django
from django.db.models.signals import post_save
from django.dispatch import receiver
# Application
from routing import models
@receiver(post_save, sender=models.Record)
def route_post_save(sender, **kwargs):
    """Touch the `modified` timestamp of every number on the saved record's route.

    Connected to Django's post_save signal for `models.Record`.
    """
    instance = kwargs['instance']
    # Bulk update so all numbers on the route get the same timestamp in one query.
    instance.route.numbers.all().update(modified=datetime.datetime.now())
    # NOTE(review): debug print left in place to preserve behavior.
    print(instance)
|
[
"guangchengwang9174@yandex.com"
] |
guangchengwang9174@yandex.com
|
06565d34f279d486d103d05a7560479d58d6a764
|
45eb50864138759adbcc7f8d9742c9f6c8102604
|
/remind_me/services/ping.py
|
1903aa1947bed8b5c31a86dd4b5ce54fa8521f32
|
[] |
no_license
|
bbelderbos/remind_me
|
bc8df4b24f701bb96edf336f9b310ee43dbbd9b4
|
d432d4fb9632aa9531ee6e101f80de233d97ce56
|
refs/heads/master
| 2023-08-29T06:39:08.127091
| 2021-04-28T17:51:44
| 2021-04-28T17:51:44
| 409,145,017
| 0
| 0
| null | 2021-09-22T09:40:07
| 2021-09-22T09:40:06
| null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
from datetime import datetime, timedelta
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from dateutil.parser import parse
from remind_me.data import db_session
from remind_me.data.events import Events
from remind_me.schedule_jobs import timezones
from remind_me.sms import send
scheduler = BackgroundScheduler()


@scheduler.scheduled_job('interval', minutes=20)
def ping_site() -> None:
    """Every 20 minutes: send reminders that are due, then ping the Heroku app.

    The trailing GET presumably keeps the free Heroku dyno awake — confirm.
    """
    session = db_session.create_session()
    # Unsent events only (SQLAlchemy needs `== False` here, not `is False`).
    all_events = session.query(Events).filter(Events.sent == False).all()
    print(all_events)
    for ev in all_events:
        # Apply the event's timezone offset in hours; assumes `timezones`
        # maps timezone name -> hour offset — TODO confirm against caller.
        event_time = parse(ev.date_and_time) + timedelta(hours=timezones[ev.timezone])
        current_time = datetime.now()
        if current_time > event_time:
            # The sent flag is committed before send() is called.
            ev.sent = True
            session.commit()
            send(ev.event, ev.phone_number, ev.carrier)
    response = requests.get('https://desolate-garden-98632.herokuapp.com/')
    print(response.status_code)
    session.close()


def make_pings():
    """Start the background scheduler (runs the ping_site interval job)."""
    scheduler.start()
    print(scheduler.get_jobs())
|
[
"jbrink0925@gmail.com"
] |
jbrink0925@gmail.com
|
5647ac16de076973951bdb0f0c028435874b9b27
|
5b40c6df03e477f3109fda9dc9b15a347df6c2ca
|
/ch04_bayes/monoDemo.py
|
bbd883492c4566a90337f9ee56d6ff09de3171b9
|
[] |
no_license
|
yphacker/machinelearninginaction
|
e40dfebd4634fd8fa48473c497ce5c9184cd6513
|
886a86e0cb1f5e61828774d4337cd08d2b2c54ed
|
refs/heads/master
| 2020-03-28T02:09:38.090126
| 2019-12-06T11:54:25
| 2019-12-06T11:54:25
| 147,551,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
# coding=utf-8
# author=yphacker
from numpy import *
import matplotlib
import matplotlib.pyplot as plt

# Demo: plot f(t) = sin(2*pi*t) on [0, 0.5) and ln(f(t)) beneath it,
# illustrating how a monotonic transform preserves the curve's shape.
t = arange(0.0, 0.5, 0.01)
s = sin(2 * pi * t)
# NOTE(review): log(0) at t=0 produces -inf with a RuntimeWarning; the
# non-finite point is simply not drawn.
logS = log(s)

fig = plt.figure()
ax = fig.add_subplot(211)
ax.plot(t, s)
ax.set_ylabel('f(x)')
ax.set_xlabel('x')

ax = fig.add_subplot(212)
ax.plot(t, logS)
ax.set_ylabel('ln(f(x))')
ax.set_xlabel('x')
plt.show()
|
[
"yphacker@163.com"
] |
yphacker@163.com
|
a1606ca7c6c07c8ac5fa8713cd5d9e1a21a4b4d0
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/96/usersdata/218/52922/submittedfiles/estatistica.py
|
ee08c21d7372d72bf781c64fa80059a6fcfa769b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
# -*- coding: utf-8 -*-
def media(lista):
    """Return the arithmetic mean of a non-empty list of numbers."""
    soma = 0
    for i in range(0, len(lista), 1):
        soma = soma + lista[i]
    resultado = soma / len(lista)
    return resultado


# Based on the function above: sample standard deviation of a list.
def desvio(lista):
    """Return the sample standard deviation (n-1 denominator) of `lista`.

    Requires len(lista) >= 2.
    """
    soma = 0
    for i in range(0, len(lista), 1):
        soma = soma + (lista[i] - media(lista)) ** 2
    # Fixed: the original wrote `** 0,5`, which Python parses as the tuple
    # ((soma/(n-1)) ** 0, 5) == (1.0, 5); the exponent must be 0.5 (sqrt).
    d = (soma / (len(lista) - 1)) ** 0.5
    return d
# Main program: read two lists of n numbers each, then print the mean and
# sample standard deviation of both (prompts are in Portuguese).
n = int(input('escreva o numero de elementos das listas:'))
a = []
for i in range(0, n, 1):
    valor = float(input('digite o numero a ser anexado a lista:'))
    a.append(valor)
b = []
for i in range(0, n, 1):
    valor = float(input('digite o numero a ser anexado a segunda lista:'))
    b.append(valor)
ma = media(a)
print('%.2f' % ma)
dsva = desvio(a)
print('%.2f' % dsva)
mb = media(b)
print('%.2f' % mb)
dsvb = desvio(b)
print('%.2f' % dsvb)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
fd892f61cc3cebc966b6d92a58fa0b4f1b3e556f
|
a857d1911a118b8aa62ffeaa8f154c8325cdc939
|
/toontown/safezone/DistributedDLTreasure.py
|
1eebfe55516411c19e8b8c7a936f73b8fe62096a
|
[
"MIT"
] |
permissive
|
DioExtreme/TT-CL-Edition
|
761d3463c829ec51f6bd2818a28b667c670c44b6
|
6b85ca8352a57e11f89337e1c381754d45af02ea
|
refs/heads/main
| 2023-06-01T16:37:49.924935
| 2021-06-24T02:25:22
| 2021-06-24T02:25:22
| 379,310,849
| 0
| 0
|
MIT
| 2021-06-22T15:07:31
| 2021-06-22T15:07:30
| null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
import DistributedSZTreasure
class DistributedDLTreasure(DistributedSZTreasure.DistributedSZTreasure):
    """Safezone treasure for the DL playground.

    Identical to the generic safezone treasure except for its model and
    grab-sound asset paths.
    """

    def __init__(self, cr):
        DistributedSZTreasure.DistributedSZTreasure.__init__(self, cr)
        # Per-playground asset overrides.
        self.grabSoundPath = 'phase_4/audio/sfx/SZ_DD_treasure.ogg'
        self.modelPath = 'phase_8/models/props/zzz_treasure'
|
[
"devinhall4@gmail.com"
] |
devinhall4@gmail.com
|
1a8c7ee320b8aa83c9b60017f7c089b22d17f1f6
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/surface/iam/service_accounts/get_iam_policy.py
|
b17087b6af89786547df27eda790cbdb2de1bd61
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132
| 2019-01-26T09:29:26
| 2019-01-26T09:29:26
| 169,131,028
| 0
| 0
|
NOASSERTION
| 2019-02-04T19:04:40
| 2019-02-04T18:58:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for getting IAM policies for service accounts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.iam import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
class GetIamPolicy(base.ListCommand):
    """Get the IAM policy for a service account.

    This command gets the IAM policy for a service account. If formatted as
    JSON, the output can be edited and used as a policy file for
    set-iam-policy. The output includes an "etag" field identifying the version
    emitted and allowing detection of concurrent policy updates; see
    $ gcloud iam service-accounts set-iam-policy for additional details.
    """

    detailed_help = {
        'EXAMPLES': textwrap.dedent("""\
          To print the IAM policy for a given service account, run:

            $ {command} my-iam-account@somedomain.com
          """),
        'DESCRIPTION': '\n\n'.join([
            '{description}',
            iam_util.GetHintForServiceAccountResource('get the iam policy of')])
    }

    @staticmethod
    def Args(parser):
        # Positional service-account argument; the inherited URI listing flag
        # is not meaningful for a single-resource get, so it is removed.
        iam_util.AddServiceAccountNameArg(
            parser,
            action='whose policy to get')
        base.URI_FLAG.RemoveFromParser(parser)

    def Run(self, args):
        client, messages = util.GetClientAndMessages()
        # Convert the account email into its full resource name, then fetch
        # the policy through the IAM API.
        return client.projects_serviceAccounts.GetIamPolicy(
            messages.IamProjectsServiceAccountsGetIamPolicyRequest(
                resource=iam_util.EmailToAccountResourceName(args.service_account)))
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
cc15763bac4ad87f4375500cf02cf860c4a57dec
|
ecd4b06d5d5368b71fd72a1c2191510a03b728fd
|
/8 - statistical thinking in python - part 1/quantitative exploratory data analysis/computing the ECDF.py
|
2ed8e62c2f0144dda23cca03c57322c98d0e5f72
|
[
"MIT"
] |
permissive
|
Baidaly/datacamp-samples
|
86055db5e326b59bfdce732729c80d76bf44629e
|
37b4f78a967a429e0abca4a568da0eb9d58e4dff
|
refs/heads/master
| 2022-07-27T01:18:00.700386
| 2022-07-18T19:27:23
| 2022-07-18T19:27:23
| 123,827,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
'''
In this exercise, you will write a function that takes as input a 1D array of data and then returns the x and y values of the ECDF. You will use this function over and over again throughout this course and its sequel. ECDFs are among the most important plots in statistical analysis. You can write your own function, foo(x,y) according to the following skeleton:
def foo(a,b):
"""State what function does here"""
# Computation performed here
return x, y
The function foo() above takes two arguments a and b and returns two values x and y. The function header def foo(a,b): contains the function signature foo(a,b), which consists of the function name, along with its parameters. For more on writing your own functions, see DataCamp's course Python Data Science Toolbox (Part 1) here!
'''
def ecdf(data):
    """Compute ECDF for a one-dimensional array of measurements.

    Returns the pair (x, y): the sorted sample values and, for each value,
    the cumulative fraction of observations at or below it (1/n, 2/n, ..., 1).
    """
    sorted_values = np.sort(data)
    sample_size = len(sorted_values)
    cumulative_fractions = np.arange(1, sample_size + 1) / sample_size
    return sorted_values, cumulative_fractions
|
[
"daulet.urazalinov@uptake.com"
] |
daulet.urazalinov@uptake.com
|
990da7948f16010dbffcd011ca083b1ea177b02e
|
fed6c6bdb6276d195bc565e527c3f19369d22b74
|
/test/multi_shear.py
|
39dd5a7acffb39105c56fd9dc77b27df6c74ef91
|
[] |
no_license
|
hekunlie/astrophy-research
|
edbe12d8dde83e0896e982f08b463fdcd3279bab
|
7b2b7ada7e7421585e8993192f6111282c9cbb38
|
refs/heads/master
| 2021-11-15T05:08:51.271669
| 2021-11-13T08:53:33
| 2021-11-13T08:53:33
| 85,927,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
import numpy
import os
from sys import path, argv
path.append("E:/Github/astrophy-research/my_lib")
from Fourier_Quad import Fourier_Quad
import tool_box
import time
from astropy.io import fits
import matplotlib.pyplot as plt
import h5py

# Toy check of the Fourier_Quad estimator (presumably a shear estimator,
# judging by the repo — confirm) on a mixture of two Gaussian samples:
# inspect the chi-square valley per component and for the combined sample.
rng = numpy.random.RandomState(123)
fq = Fourier_Quad(12, 122)

bin_num = 20
gh_num = 150
# Trial signal values scanned along the chi-square curve.
gh = numpy.linspace(-0.07, 0.07, gh_num)

# Two Gaussian components: means +/-0.05, common sigma, 5e5 samples each.
signals = [0.05, -0.05]
sigmas = [2, 2]
nums = [50 * 1.e4, 50 * 1.e4]
# One column-vector sample per true signal.
datas = [rng.normal(signals[i], sigmas[i], int(nums[i])).reshape((int(nums[i]), 1)) for i in range(len(signals))]
for i in range(len(datas)):
    if i == 0:
        data = datas[i]
    else:
        data = numpy.row_stack((data, datas[i]))
print(data.shape)
print(bin_num, data.shape)

# Histogram bin edges built over the combined sample; visualize them.
bins = fq.set_bin(data, bin_num)
print("Bin length: ", bins.shape)
plt.scatter(bins, [0 for i in range(len(bins))])
plt.show()

# each single signal: plot chi-square vs trial value, then fit its minimum.
for i in range(len(datas)):
    chisq = []
    for j in range(gh_num):
        chisq.append(fq.G_bin(datas[i], 1, gh[j], bins, 0))
    plt.scatter(gh, chisq)
    plt.show()
    plt.close()

    est_g, est_g_sig = fq.fmin_g_new(datas[i], 1, bin_num)
    print(signals[i], est_g, est_g_sig)

# Chi-square curve for the combined two-component sample.
chisq = []
for i in range(gh_num):
    chisq.append(fq.G_bin(data, 1, gh[i], bins, 0))
plt.figure(figsize=(16, 12))
plt.scatter(gh, chisq)
plt.show()
plt.close()
|
[
"hekun_lee@sjtu.edu.cn"
] |
hekun_lee@sjtu.edu.cn
|
994ed213b18ce3f3062cdf14bd95b41d0758b7f6
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/python-patterns-master/tests/behavioral/test_observer.py
|
821f97a61aa7a46e2b10c705af230637a92179e0
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
from unittest.mock import Mock, patch
import pytest
from patterns.behavioral.observer import Data, DecimalViewer, HexViewer
@pytest.fixture
def observable():
    # Fresh observable (subject) for each test.
    return Data("some data")


def test_attach_detach(observable):
    """attach() registers an observer; detach() removes it again."""
    decimal_viewer = DecimalViewer()
    assert len(observable._observers) == 0

    observable.attach(decimal_viewer)
    assert decimal_viewer in observable._observers

    observable.detach(decimal_viewer)
    assert decimal_viewer not in observable._observers


def test_one_data_change_notifies_each_observer_once(observable):
    """Assigning to `data` triggers exactly one update() per observer."""
    observable.attach(DecimalViewer())
    observable.attach(HexViewer())

    # Patch one observer's update() to count calls made by the setter.
    with patch(
        "patterns.behavioral.observer.DecimalViewer.update", new_callable=Mock()
    ) as mocked_update:
        assert mocked_update.call_count == 0
        observable.data = 10
        assert mocked_update.call_count == 1
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
4bcf70c72206f3613479dcdf9297012a0979000b
|
099b57613250ae0a0c3c75cc2a9b8095a5aac312
|
/leetcode/Tree/235. 二叉搜索树的最近公共祖先.py
|
1acbf0c219561464eeeca8a6ce5a098e67523a47
|
[] |
no_license
|
MitsurugiMeiya/Leetcoding
|
36e41c8d649b777e5c057a5241007d04ad8f61cd
|
87a6912ab4e21ab9be4dd6e90c2a6f8da9c68663
|
refs/heads/master
| 2022-06-17T19:48:41.692320
| 2020-05-13T16:45:54
| 2020-05-13T16:45:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
"""
Given a binary search tree (BST), find the lowest common ancestor (LCA)最小公共祖先
of two given nodes in the BST.
"""
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """Return the lowest common ancestor of nodes p and q in a BST.

        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode

        Assumes p and q are both present in the tree (per the problem
        statement); absence is not detected.
        """
        # Fixed: the original recursed into the wrong subtree in both
        # branches. When both values are SMALLER than root they lie in the
        # LEFT subtree, and symmetrically for larger values — exactly as the
        # file's own trailing notes describe.
        if root.val > p.val and root.val > q.val:
            return self.lowestCommonAncestor(root.left, p, q)
        if root.val < p.val and root.val < q.val:
            return self.lowestCommonAncestor(root.right, p, q)
        # Values straddle root (or one equals root): root is the LCA.
        return root
"""
答案:
236的简化版,已知BST
1.假如说p,q的值小于root,说明这两个node在root的左子树,找
2.假如说p,q的值大于root,说明这两个node在root的右子树
3.假如终于发现分叉了,说明最小公共节点就是这个root
或者是一个就是root,另一个小或大,那也不满足1,2的同小或同大
这种写法检查不了,p,q不在树上的情况,且能通过官方的所有测试条件
"""
|
[
"yifu3@ualberta.ca"
] |
yifu3@ualberta.ca
|
79f3fc754229d3f7b6c4f441ef53015c1b039e64
|
b595a24b07662a89826a1b6d334dfcaa3ec1c4b0
|
/venv/lib/python3.6/site-packages/storages/backends/mogile.py
|
d6194194368306a8bb32dfed8b8bdf42f24f7c91
|
[
"CC0-1.0"
] |
permissive
|
kentarofujiy/base1
|
4629b638f96b3ed091ea695c81b3b7837af1ec79
|
f820b9b379cda86ca5b446c63800fbe4bb8f3bce
|
refs/heads/master
| 2021-07-13T02:06:01.371773
| 2017-03-11T12:43:19
| 2017-03-11T12:43:19
| 84,649,225
| 0
| 1
|
CC0-1.0
| 2020-07-26T01:08:25
| 2017-03-11T12:43:32
|
Python
|
UTF-8
|
Python
| false
| false
| 4,079
|
py
|
from __future__ import print_function
import mimetypes
import warnings
from django.conf import settings
from django.core.cache import cache
from django.utils.deconstruct import deconstructible
from django.utils.text import force_text
from django.http import HttpResponse, HttpResponseNotFound
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import Storage
try:
import mogilefs
except ImportError:
raise ImproperlyConfigured("Could not load mogilefs dependency.\
\nSee http://mogilefs.pbworks.com/Client-Libraries")
warnings.warn(
'MogileFSStorage is unmaintained and will be removed in the next django-storages version'
'See https://github.com/jschneier/django-storages/issues/202',
PendingDeprecationWarning
)
@deconstructible
class MogileFSStorage(Storage):
    """MogileFS filesystem storage backend for Django.

    Requires MOGILEFS_TRACKERS and MOGILEFS_DOMAIN settings; the base URL
    comes from MOGILEFS_MEDIA_URL when present, else MEDIA_URL.
    """

    def __init__(self, base_url=settings.MEDIA_URL):
        # the MOGILEFS_MEDIA_URL overrides MEDIA_URL
        if hasattr(settings, 'MOGILEFS_MEDIA_URL'):
            self.base_url = settings.MOGILEFS_MEDIA_URL
        else:
            self.base_url = base_url

        for var in ('MOGILEFS_TRACKERS', 'MOGILEFS_DOMAIN',):
            if not hasattr(settings, var):
                raise ImproperlyConfigured("You must define %s to use the MogileFS backend." % var)

        self.trackers = settings.MOGILEFS_TRACKERS
        self.domain = settings.MOGILEFS_DOMAIN
        self.client = mogilefs.Client(self.domain, self.trackers)

    def get_mogile_paths(self, filename):
        """Return all MogileFS replica paths for `filename`."""
        return self.client.get_paths(filename)

    # The following methods define the Backend API

    def filesize(self, filename):
        # Fixed: `raise NotImplemented` raises TypeError (NotImplemented is a
        # value, not an exception class); NotImplementedError is the intended
        # "not supported" signal.
        raise NotImplementedError
        #return os.path.getsize(self._get_absolute_path(filename))

    def path(self, filename):
        """Return the first MogileFS path for `filename`, or None."""
        paths = self.get_mogile_paths(filename)
        if paths:
            # Fixed: the original fetched the paths a second time here.
            return paths[0]
        else:
            return None

    def url(self, filename):
        # Fixed: `urlparse` was never imported (Python 2 leftover) — use the
        # Python 3 urllib.parse equivalent.
        from urllib.parse import urljoin
        return urljoin(self.base_url, filename).replace('\\', '/')

    def open(self, filename, mode='rb'):
        raise NotImplementedError
        #return open(self._get_absolute_path(filename), mode)

    def exists(self, filename):
        return filename in self.client

    def save(self, filename, raw_contents, max_length=None):
        """Write `raw_contents` (bytes) to MogileFS under an available name."""
        filename = self.get_available_name(filename, max_length)

        if not hasattr(self, 'mogile_class'):
            self.mogile_class = None

        # Fixed: BytesIO was never imported; bring it in locally.
        from io import BytesIO
        # Write the file to mogile
        success = self.client.send_file(filename, BytesIO(raw_contents), self.mogile_class)
        if success:
            print("Wrote file to key %s, %s@%s" % (filename, self.domain, self.trackers[0]))
        else:
            print("FAILURE writing file %s" % (filename))

        return force_text(filename.replace('\\', '/'))

    def delete(self, filename):
        self.client.delete(filename)
def serve_mogilefs_file(request, key=None):
    """
    Called when a user requests an image.
    Either reproxy the path to perlbal, or serve the image outright
    """
    # not the best way to do this, since we create a client each time
    mimetype = mimetypes.guess_type(key)[0] or "application/x-octet-stream"
    client = mogilefs.Client(settings.MOGILEFS_DOMAIN, settings.MOGILEFS_TRACKERS)
    if hasattr(settings, "SERVE_WITH_PERLBAL") and settings.SERVE_WITH_PERLBAL:
        # we're reproxying with perlbal

        # check the path cache (60-second TTL)
        path = cache.get(key)
        if not path:
            path = client.get_paths(key)
            cache.set(key, path, 60)

        if path:
            response = HttpResponse(content_type=mimetype)
            # Perlbal intercepts this header and streams the file itself.
            response['X-REPROXY-URL'] = path[0]
        else:
            response = HttpResponseNotFound()
    else:
        # we don't have perlbal, let's just serve the image via django
        file_data = client[key]
        if file_data:
            # NOTE(review): the `mimetype=` kwarg was removed from
            # HttpResponse in modern Django (use content_type=) — confirm the
            # targeted Django version.
            response = HttpResponse(file_data, mimetype=mimetype)
        else:
            response = HttpResponseNotFound()
    return response
|
[
"Kentaro@Kentaros-MacBook-Pro.local"
] |
Kentaro@Kentaros-MacBook-Pro.local
|
7ab2153783df2bde81ef89f4762af1316f8b3a5c
|
6ef3b1919e7acbc72e5706b2dc6d716f8929e3d2
|
/transformers/commands/convert.py
|
8c3f952f4a73fb49cf88b2f47e54fcb22282ebb7
|
[
"MIT"
] |
permissive
|
linshaoxin-maker/taas
|
04f7dcc7c0d2818718e6b245531e017ca5370231
|
34e11fab167a7beb78fbe6991ff8721dc9208793
|
refs/heads/main
| 2023-01-19T20:58:04.459980
| 2020-11-27T02:28:36
| 2020-11-27T02:28:36
| 329,522,465
| 6
| 0
|
MIT
| 2021-01-14T06:02:08
| 2021-01-14T06:02:07
| null |
UTF-8
|
Python
| false
| false
| 7,151
|
py
|
from argparse import ArgumentParser, Namespace
from transformers.commands import BaseTransformersCLICommand
from ..utils import logging
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    :return: ConvertCommand
    """
    # Wired into argparse via `train_parser.set_defaults(func=...)` below.
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
class ConvertCommand(BaseTransformersCLICommand):
    """CLI command converting an original (TensorFlow / research-repo)
    checkpoint into a Transformers PyTorch checkpoint."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli

        :param parser: Root parser to register command-specific arguments
        :return:
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original "
            "author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch savd model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args
    ):
        # Store the conversion parameters; the actual work happens in run().
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info("Loading model {}".format(model_type))
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the per-architecture conversion script.

        TensorFlow-dependent converters are imported lazily so a missing
        TensorFlow install only fails for the model types that need it.
        """
        if self._model_type == "albert":
            try:
                from transformers.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                msg = (
                    "transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
                    "In that case, it requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions."
                )
                raise ImportError(msg)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from transformers.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                msg = (
                    "transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
                    "In that case, it requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions."
                )
                raise ImportError(msg)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from transformers.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                msg = (
                    "transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
                    "In that case, it requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions."
                )
                raise ImportError(msg)

            # Transfo-XL takes either a checkpoint ("ckpt" in the name) or a
            # dataset file; pass whichever one was supplied.
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                msg = (
                    "transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
                    "In that case, it requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions."
                )
                raise ImportError(msg)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                msg = (
                    "transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
                    "In that case, it requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions."
                )
                raise ImportError(msg)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        else:
            # NOTE(review): this message omits "albert", which is handled above.
            raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, transfo_xl, xlnet, xlm]")
|
[
"czh097@gmail.com"
] |
czh097@gmail.com
|
87cb1e514667d13033ff73c89aa8f2625c17fd15
|
b2260e6588f60f0830248757a858be8c129350f4
|
/QLabel img.py
|
c876389f8ade045218cc263cac41cfe34549d9d6
|
[] |
no_license
|
nengkya/PyQt
|
1fe04aeb23532f4a5b92248a3414ac000d41d078
|
06068556348c6906198d4db7efc979889263fd56
|
refs/heads/master
| 2023-02-08T01:10:20.970167
| 2023-01-20T18:03:57
| 2023-01-20T18:03:57
| 105,175,866
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
import sys
#from PyQt5.QtGui import *
#from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MainForm(QWidget):
    """Small demo window that shows an image inside a QLabel via rich text."""

    def __init__(self):
        super().__init__()
        self.setupUi()

    def setupUi(self):
        """Build the window geometry, the two labels and the vertical layout."""
        # 400x200 window placed at screen position (300, 300).
        self.resize(400, 200)
        self.move(300, 300)
        self.setWindowTitle('Demo QLabel')
        # A caption label plus a rich-text label whose HTML loads an image file.
        self.label1 = QLabel('Demo menampilkan gambar dengan QLabel')
        self.label2 = QLabel('<img src = PyPassContinue.png>')
        # Stack both labels top-to-bottom.
        layout = QVBoxLayout()
        layout.addWidget(self.label1)
        layout.addWidget(self.label2)
        self.setLayout(layout)
if __name__ == '__main__':
    # Create the Qt application, show the demo window, and run the event loop.
    app = QApplication(sys.argv)
    window = MainForm()
    window.show()
    app.exec()
|
[
"you@example.com"
] |
you@example.com
|
53dd9814341ea2b91a56c29803edfa507a2eaf25
|
08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc
|
/src/mnistk/networks/conv3dsigmoid_11.py
|
5c9ca5cdb81a4e3052630d571bc92b9b040cf256
|
[] |
no_license
|
ahgamut/mnistk
|
58dadffad204602d425b18549e9b3d245dbf5486
|
19a661185e6d82996624fc6fcc03de7ad9213eb0
|
refs/heads/master
| 2021-11-04T07:36:07.394100
| 2021-10-27T18:37:12
| 2021-10-27T18:37:12
| 227,103,881
| 2
| 1
| null | 2020-02-19T22:07:24
| 2019-12-10T11:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
# -*- coding: utf-8 -*-
"""
conv3dsigmoid_11.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class Conv3dSigmoid_11(nn.Module):
    """Auto-generated 3-D convolutional classifier with 10 output classes.

    The flat 784-feature input is viewed as a (1, 16, 7, 7) volume and pushed
    through four bias-free Conv3d layers, the first three each followed by a
    Sigmoid, ending in a LogSoftmax over the 10 class scores.
    """

    def __init__(self):
        nn.Module.__init__(self)
        # Attribute names f0..f7 are preserved so saved state dicts keep
        # loading; only the layout differs from the generated original.
        self.f0 = nn.Conv3d(in_channels=1, out_channels=12, kernel_size=(4, 4, 4),
                            stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1),
                            groups=1, bias=False, padding_mode='zeros')
        self.f1 = nn.Sigmoid()
        self.f2 = nn.Conv3d(in_channels=12, out_channels=32, kernel_size=(3, 3, 3),
                            stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1),
                            groups=1, bias=False, padding_mode='zeros')
        self.f3 = nn.Sigmoid()
        self.f4 = nn.Conv3d(in_channels=32, out_channels=10, kernel_size=(1, 1, 1),
                            stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1),
                            groups=1, bias=False, padding_mode='zeros')
        self.f5 = nn.Sigmoid()
        self.f6 = nn.Conv3d(in_channels=10, out_channels=10, kernel_size=(11, 2, 2),
                            stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1),
                            groups=1, bias=False, padding_mode='zeros')
        self.f7 = nn.LogSoftmax(dim=1)

    def forward(self, *inputs):
        # Reinterpret the flat feature vector as a single-channel 16x7x7 volume.
        out = inputs[0].view(inputs[0].shape[0], 1, 16, 7, 7)
        for stage in (self.f0, self.f1, self.f2, self.f3, self.f4, self.f5, self.f6):
            out = stage(out)
        # f6 collapses the spatial dims to 1x1x1, leaving only the class axis.
        return self.f7(out.view(out.shape[0], 10))
|
[
"41098605+ahgamut@users.noreply.github.com"
] |
41098605+ahgamut@users.noreply.github.com
|
203728418ef83b30a6c1a44c18db32698264f957
|
e68c3cbb9d6291fcdd51adae8a55616dcfafe55c
|
/spf/parser/ccg/cky/chart/cell.py
|
e6c2ec2917c119fdfa78536df3e673831ccf333b
|
[] |
no_license
|
Oneplus/pyspf
|
26126f5094065960d5f034fea2be4709aa1a4c50
|
175f90b4f837aa60fd660cba850d10a82dd578a1
|
refs/heads/master
| 2016-08-12T15:18:25.606712
| 2015-11-22T02:49:07
| 2015-11-22T02:49:07
| 45,725,414
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
#!/usr/bin/env python
from spf.utils.log import get_logger
class Cell(object):
    """A single cell of the CKY parse chart."""

    LOG = get_logger(__name__)

    def __init__(self, parse_step, start, end, is_complete_span):
        """
        :param CKYParseStepI parse_step:
        :param int start:
        :param int end:
        :param bool is_complete_span:
        :return:
        """
        # NOTE(review): start and end are accepted but never stored -- confirm
        # this trimmed-down port intentionally drops them.
        self.is_full_parse = parse_step.is_full_parse()
        self.is_complete_span = is_complete_span
|
[
"oneplus.lau@gmail.com"
] |
oneplus.lau@gmail.com
|
6fc0344b9b9c4a260c80ee9f7f61a9f1d948ca1e
|
40d371136f2d7de9c95bfe40fd3c0437095e9819
|
/build/rbx1/rbx1_nav/catkin_generated/pkg.develspace.context.pc.py
|
a74ba9f55c2c3dd4d7c607c520ce9e56cd9b59fa
|
[] |
no_license
|
marine0131/ros_ws
|
b4e6c5cf317260eaae1c406fb3ee234b3a3e67d5
|
6ddded3a92a717879bb646e7f2df1fea1a2d46b2
|
refs/heads/master
| 2021-07-05T06:29:43.054275
| 2017-09-28T08:29:14
| 2017-09-28T08:29:14
| 100,458,679
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated pkg-config context for the rbx1_nav devel space; the
# values are substituted at CMake configure time -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# Semicolon-separated include dirs, or an empty list when nothing was substituted.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/whj/catkin_ws/devel/include".split(';') if "/home/whj/catkin_ws/devel/include" != "" else []
# Run dependencies as a space-separated string.
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure".replace(';', ' ')
# Exported libraries (none for this package).
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rbx1_nav"
PROJECT_SPACE_DIR = "/home/whj/catkin_ws/devel"
PROJECT_VERSION = "0.4.0"
|
[
"wanghj@woosiyuan.com"
] |
wanghj@woosiyuan.com
|
4ab744495bdd71914089b3fabd09162e9ee06ce5
|
4c77c3f68ddd280ad26ed78a9f4927ff9eb5e1f1
|
/src/ledger/lib/python2.7/site-packages/pip/_internal/commands/freeze.py
|
ac562d7d84b626944ef398efbea2d4f93b40da0b
|
[
"MIT"
] |
permissive
|
neoscoin/neos-core
|
5f4a4e9fcdf13a21d1dbedfc7c01a8a8ba454a98
|
22cecda54875e3554e7c2a4569551c042fa6c0a2
|
refs/heads/master
| 2020-03-23T18:54:58.602764
| 2019-08-04T16:44:27
| 2019-08-04T16:44:27
| 141,940,658
| 4
| 4
|
MIT
| 2018-07-28T21:39:26
| 2018-07-23T00:05:03
|
C++
|
UTF-8
|
Python
| false
| false
| 3,320
|
py
|
from __future__ import absolute_import
import sys
from pip._internal import index
from pip._internal.basecommand import Command
from pip._internal.cache import WheelCache
from pip._internal.compat import stdlib_pkgs
from pip._internal.operations.freeze import freeze
DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'}
class FreezeCommand(Command):
    """
    Output installed packages in requirements format.

    packages are listed in a case-insensitive sorted order.
    """
    name = 'freeze'
    usage = """
      %prog [options]"""
    summary = 'Output installed packages in requirements format.'
    # Route log output to stderr so stdout stays a clean requirements listing
    # that can be redirected straight into a file.
    log_streams = ("ext://sys.stderr", "ext://sys.stderr")

    def __init__(self, *args, **kw):
        super(FreezeCommand, self).__init__(*args, **kw)
        # Each CLI option below maps onto a keyword argument of
        # pip._internal.operations.freeze.freeze() (see run()).
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='file',
            help="Use the order in the given requirements file and its "
                 "comments when generating output. This option can be "
                 "used multiple times.")
        self.cmd_opts.add_option(
            '-f', '--find-links',
            dest='find_links',
            action='append',
            default=[],
            metavar='URL',
            help='URL for finding packages, which will be added to the '
                 'output.')
        self.cmd_opts.add_option(
            '-l', '--local',
            dest='local',
            action='store_true',
            default=False,
            help='If in a virtualenv that has global access, do not output '
                 'globally-installed packages.')
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')
        self.cmd_opts.add_option(
            '--all',
            dest='freeze_all',
            action='store_true',
            help='Do not skip these packages in the output:'
                 ' %s' % ', '.join(DEV_PKGS))
        self.cmd_opts.add_option(
            '--exclude-editable',
            dest='exclude_editable',
            action='store_true',
            help='Exclude editable package from output.')
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        """Collect the freeze options and print one requirement per line."""
        format_control = index.FormatControl(set(), set())
        wheel_cache = WheelCache(options.cache_dir, format_control)
        # Always hide stdlib packages; hide pip/setuptools/etc. too unless
        # --all was given.
        skip = set(stdlib_pkgs)
        if not options.freeze_all:
            skip.update(DEV_PKGS)
        freeze_kwargs = dict(
            requirement=options.requirements,
            find_links=options.find_links,
            local_only=options.local,
            user_only=options.user,
            skip_regex=options.skip_requirements_regex,
            isolated=options.isolated_mode,
            wheel_cache=wheel_cache,
            skip=skip,
            exclude_editable=options.exclude_editable,
        )
        try:
            for line in freeze(**freeze_kwargs):
                sys.stdout.write(line + '\n')
        finally:
            # Clean up temporary wheel-cache files even if freeze() raised.
            wheel_cache.cleanup()
|
[
"kris@blockchaindatasystems.com"
] |
kris@blockchaindatasystems.com
|
0e888f3c5656339bfcd90422c0e0e3b11133b3ef
|
c94662c1c58f4b75e01195da6e2446993eada579
|
/core/cooggerapp/signals/__init__.py
|
96b341230d38d951150ab6786ad691283328fec6
|
[
"MIT"
] |
permissive
|
ozcanyarimdunya/coogger
|
e8f74ac215630473f88b612e6c236bd5441b32a8
|
832b9af196cf68917dabaa5b9c5ab0b80725ca6e
|
refs/heads/master
| 2020-07-12T03:59:41.928819
| 2019-08-27T08:33:09
| 2019-08-27T08:33:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
from .topic import when_utopic_create, increase_utopic_view
from .userextra import follow_and_repos_update, create_userprofile, send_mail_to_follow
from .issue import when_issue_delete, issue_counter
from .content import when_content_delete, when_content_create
from .commit import when_commit_create
|
[
"hakancelik96@outlook.com"
] |
hakancelik96@outlook.com
|
851aede55bcaefdea7999c415c35cdc90ce4b200
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/2808. Minimum Seconds to Equalize a Circular Array/2808.py
|
5390dcc57938a36fa03a978735ee042ecbc01463
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 622
|
py
|
class Solution:
    def minimumSeconds(self, nums: List[int]) -> int:
        """Return the minimum seconds to make the circular array all-equal.

        Each second every element may copy a neighbor, so a value spreads one
        step left and right per second.  For each distinct value, the answer
        candidate is half the largest circular gap between its occurrences.
        """
        size = len(nums)
        positions = collections.defaultdict(list)
        for idx, value in enumerate(nums):
            positions[value].append(idx)
        best = size
        for idxs in positions.values():
            # Wrap-around gap between the last and first occurrence...
            gaps = [idxs[0] + size - idxs[-1]]
            # ...plus every gap between consecutive occurrences.
            gaps.extend(b - a for a, b in zip(idxs, idxs[1:]))
            # A gap of g is filled from both ends in g // 2 seconds.
            best = min(best, max(g // 2 for g in gaps))
        return best
|
[
"me@pengyuc.com"
] |
me@pengyuc.com
|
11a1ce5dc526b7604a7b8b1257f22f55b26ae5e1
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_noisy383.py
|
552ed9b1b74d4b568697c570d633b3c387234b8a
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,185
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=17
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the auto-generated benchmark circuit over *input_qubit*.

    The trailing ``# number=k`` tags are the generator's gate ids.  Note that
    the parameter ``n`` is never used in the body; the circuit acts on
    whatever qubits are passed in and ends with a joint measurement recorded
    under the key 'result'.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.H.on(input_qubit[1]))  # number=7
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    c.append(cirq.CNOT.on(input_qubit[3], input_qubit[0]))  # number=5
    # H-CZ-H sandwich on qubit 0 controlled by qubit 3.
    c.append(cirq.H.on(input_qubit[0]))  # number=12
    c.append(cirq.CZ.on(input_qubit[3], input_qubit[0]))  # number=13
    c.append(cirq.H.on(input_qubit[0]))  # number=14
    # Six generated SWAPs of qubits 1 and 0; each adjacent pair cancels out.
    c.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))  # number=8
    c.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))  # number=9
    c.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))  # number=10
    c.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))  # number=11
    c.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))  # number=15
    c.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))  # number=16
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Fold an iterable of bit-like values into a digit string (e.g. '0110')."""
    return ''.join(map(str, map(int, bits)))
if __name__ == '__main__':
    # Build the 4-qubit circuit on a single column of grid qubits.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    # Compile for the Sycamore sqrt-iswap gate set before adding noise.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2820
    # Apply uniform single-qubit depolarizing noise to the whole circuit.
    circuit = circuit.with_noise(cirq.depolarize(p=0.01))
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump the histogram, circuit length and circuit text for offline analysis.
    writefile = open("../data/startCirq_noisy383.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit, file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
5c491fba4b421a9a4742e951f5c6d9f00279d088
|
88d8eed7081167bf5f81766dc5758ea4074eb9e5
|
/opt2q_examples/cell_death_data_calibration/calibration_fixed_measurement_model_p9_pysb_timout_arg.py
|
7be59c473e49651616e8b3bb052ae553721a3e2a
|
[] |
no_license
|
michael-irvin/Opt2Q
|
e28ee272dc1630f1f1cbc6ef2692888d9a09b6b2
|
58c18fd7ecab11857ce386202f13a8586c329836
|
refs/heads/master
| 2023-04-20T00:12:09.985042
| 2021-05-15T06:20:27
| 2021-05-15T06:20:27
| 143,816,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,691
|
py
|
import numpy as np
import datetime as dt
from scipy.stats import norm, invgamma
from pydream.core import run_dream
from pydream.convergence import Gelman_Rubin
from pydream.parameters import SampledParam
from multiprocessing import current_process
from opt2q.calibrator import objective_function
from opt2q_examples.cell_death_data_calibration.cell_death_data_calibration_setup \
import shift_and_scale_heterogeneous_population_to_new_params as sim_population
from opt2q_examples.cell_death_data_calibration.cell_death_data_calibration_setup \
import set_up_simulator, pre_processing, true_params, set_up_classifier, synth_data, \
time_axis, handle_timeouts, TimeoutException
from pysb.simulator import ScipyOdeSimulator
from opt2q_examples.apoptosis_model import model
import time
import signal
# Model name
# Date-stamped name used as the prefix for all saved sampler output files.
now = dt.datetime.now()
model_name = f'apoptosis_model_tbid_cell_death_data_calibration_fmm_{now.year}{now.month}{now.day}'
# Priors
# Normal prior around the true parameters plus an inverse-gamma prior whose
# alpha/beta are derived from nu and the assumed population stdev.
nu = 100
noisy_param_stdev = 0.20
alpha = int(np.ceil(nu/2.0))
beta = alpha/noisy_param_stdev**2
sampled_params_0 = [SampledParam(norm, loc=true_params, scale=1.5),
                    SampledParam(invgamma, *[alpha], scale=beta)]
n_chains = 4
n_iterations = 100000  # iterations per file-save
burn_in_len = 50000  # number of iterations during burn-in
max_iterations = 100000
# Simulator
# opt2q_solver doesn't run on Power9, but has useful methods for handling simulation results;
# its sim/solver attributes are stripped and a plain ScipyOdeSimulator is used instead.
opt2q_solver = set_up_simulator('cupsoda')
delattr(opt2q_solver, 'sim')
delattr(opt2q_solver, 'solver')
solver = ScipyOdeSimulator(model, tspan=time_axis, **{'integrator': 'lsoda', 'integrator_options': {'mxstep': 2**20}})
# Measurement Model
# Fixed logistic-classifier weights (scaled by `slope`); the measurement model
# itself is not calibrated here, hence do_fit_transform=False below.
slope = 4
intercept = slope * -0.25  # Intercept (-0.25)
unr_coef = slope * 0.00  # "Unrelated_Signal" coef (0.00)
tbid_coef = slope * 0.25  # "tBID_obs" coef (0.25)
time_coef = slope * -1.00  # "time" coef (-1.00)
classifier = set_up_classifier()
classifier.set_params(**{'coefficients__apoptosis__coef_': np.array([[unr_coef, tbid_coef, time_coef]]),
                         'coefficients__apoptosis__intercept_': np.array([intercept]),
                         'do_fit_transform': False})
# likelihood function
def likelihood(x):
    """Log-likelihood of parameter vector *x* for the PyDREAM sampler.

    Simulates a heterogeneous cell population at *x*, runs the ODE model,
    classifies each simulated cell with the fixed measurement model, and sums
    the log-probabilities of the observed apoptosis outcomes.  Any simulation
    or preprocessing failure (including a solver timeout) is mapped to a large
    penalty (-1e10) so the sampler simply avoids that region.
    """
    params_df = sim_population(x)  # simulate heterogeneous population around new param values
    opt2q_solver.param_values = params_df
    # Add scipyodesolver using parameter values from Opt2Q solver
    params_array = opt2q_solver._param_values_run
    start_time = time.time()
    try:
        results = solver.run(param_values=params_array, num_processors=2, timeout=60)  # run model
        new_results = opt2q_solver.opt2q_dataframe(results.dataframe).reset_index()
        features = pre_processing(new_results)
        # run fixed classifier
        prediction = classifier.transform(
            features[['simulation', 'tBID_obs', 'time', 'Unrelated_Signal', 'TRAIL_conc']])
        # calculate likelihood: probability of the observed label per cell
        ll = sum(np.log(prediction[synth_data.apoptosis == 1]['apoptosis__1']))
        ll += sum(np.log(prediction[synth_data.apoptosis == 0]['apoptosis__0']))
        elapsed_time = time.time() - start_time
        print("Elapsed time: ", elapsed_time)
        print(x[:len(true_params)])
        print(ll)
        return ll
    except (ValueError, ZeroDivisionError, TypeError, TimeoutException):
        # Failed or timed-out simulations are scored as extremely unlikely.
        elapsed_time = time.time() - start_time
        print("Elapsed time: ", elapsed_time)
        print(x[:len(true_params)])
        return -1e10
# -------- Calibration -------
# Model Inference via PyDREAM
if __name__ == '__main__':
    # DREAM sampler tuning: crossover count, gamma levels, and probability of
    # a gamma == 1 (full-size) jump.
    ncr = 25
    gamma_levels = 8
    p_gamma_unity = 0.1
    print(ncr, gamma_levels, p_gamma_unity)
    # Run DREAM sampling. Documentation of DREAM options is in Dream.py.
    converged = False
    total_iterations = n_iterations
    sampled_params, log_ps = run_dream(parameters=sampled_params_0,
                                       likelihood=likelihood,
                                       niterations=n_iterations,
                                       nchains=n_chains,
                                       multitry=False,
                                       nCR=ncr,
                                       gamma_levels=gamma_levels,
                                       adapt_gamma=True,
                                       p_gamma_unity=p_gamma_unity,
                                       history_thin=1,
                                       model_name=model_name,
                                       verbose=True,
                                       crossover_burnin=min(n_iterations, burn_in_len),
                                       )
    # Save sampling output (sampled parameter values and their corresponding logps).
    for chain in range(len(sampled_params)):
        np.save(model_name + '_' + str(chain) + '_' + str(total_iterations) + '_' + 'parameters', sampled_params[chain])
        np.save(model_name + '_' + str(chain) + '_' + str(total_iterations) + '_' + 'log_p', log_ps[chain])
    # Gelman-Rubin statistic across chains; < 1.2 everywhere counts as converged.
    GR = Gelman_Rubin(sampled_params)
    burn_in_len = max(burn_in_len - n_iterations, 0)
    print('At iteration: ', total_iterations, ' GR = ', GR)
    print(f'At iteration: {total_iterations}, {burn_in_len} steps of burn-in remain.')
    np.savetxt(model_name + str(total_iterations) + '.txt', GR)
    old_samples = sampled_params
    if np.isnan(GR).any() or np.any(GR > 1.2):
        # append sample with a re-run of the pyDream algorithm
        # NOTE(review): with `or` this loop only stops once converged AND past
        # max_iterations; since total_iterations == max_iterations here it
        # degenerates to "while not converged" -- confirm `and` was not intended.
        while not converged or (total_iterations < max_iterations):
            # Restart each chain from its last sampled position.
            starts = [sampled_params[chain][-1, :] for chain in range(n_chains)]
            total_iterations += n_iterations
            sampled_params, log_ps = run_dream(parameters=sampled_params_0,
                                               likelihood=likelihood,
                                               niterations=n_iterations,
                                               nchains=n_chains,
                                               multitry=False,
                                               nCR=ncr,
                                               gamma_levels=gamma_levels,
                                               adapt_gamma=True,
                                               p_gamma_unity=p_gamma_unity,
                                               history_thin=1,
                                               model_name=model_name,
                                               verbose=True,
                                               restart=True,  # restart at the last sampled position
                                               start=starts,
                                               crossover_burnin=min(n_iterations, burn_in_len))
            # Save sampling output (sampled parameter values and their corresponding logps).
            for chain in range(len(sampled_params)):
                np.save(model_name + '_' + str(chain) + '_' + str(total_iterations) + '_' + 'parameters',
                        sampled_params[chain])
                np.save(model_name + '_' + str(chain) + '_' + str(total_iterations) + '_' + 'log_p', log_ps[chain])
            # Convergence is assessed on the concatenated history of all runs.
            old_samples = [np.concatenate((old_samples[chain], sampled_params[chain])) for chain in range(n_chains)]
            GR = Gelman_Rubin(old_samples)
            burn_in_len = max(burn_in_len - n_iterations, 0)
            print('At iteration: ', total_iterations, ' GR = ', GR)
            print(f'At iteration: {total_iterations}, {burn_in_len} steps of burn-in remain.')
            np.savetxt(model_name + str(total_iterations) + '.txt', GR)
            if np.all(GR < 1.2):
                converged = True
|
[
"michael.w.irvin@vanderbilt.edu"
] |
michael.w.irvin@vanderbilt.edu
|
07a4523135d60b04ed51747157c9e44b0f036a7f
|
dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5
|
/eggs/Products.CMFQuickInstallerTool-3.0.5-py2.7.egg/Products/CMFQuickInstallerTool/tests/test_install.py
|
159fd3c8b974543f139c8c6551b4c3dc743d10b2
|
[] |
no_license
|
nacho22martin/tesis
|
ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5
|
e137eb6225cc5e724bee74a892567796166134ac
|
refs/heads/master
| 2020-12-24T13:20:58.334839
| 2013-11-09T12:42:41
| 2013-11-09T12:42:41
| 14,261,570
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
import doctest
import unittest
from Products.CMFTestCase import CMFTestCase
from Products.GenericSetup import EXTENSION, profile_registry
from Testing.ZopeTestCase import FunctionalDocFileSuite as Suite
CMFTestCase.installProduct('CMFQuickInstallerTool')
CMFTestCase.installProduct('CMFCalendar')
CMFTestCase.setupCMFSite()
OPTIONFLAGS = (doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
def registerTestProfile(test):
    """Doctest setUp hook: register the CMFQI GenericSetup extension profile."""
    profile_registry.registerProfile(
        'test',
        'CMFQI test profile',
        'Test profile for CMFQuickInstallerTool',
        'profiles/test',
        'Products.CMFQuickInstallerTool',
        EXTENSION,
        for_=None,
    )
def test_suite():
    """Bundle the functional doctest files into a single test suite."""
    # Options shared by every doctest file; only actions.txt needs the
    # profile-registration setUp hook.
    common = dict(
        optionflags=OPTIONFLAGS,
        package='Products.CMFQuickInstallerTool.tests',
        test_class=CMFTestCase.FunctionalTestCase,
    )
    return unittest.TestSuite((
        Suite('actions.txt', setUp=registerTestProfile, **common),
        Suite('profiles.txt', **common),
        Suite('install.txt', **common),
    ))
|
[
"ignacio@plone.(none)"
] |
ignacio@plone.(none)
|
a0ae43e473fed201713fb7ef16cd61bf0708f846
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/zqr6f8dRD84K8Lvzk_3.py
|
cf260be9367695b002d1e201a20f2264da1b9dae
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
"""
As stated on the [On-Line Encyclopedia of Integer
Sequences](https://oeis.org/A003215):
> The hexagonal lattice is the familiar 2-dimensional lattice in which each
> point has 6 neighbors.
A **centered hexagonal number** is a centered figurate number that represents
a hexagon with a dot in the center and all other dots surrounding the center
dot in a hexagonal lattice.
At the end of that web page the following illustration is shown:
Illustration of initial terms:
.
. o o o o
. o o o o o o o o
. o o o o o o o o o o o o
. o o o o o o o o o o o o o o o o
. o o o o o o o o o o o o
. o o o o o o o o
. o o o o
.
. 1 7 19 37
.
Write a function that takes an integer `n` and returns `"Invalid"` if `n` is
not a **centered hexagonal number** or its illustration as a multiline
rectangular string otherwise.
### Examples
hex_lattice(1) ➞ " o "
# o
hex_lattice(7) ➞ " o o \n o o o \n o o "
# o o
# o o o
# o o
hex_lattice(19) ➞ " o o o \n o o o o \n o o o o o \n o o o o \n o o o "
# o o o
# o o o o
# o o o o o
# o o o o
# o o o
hex_lattice(21) ➞ "Invalid"
### Notes
N/A
"""
def hex_lattice(n):
    """Draw the centered-hexagonal illustration for *n* dots.

    Returns "Invalid" unless n is a centered hexagonal number
    (n = 3k(k-1) + 1); otherwise returns the hexagon of 'o' characters as a
    rectangular, newline-joined string of centered rows.
    """
    # Invert n = 3k(k-1) + 1 for the number of rings k (the quadratic formula).
    side = (3 + (12 * n - 3) ** 0.5) / 6
    if side != int(side):
        return "Invalid"
    side = int(side)
    width = 4 * side - 1
    # Bottom half: rows with 2*side-1 dots down to side dots, centered.
    rows = [('o ' * dots + 'o').center(width)
            for dots in range(2 * side - 2, side - 2, -1)]
    # Mirror everything but the middle row on top.
    return '\n'.join(rows[:0:-1] + rows)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
654481ab224a9394d1a33536a5456ad7582ecd1a
|
c6070314ce23ede0f7b10cf3a4126b3575909e57
|
/canvas2nbgrader.py
|
5248ec5c33aa093d864dec72c8d1e31de970075e
|
[] |
no_license
|
vahtras/canvas2nbgrader
|
b4615b49c4ebdd041a3a91d9be6d4c2fd7275349
|
b880e478b9c98c9976005df63620a264d257d134
|
refs/heads/master
| 2020-03-08T10:40:08.219057
| 2018-04-04T19:13:24
| 2018-04-04T19:13:24
| 128,078,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
#!/usr/bin/env python
"""Import student id from Canvas (Exported Grade table)"""
import sys
import csv
def get_records(csv_file):
    """Materialize an open CSV stream as a list of header-keyed row dicts."""
    reader = csv.DictReader(csv_file)
    return [row for row in reader]
def split_names(records):
    """Derive first_name/last_name from Canvas's "Last, First" Student column.

    Placeholder rows (the test student and the "Points Possible" header row)
    are dropped; every other record is copied with the two name keys added.
    """
    result = []
    for record in records:
        student = record['Student']
        # Canvas exports include a test account and a points-possible row.
        if student == 'Studenttest' or student.strip() == 'Points Possible':
            continue
        last, first = student.split(', ')
        result.append({**record, 'first_name': first, 'last_name': last})
    return result
def out_dict(records):
    """Write the id/first_name/last_name/email columns of *records* to
    students.csv in the layout nbgrader expects."""
    fieldnames = ["id", "first_name", "last_name", "email"]
    # Project each Canvas record onto the nbgrader columns.
    rows = [
        {
            'id': r["ID"],
            'first_name': r["first_name"],
            'last_name': r["last_name"],
            'email': r["SIS Login ID"],
        }
        for r in records
    ]
    with open('students.csv', 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)
def main():
    """CLI entry point: convert an exported Canvas grade CSV to students.csv.

    Usage: canvas2nbgrader.py <exported_grades.csv>; exits with status 1 when
    the argument is missing.
    """
    try:
        csv_file = sys.argv[1]
    except IndexError:
        print("Usage: {} csv_file".format(sys.argv[0]))
        sys.exit(1)
    with open(csv_file) as f:
        # Remove BOM character in file (Canvas exports presumably carry a
        # UTF-8 BOM -- confirm against a fresh export).
        lines = [line.replace('\ufeff', '') for line in f]
    records = split_names(get_records(lines))
    out_dict(records)


if __name__ == "__main__":
    sys.exit(main())
|
[
"vahtras@kth.se"
] |
vahtras@kth.se
|
61e0747aab767e96bb13632b74f3de7fa9282af3
|
e146d44875fb44a13b3b004604694bccaa23ddf2
|
/docs/Amadeus-master/pactravel-master/swagger_client/models/flight_search_bound.py
|
5fd17c599197fd7743dbc3b5a7ee7b5989daf56d
|
[] |
no_license
|
shopglobal/travel
|
8d959b66d77f2e1883b671628c856daf0f3b21bb
|
0c33467cd2057da6e01f9240be2fd4b8f5490539
|
refs/heads/master
| 2022-12-23T00:13:02.597730
| 2017-09-26T06:03:15
| 2017-09-26T06:03:15
| 104,405,869
| 0
| 0
| null | 2022-12-08T00:35:36
| 2017-09-21T22:43:23
|
PHP
|
UTF-8
|
Python
| false
| false
| 3,993
|
py
|
# coding: utf-8
"""
Amadeus Travel Innovation Sandbox
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FlightSearchBound(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'flights': 'list[FlightSearchSegment]',
        'duration': 'str'
    }

    attribute_map = {
        'flights': 'flights',
        'duration': 'duration'
    }

    def __init__(self, flights=None, duration=None):
        """
        FlightSearchBound - a model defined in Swagger
        """
        self._flights = None
        self._duration = None

        # flights is required (its setter rejects None); duration is optional.
        self.flights = flights
        if duration is not None:
            self.duration = duration

    @property
    def flights(self):
        """
        Gets the flights of this FlightSearchBound.

        :return: The flights of this FlightSearchBound.
        :rtype: list[FlightSearchSegment]
        """
        return self._flights

    @flights.setter
    def flights(self, flights):
        """
        Sets the flights of this FlightSearchBound.

        :param flights: The flights of this FlightSearchBound.
        :type: list[FlightSearchSegment]
        """
        if flights is None:
            raise ValueError("Invalid value for `flights`, must not be `None`")

        self._flights = flights

    @property
    def duration(self):
        """
        Gets the duration of this FlightSearchBound.
        The duration of this bound, including layover time, expressed in the format hh:mm

        :return: The duration of this FlightSearchBound.
        :rtype: str
        """
        return self._duration

    @duration.setter
    def duration(self, duration):
        """
        Sets the duration of this FlightSearchBound.
        The duration of this bound, including layover time, expressed in the format hh:mm

        :param duration: The duration of this FlightSearchBound.
        :type: str
        """
        self._duration = duration

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested swagger models (anything with to_dict),
        # lists of models, and dicts of models into plain Python values.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, FlightSearchBound):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"president@worldvaporexpo.com"
] |
president@worldvaporexpo.com
|
de52fcf01c00c0c8c33658cec50f0c1df04688f3
|
32eba552c1a8bccb3a329d3d152b6b042161be3c
|
/9_pj_mcw.pyw
|
3ed748fdabe7367b51ed1ef4ff3088848476a0c4
|
[] |
no_license
|
ilmoi/ATBS
|
d3f501dbf4b1099b76c42bead3ec48de3a935a86
|
7f6993751e2ad18af36de04168d32b049d85a9c1
|
refs/heads/master
| 2022-07-11T21:56:23.284871
| 2020-05-15T05:26:06
| 2020-05-15T05:26:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,518
|
pyw
|
import pyperclip
import sys
import shelve
# uncomment and run once!
# TEXT = {'agree': """Yes, I agree. That sounds fine to me.""",
# 'busy': """Sorry, can we do this later this week or next week?""",
# 'upsell': """Would you consider making this a monthly donation?"""}
# file =shelve.open('phrase_db')
# for key, value in list(TEXT.items()):
# file[key] = value
# # test
# print(file['agree'])
# file.close()
# Usage guard: the first CLI argument selects the action.
if len(sys.argv) < 2:
    print('Usage: python 9_pj_mcw [action - save / list / item to load]')
    sys.exit()

first = sys.argv[1]

# The with-block guarantees the shelf is synced and closed on every path.
# The original called sys.exit() before file.close(), which could leave a
# just-performed deletion unflushed to disk.
with shelve.open('phrase_db') as file:
    if first == 'save':
        # Store the current clipboard contents under the given keyword.
        keyword = sys.argv[2]
        text = pyperclip.paste()
        file[keyword] = text
    elif first == 'delete':
        second = sys.argv[2]
        if second == 'all':
            confirm = input('are you sure you want to wipe the dic?')
            if confirm == 'yes':
                # Delete from a snapshot of the keys: removing entries while
                # iterating the live keys view raises RuntimeError.
                for key in list(file.keys()):
                    del file[key]
                print("done! clean like a baby's ass?[wtf]")
        else:
            if second in file.keys():
                del file[second]
                print('deleted!')
            else:
                print('no such keyword')
    elif first == 'list':
        print('___current contents are:___')
        for key, value in list(file.items()):
            print(f'{key}: {value}')
    else:
        # Any other first argument is treated as a keyword to load.
        if first in file.keys():
            pyperclip.copy(file[first])
            print('copied to clipboard!')
        else:
            print('no such keyword')
|
[
"iljamoisejevs@gmail.com"
] |
iljamoisejevs@gmail.com
|
b99eaded4ce9e40c473b322f7ef3d19ceb146945
|
3299ee49e292fc5f4a0f1c0e364eb27cd539521a
|
/lesson_33_homework/test_site/Articles/views.py
|
0ae598994470e23fd2cc65f5fba238d2c49aba5f
|
[] |
no_license
|
alexeypodorozhnyi/Python_course_django_part
|
5258db76b5ca0592ed62f1d2d7a5bf8f80353bf1
|
9b51531f8b2d042210797c707d5f38b398897977
|
refs/heads/master
| 2020-11-26T01:52:13.467747
| 2020-01-29T20:55:23
| 2020-01-29T20:55:23
| 228,926,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
class MyClass:
    """Trivial holder for a single string value."""

    string = ''  # class-level default; shadowed per instance in __init__

    def __init__(self, s):
        self.string = s
def index(request):
    """Render the index page with a demo list in the template context."""
    numbers = [1, 2, 3, 4, 5, 6]
    context = {'my_list': numbers}
    return render(request, 'index.html', context)
def new(request):
    """Stub view: placeholder response for creating a news item."""
    return HttpResponse("Hey your new News!")
def edit(request):
    """Stub view: placeholder response for editing a news item."""
    return HttpResponse("Hey you can edit News!")
def lock(request):
    """Stub view: placeholder response for locking a news item."""
    return HttpResponse("Hey your can lock News!")
def add(request, item_id):
    """Stub view: acknowledge adding a news item identified by *item_id*."""
    if item_id:
        return HttpResponse('Add new news with item id:' + str(item_id))
    # Previously fell through and returned None, which makes Django raise
    # "view didn't return an HttpResponse"; answer explicitly instead.
    return HttpResponse('Error: missing item id')
def processing(request, mode, item_id):
    """Dispatch stub for article processing.

    *mode* is 'add' or 'delete'; anything else yields an error message.
    """
    if mode == 'add':
        if item_id:
            return HttpResponse('Add new articles with item id:' + str(item_id))
    elif mode == 'delete':
        if item_id:
            return HttpResponse('Delete articles with item id:' + str(item_id))
    else:
        return HttpResponse('Error chouse correct mode')
    # Previously returned None (a Django error) when item_id was falsy for a
    # recognized mode; answer explicitly instead.
    return HttpResponse('Error: missing item id')
def return_code(request, code):
    """Stub view: echo an article code."""
    if code:
        return HttpResponse('Article code:' + str(code))
    # Previously returned None (a Django error) for a falsy code.
    return HttpResponse('Error: missing code')
|
[
"alexey.podorozniy@gmail.com"
] |
alexey.podorozniy@gmail.com
|
a88b2e2e2cbe66502e17b460f6c306fee092a54e
|
660e87488effa7f3e6c043cf45a11bc59b31a1e9
|
/setup.py
|
59eae9dd4f0ecf542ee3dfeaa1513f436fa1a424
|
[] |
no_license
|
bramwelt/alabaster
|
df967aa165ea15678cce0b960d2993cff058c697
|
65876483837ecdd4e6798b4a5c5b1842f598f4f2
|
refs/heads/master
| 2021-01-17T15:48:51.117359
| 2014-01-01T03:07:10
| 2014-01-01T03:07:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
#!/usr/bin/env python

from setuptools import setup

# Version info -- read without importing
# (exec'ing _version.py avoids importing the alabaster package itself,
# which could fail at build time before its dependencies are installed).
_locals = {}
with open('alabaster/_version.py') as fp:
    exec(fp.read(), None, _locals)
version = _locals['__version__']

setup(
    name='alabaster',
    version=version,
    description='A configurable sidebar-enabled Sphinx theme',
    author='Jeff Forcier',
    author_email='jeff@bitprophet.org',
    url='https://github.com/bitprophet/sphinx-theme',
    packages=['alabaster'],
    include_package_data=True,
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Topic :: Documentation',
        'Topic :: Software Development :: Documentation',
    ],
)
|
[
"jeff@bitprophet.org"
] |
jeff@bitprophet.org
|
a0f34dfff064add39a5a0e2c24fef9d5508e159a
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/schcha027/question2.py
|
592328ec8d41a424a62af6a489e967d7653733b3
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
def is_valid_time(hours, minutes, seconds):
    """Return True iff hours/minutes/seconds form a valid 24-hour clock time."""
    return 0 <= hours <= 23 and 0 <= minutes <= 59 and 0 <= seconds <= 59


if __name__ == '__main__':
    # int() instead of eval(): eval on raw user input is a code-execution risk.
    hours = int(input("Enter the hours:\n"))
    minutes = int(input("Enter the minutes:\n"))
    seconds = int(input("Enter the seconds:\n"))
    # BUGFIX: the original 'checkM==1 & checkH==1 & checkS==1' parsed as a
    # chained comparison with bitwise '&' and reported the all-invalid case
    # (all flags 0) as a valid time; plain boolean logic fixes it.
    if is_valid_time(hours, minutes, seconds):
        print("Your time is valid.")
    else:
        print("Your time is invalid.")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
b17e57c7ad95e495e68871a72f53d9b3fa51a4f5
|
bf5dcdc1cb57ed72a47e0c444bb2fb631d3f0933
|
/setup.py
|
3d40c4fdc4479440f3c1e2913596de9a253375ae
|
[] |
no_license
|
vphpersson/twitter_osint
|
3e2128f1d9944053ee127ec748a56ede55cefcac
|
a437825d488afa2d5b15c221348cc72157f25227
|
refs/heads/master
| 2023-07-05T08:44:29.579442
| 2021-08-22T09:12:28
| 2021-08-22T09:12:28
| 398,564,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
from setuptools import setup, find_packages

setup(
    name='twitter_osint',
    version='0.1',
    packages=find_packages(),
    install_requires=[
        'httpx',
        'httpx_oauth @ git+ssh://git@github.com/vphpersson/httpx_oauth.git#egg=httpx_oauth',
        # BUGFIX: a missing trailing comma previously concatenated the
        # 'pyutils' and 'twitter_api' strings into one invalid requirement,
        # silently dropping the twitter_api dependency.
        'pyutils @ git+ssh://git@github.com/vphpersson/pyutils.git#egg=pyutils',
        'twitter_api @ git+ssh://git@github.com/vphpersson/twitter_api.git#egg=twitter_api'
    ]
)
|
[
"vph.persson@gmail.com"
] |
vph.persson@gmail.com
|
893741290acaa4737579c1cfb54e07484866c834
|
70b0d4b4440a97b648a08de0d89cc536e8f4c569
|
/1313.py
|
69aedd9d90dbd61fabe97a8b396f434ba1868c40
|
[] |
no_license
|
seoseokbeom/leetcode
|
01c9ca8a23e38a3d3c91d2de26f0b2a3a1710487
|
9d68de2271c2d5666750c8060407b56abbf6f45d
|
refs/heads/master
| 2023-03-27T20:20:24.790750
| 2021-03-25T04:43:50
| 2021-03-25T04:43:50
| 273,779,517
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
class Solution(object):
    """Decode a run-length encoded list given as [freq1, val1, freq2, val2, ...]."""

    def decompressRLElist(self, nums):
        """Return the decompressed list: each value repeated its paired frequency."""
        decoded = []
        # Even indices hold frequencies, odd indices hold values; pair them up.
        for freq, value in zip(nums[::2], nums[1::2]):
            decoded.extend([value] * freq)
        return decoded


a = Solution()
print(a.decompressRLElist([1, 1, 2, 3]))
|
[
"pronunciatio@naver.com"
] |
pronunciatio@naver.com
|
1d8792acf20db18580b85389fa2d5f8108a2d512
|
b3e9a8963b9aca334b93b95bc340c379544e1046
|
/euler/59.py
|
eab411b4784038d836ba37febc29bd02a82d47d8
|
[] |
no_license
|
protocol7/euler.py
|
86ea512c2c216968e6c260b19469c0c8d038feb7
|
e2a8e46a9b07e6d0b039a5496059f3bf73aa5441
|
refs/heads/master
| 2022-09-08T22:49:47.486631
| 2022-08-23T20:07:00
| 2022-08-23T20:07:00
| 169,478,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
#!/usr/bin/env python3

from string import ascii_lowercase
from itertools import permutations, cycle


def read_cipher():
    """Load Project Euler problem 59's cipher: comma-separated byte values."""
    with open("p059_cipher.txt") as f:
        s = f.read().strip()
    return [int(i) for i in s.split(",")]


c = read_cipher()


def to_string(l):
    """Render a list of byte values as a text string."""
    return "".join([chr(x) for x in l])


def find():
    """Brute-force every 3-letter lowercase XOR key (repeating cyclically).

    The correct plaintext is recognised by containing the word ' the ';
    returns the sum of the decrypted byte values, or None if no key matches.
    """
    for key in permutations(ascii_lowercase, 3):
        key = cycle([ord(x) for x in key])
        # XOR each cipher byte with the repeating key stream.
        pt = list(map(lambda x: x[0] ^ x[1], zip(c, key)))
        if " the " in to_string(pt):
            return sum(pt)


# Known answer for the official problem input.
assert 129448 == find()
|
[
"niklas@protocol7.com"
] |
niklas@protocol7.com
|
010cfa1c616d88a8f9af32b2216f527d47fe7ef3
|
dd3b8bd6c9f6f1d9f207678b101eff93b032b0f0
|
/basis/AbletonLive10.1_MIDIRemoteScripts/ableton/v2/control_surface/elements/optional.py
|
f4cef1c4ec91e78d8434b687f1a2ab8f8b7de8ca
|
[] |
no_license
|
jhlax/les
|
62955f57c33299ebfc4fca8d0482b30ee97adfe7
|
d865478bf02778e509e61370174a450104d20a28
|
refs/heads/master
| 2023-08-17T17:24:44.297302
| 2019-12-15T08:13:29
| 2019-12-15T08:13:29
| 228,120,861
| 3
| 0
| null | 2023-08-03T16:40:44
| 2019-12-15T03:02:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
# uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/ableton/v2/control_surface/elements/optional.py
# Compiled at: 2019-04-09 19:23:45
from __future__ import absolute_import, print_function, unicode_literals
from ...base import listens
from .combo import ToggleElement
class ChoosingElement(ToggleElement):
    u"""
    An Element wrapper that enables one of the nested elements based on
    the value of the given flag.
    """

    def __init__(self, flag=None, *a, **k):
        super(ChoosingElement, self).__init__(*a, **k)
        # Subscribe the listener to the flag and apply its current value
        # immediately so the correct nested element is active from the start.
        # NOTE(review): flag.value is dereferenced unconditionally, so despite
        # the flag=None default a real flag object appears to be required.
        self.__on_flag_changed.subject = flag
        self.__on_flag_changed(flag.value)

    @listens('value')
    def __on_flag_changed(self, value):
        # Toggle between the wrapped on/off elements whenever the flag changes.
        self.set_toggled(value)
class OptionalElement(ChoosingElement):
    u"""
    An Element wrapper that enables the nested element IFF some given
    flag is set to a specific value.
    """

    def __init__(self, control=None, flag=None, value=None, *a, **k):
        # Route the control into the "on" slot when value is truthy,
        # otherwise into the "off" slot; the unused slot stays None.
        on_control = control if value else None
        off_control = None if value else control
        super(OptionalElement, self).__init__(on_control=on_control, off_control=off_control, flag=flag, *a, **k)
        return
|
[
"jharrington@transcendbg.com"
] |
jharrington@transcendbg.com
|
fbba06a4b19bbae28afe04b3603983a619889f87
|
a6f9e8412682d8a9f21b2a3bf54b7088f7149cc9
|
/pytest/Compiler/constants32.py
|
df0f657cfab914f20431d61a087c5fe902148935
|
[
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
stellaraccident/mlir-npcomp
|
49a3c285d728d43db4caf7d18cb5919be40d6206
|
a9d7610f9d6740e984cbeb55854abac1f92414f9
|
refs/heads/master
| 2021-09-26T18:24:46.630327
| 2020-07-13T23:15:42
| 2020-07-13T23:15:42
| 250,896,585
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail
# Subset of constant tests which verify against a GenericTarget32.
from npcomp.compiler import test_config
from npcomp.compiler.target import *
# Build a decorator that imports and dumps each decorated function against a
# 32-bit generic target, so the constants below lower to i32/f32.
# (The CHECK lines are FileCheck directives consumed by the RUN command and
# must not be altered.)
import_global = test_config.create_import_dump_decorator(
    target_factory=GenericTarget32)


# CHECK-LABEL: func @integer_constants
@import_global
def integer_constants():
    # CHECK: %[[A:.*]] = constant 100 : i32
    a = 100
    return a


# CHECK-LABEL: func @float_constants
@import_global
def float_constants():
    # CHECK: %[[A:.*]] = constant 2.200000e+00 : f32
    a = 2.2
    return a
|
[
"stellaraccident@gmail.com"
] |
stellaraccident@gmail.com
|
c939c29a265c9ad2c8e60bbe024d8471ccb7348d
|
480e33f95eec2e471c563d4c0661784c92396368
|
/Geometry/CMSCommonData/test/dd4hep/2026D35.py
|
fcd71f1b6cde299aae8e0ce696f2cf7e34257bba
|
[
"Apache-2.0"
] |
permissive
|
cms-nanoAOD/cmssw
|
4d836e5b76ae5075c232de5e062d286e2026e8bd
|
4eccb8a758b605875003124dd55ea58552b86af1
|
refs/heads/master-cmsswmaster
| 2021-01-23T21:19:52.295420
| 2020-08-27T08:01:20
| 2020-08-27T08:01:20
| 102,867,729
| 7
| 14
|
Apache-2.0
| 2022-05-23T07:58:09
| 2017-09-08T14:03:57
|
C++
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C4_cff import Phase2C4

# cmsRun configuration: validate HCAL parameters for the Phase-2 2026D35
# detector geometry, sourced from DD4hep rather than the legacy DDD XML.
process = cms.Process("HcalParametersTest",Phase2C4)
process.load('Geometry.HcalCommonData.hcalParameters_cff')
process.load('Geometry.HcalCommonData.hcalSimulationParameters_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')

# No event data needed; a single empty event triggers the analyzer.
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)

# DD4hep geometry provider reading the extended 2026D35 description.
process.DDDetectorESProducer = cms.ESSource("DDDetectorESProducer",
    confGeomXMLFiles = cms.FileInPath('Geometry/CMSCommonData/data/dd4hep/cmsExtendedGeometry2026D35.xml'),
    appendToDataLabel = cms.string('')
)

process.DDCompactViewESProducer = cms.ESProducer("DDCompactViewESProducer",
    appendToDataLabel = cms.string('')
)

process.hpa = cms.EDAnalyzer("HcalParametersAnalyzer")
# Read both HCAL parameter sets from DD4hep.
process.hcalParameters.fromDD4Hep = cms.bool(True)
process.hcalSimulationParameters.fromDD4Hep = cms.bool(True)

process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")

process.p1 = cms.Path(process.hpa)
|
[
"sunanda.banerjee@cern.ch"
] |
sunanda.banerjee@cern.ch
|
051b8fc4c4f9b655d4722a097ae2ebb6b6478ded
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_appended.py
|
d4a4abe95c2c87fcc180869b9bcb91fd2dea25b1
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
#calss header
class _APPENDED():
def __init__(self,):
self.name = "APPENDED"
self.definitions = append
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['append']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d93e55450c39085ee035efdef32eaa204a90914b
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=9/params.py
|
de9ca510da01c4ee2e0649bfbafc64737c8bc56b
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# Experiment parameter record. Note this is a bare dict literal (no
# assignment): consumers read/eval this file's text rather than import it.
{'cpus': 4,
'duration': 30,
'final_util': '3.531929',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.8',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 9,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
195af1a86103dd62444c59e3c5fb1c2951f026c8
|
924147cf4ce118a1856bf0e6107d3dac671e6ac4
|
/test_app/urls.py
|
44861043de0c4c84c4ec4b65c1d706a9b60d1cc0
|
[
"BSD-3-Clause"
] |
permissive
|
jlongster/django-waffle
|
4a3ec12477a7a2a783c8b3c661a4dbe313311d7c
|
acc8e4adb41e7713be9778460fc3e99e034b7511
|
refs/heads/master
| 2020-04-08T16:19:11.434019
| 2012-01-26T18:54:29
| 2012-01-26T18:54:29
| 3,734,066
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
from django.conf.urls.defaults import patterns, url, include
from django.contrib import admin
from django.http import HttpResponseNotFound, HttpResponseServerError

from test_app import views
from waffle.views import wafflejs

# Minimal handlers so the test app can exercise 404/500 paths without
# needing error templates.
handler404 = lambda r: HttpResponseNotFound()
handler500 = lambda r: HttpResponseServerError()

admin.autodiscover()

# NOTE(review): uses the pre-Django-1.4 ``patterns('', ...)`` URLconf style
# (and one bare tuple entry for admin), matching this project's Django era.
urlpatterns = patterns('',
    url(r'^flag_in_view', views.flag_in_view, name='flag_in_view'),
    url(r'^wafflejs$', wafflejs, name='wafflejs'),
    url(r'^switch-on', views.switched_view),
    url(r'^switch-off', views.switched_off_view),
    url(r'^flag-on', views.flagged_view),
    url(r'^flag-off', views.flagged_off_view),
    (r'^admin/', include(admin.site.urls))
)
|
[
"james@mozilla.com"
] |
james@mozilla.com
|
1dc90019573b41fd04ccda4e3a6b90bc90a27b7a
|
48b6546e0cf0aeba23f802c005dbcb863f8ceecb
|
/searching-algorithms/linear_search.py
|
1da3586fe8a61e11198ed6a4335d78e697ab93b3
|
[] |
no_license
|
panu2306/Data-Structure-Programs
|
42bc1b592fc070eed9c16a192d27103593723061
|
a4cb5fb496b672492e19468868a2da266d7d76aa
|
refs/heads/master
| 2022-11-14T23:04:20.609617
| 2022-11-14T18:01:21
| 2022-11-14T18:01:21
| 148,877,607
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
def linearSearch(a, searchElement):
    """Scan sequence ``a`` left to right; return 1 if ``searchElement``
    occurs in it, otherwise -1."""
    for value in a:
        if value == searchElement:
            return 1
    return -1


a = [1, 4, 2, 5, 3]
searchElement = 5
result = linearSearch(a, searchElement)
print("Element is present in array" if result != -1 else "Element is not present in array")
|
[
"pranavbhendawade@gmail.com"
] |
pranavbhendawade@gmail.com
|
4206b96806030b27e6a032f97cb22dfdb1822a45
|
d42f7d4f2377f67797d41b2f75347c5330d34953
|
/fabfile/france.local.py
|
319441ef820b3e39735a14277588f4449a76011b
|
[
"WTFPL"
] |
permissive
|
phreephree/addok
|
8d92893b791416e0169e6c74f5842868833478e9
|
320d145e72964d54eb33742f0329e9f46f5c5ab5
|
refs/heads/master
| 2021-08-24T06:51:04.971611
| 2017-12-08T13:44:40
| 2017-12-08T13:44:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
# Query pre-processing pipeline (applied to the raw user query, in order).
QUERY_PROCESSORS_PYPATHS = [
    'addok.helpers.text.check_query_length',
    "addok_france.extract_address",
    "addok_france.clean_query",
    "addok_france.remove_leading_zeros",
]

# Result post-processing: housenumber matching, label building, then the
# scoring passes (importance plus the various distance measures).
SEARCH_RESULT_PROCESSORS_PYPATHS = [
    "addok.helpers.results.match_housenumber",
    "addok_france.make_labels",
    "addok.helpers.results.score_by_importance",
    "addok.helpers.results.score_by_autocomplete_distance",
    "addok.helpers.results.score_by_ngram_distance",
    "addok.helpers.results.score_by_geo_distance",
]

# Token processors applied at both index and query time (French-specific
# ordinal handling plus phonemicization).
PROCESSORS_PYPATHS = [
    "addok.helpers.text.tokenize",
    "addok.helpers.text.normalize",
    "addok_france.glue_ordinal",
    "addok_france.fold_ordinal",
    "addok_france.flag_housenumber",
    "addok.helpers.text.synonymize",
    "addok_fr.phonemicize",
]

SQLITE_DB_PATH = '/srv/addok/addok.db'
|
[
"yb@enix.org"
] |
yb@enix.org
|
d2619d839aa6f5611e6030cf62cb0f38db8c7b50
|
c3cff86728d436e4e7b522b1382d96f8e32611ff
|
/minidjango/utils/types.py
|
6df56e1950ad1a53a82390965a13bada3545b9c2
|
[] |
no_license
|
pahaz/lesson2
|
6189ce3d3c06c040b27d283ae0754eed6c496b43
|
aea2e09e98e5562476a5d15447e15e127f900d43
|
refs/heads/master
| 2023-08-26T20:27:08.697314
| 2016-04-15T12:26:09
| 2016-04-15T12:26:09
| 55,223,164
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,387
|
py
|
import collections
from io import BytesIO
import io
__author__ = 'pahaz'
class MultiValueDict(collections.UserDict):
    """
    Mapping whose values are stored internally as lists/tuples but read
    back as their first element; ``getlist`` exposes the full list.

    >>> d = MultiValueDict()
    >>> d['foo'] = ['bar']
    >>> d['foo']
    'bar'
    >>> d = MultiValueDict({'foo': ['v1', 'v2']})
    >>> d['foo']
    'v1'
    >>> d.getlist('foo')
    ['v1', 'v2']
    >>> list(d.items())
    [('foo', 'v1')]
    """

    def __iter__(self):
        # Delegate key iteration to UserDict.
        yield from super().__iter__()

    def __getitem__(self, key):
        # Reads surface only the first stored value.
        stored = super().__getitem__(key)
        if not isinstance(stored, (list, tuple)):
            raise RuntimeError('Invalid MultiValueDict inner state')
        return stored[0]

    def __setitem__(self, key, item):
        # Writes must always supply the full multi-value container.
        if not isinstance(item, (list, tuple)):
            raise TypeError("Can't set not a multi value")
        if not item:
            raise ValueError("Can't set empty multi value")
        self.data[key] = item

    def getlist(self, key, default=None):
        """Return the full stored list for ``key`` (or ``default``)."""
        stored = self.data.get(key, default)
        if not isinstance(stored, (list, tuple)):
            raise RuntimeError('Invalid MultiValueDict inner state')
        return stored
class LimitedStream(io.IOBase):
    """
    Wrap another stream so that at most ``limit`` bytes can ever be read
    from it, with an internal buffer to support ``readline``.

    >>> import io
    >>> bio = io.BytesIO(b"some -- long -- byte string")
    >>> lbio = LimitedStream(bio, 4)
    >>> lbio.read()
    b'some'
    >>> lbio.read()
    b''
    """

    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        self.stream = stream
        self.remaining = limit  # bytes still allowed to be pulled from stream
        self.buffer = b''       # bytes read ahead but not yet returned
        self.buf_size = buf_size

    def _read_limited(self, size=None):
        # Clamp the request to the remaining budget; never over-read.
        budget = self.remaining if (size is None or size > self.remaining) else size
        if budget == 0:
            return b''
        data = self.stream.read(budget)
        self.remaining -= len(data)
        return data

    def read(self, size=None):
        """Read up to ``size`` bytes (all remaining when ``size`` is None)."""
        if size is None:
            # Drain the buffer plus everything the limit still allows.
            result = self.buffer + self._read_limited()
            self.buffer = b''
        elif size < len(self.buffer):
            # Served entirely from the buffer.
            result = self.buffer[:size]
            self.buffer = self.buffer[size:]
        else:
            # Buffer plus a limited top-up from the underlying stream.
            result = self.buffer + self._read_limited(size - len(self.buffer))
            self.buffer = b''
        return result

    def readline(self, size=None):
        """Read one line (up to ``size`` bytes when given), respecting the limit."""
        # Fill the buffer until it holds a newline or enough bytes.
        while b'\n' not in self.buffer and \
                (size is None or len(self.buffer) < size):
            if size:
                chunk = self._read_limited(size - len(self.buffer))
            else:
                chunk = self._read_limited()
            if not chunk:
                break
            self.buffer += chunk
        # Split one line off the front of the buffer; keep the remainder.
        reader = BytesIO(self.buffer)
        line = reader.readline(size) if size else reader.readline()
        self.buffer = reader.read()
        return line
|
[
"pahaz.blinov@gmail.com"
] |
pahaz.blinov@gmail.com
|
5b5cfdbc293e4fea4032ad37a2ddd1f57d91ab27
|
4999d470db3128d6b2d904babf1446d62a9a6cc2
|
/flask_project/app.py
|
ae20cc5f3b6b108c0d979f90fd5d9a9704534c78
|
[] |
no_license
|
love-adela/jungle_admission
|
2ade66d7a828965f250f5eac9b971a9a84eddb88
|
bf3c3f52c61c6dded256245e28aaf30ab2ec5ffa
|
refs/heads/main
| 2023-01-01T17:16:16.756328
| 2020-10-29T09:27:55
| 2020-10-29T09:27:55
| 306,814,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
# BUGFIX: request and jsonify were used below without being imported,
# so every hit on /test raised NameError.
from flask import Flask, render_template, request, jsonify

app = Flask(__name__)


@app.route('/')
def home():
    """Render the landing page."""
    return render_template('index.html')


@app.route('/test', methods=['GET'])
def test_get():
    """Echo a GET query parameter back as JSON."""
    received_title = request.args.get('given_title')
    print(received_title)
    return jsonify({'result': 'success', 'msg': '이 요청은 GET!'})


@app.route('/test', methods=['POST'])
def test_post():
    """Echo a POST form field back as JSON.

    BUGFIX: ``request.form`` is a mapping, not a callable —
    ``request.form('given_title')`` raised TypeError; use ``.get`` instead.
    """
    received_title = request.form.get('given_title')
    print(received_title)
    return jsonify({'result':'success', 'msg': '이 요청은 POST!'})


if __name__ == '__main__':
    app.run('0.0.0.0', port=5000, debug=True)
|
[
"love.adelar@gmail.com"
] |
love.adelar@gmail.com
|
7ff6e8fd5a803d6d9e9157afe6eacd17efe5e4a0
|
0c3f4769e91bf7bea8f9ce74a6dd11e092638bc4
|
/tests/test_ingest_title.py
|
613caef04600cc33de74fa6ad3a98fe50d745030
|
[
"MIT"
] |
permissive
|
ourresearch/journalsdb
|
392ea36282b17154289f1845628cc4706d3c59e9
|
718b118b8e97da9a07f89c2cd2bae207a9217b66
|
refs/heads/main
| 2022-10-29T10:05:59.410041
| 2022-10-26T18:31:21
| 2022-10-26T18:31:21
| 331,048,946
| 9
| 0
| null | 2021-04-03T16:34:12
| 2021-01-19T16:56:22
|
Python
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
from ingest.journals.journals_new_journal import NewJournal
from models.journal import ISSNMetaData
def test_clean_title_print():
    """clean_title strips a trailing "(Print)" medium designator."""
    issn_md = ISSNMetaData()
    nj = NewJournal(issn_md)
    title_with_print = nj.clean_title("Cooking Today (Print)")
    assert title_with_print == "Cooking Today"


def test_clean_title_electronic():
    """clean_title strips a trailing "(electronic)" medium designator
    (case-insensitive on the medium word)."""
    issn_md = ISSNMetaData()
    nj = NewJournal(issn_md)
    title_with_electronic = nj.clean_title("Cooking Today (electronic)")
    assert title_with_electronic == "Cooking Today"


def test_clean_title_trailing_period():
    """clean_title drops a trailing period and surrounding whitespace."""
    issn_md = ISSNMetaData()
    nj = NewJournal(issn_md)
    title_with_period = nj.clean_title("Cooking today. ")
    assert title_with_period == "Cooking today"
|
[
"caseym@gmail.com"
] |
caseym@gmail.com
|
ae4a4cd38051c792e2d024de49626d30f9f91601
|
8c917dc4810e2dddf7d3902146280a67412c65ea
|
/v_7/NISS/common_shamil_v3/hr_custom/report/promotion_report.py
|
28508ee80d1f425d9b4ed82fa841a66a554347f0
|
[] |
no_license
|
musabahmed/baba
|
d0906e03c1bbd222d3950f521533f3874434b993
|
0b997095c260d58b026440967fea3a202bef7efb
|
refs/heads/master
| 2021-10-09T02:37:32.458269
| 2018-12-20T06:00:00
| 2018-12-20T06:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,012
|
py
|
import time
import pooler
#import rml_parse
import copy
from report import report_sxw
import pdb
import re
class promotion_report(report_sxw.rml_parse):
    # RML report parser (OpenERP report_sxw framework) listing employee
    # promotions approved within a user-selected date range.
    _name = 'report.promotion.report'

    def __init__(self, cr, uid, name, context):
        super(promotion_report, self).__init__(cr, uid, name, context=context)
        # Expose helpers to the RML template under these names.
        self.localcontext.update({
            'time': time,
            '_get_emp':self._get_emp,
            'line5':self.get_promotion_total,
        })
        self.context = context

    def get_promotion_total(self,data,choice,date1_v,date2_v):
        # Delegate the count to hr.process.archive over the promotion_date range.
        process_archive=self.pool.get('hr.process.archive')
        # res = process_archive.self(self,choice,'promotion_date','promotion_date',date1_v,date2_v)
        res = process_archive._archive_count(self,choice,'promotion_date','promotion_date',date1_v,date2_v)
        return res

    def _get_emp(self,data):
        # Return one dict per approved promotion in [fromm, to], joined to
        # employee code, resource name and salary degree via raw SQL.
        prom_obj=self.pool.get('hr.process.archive')
        date1 = data['form']['fromm']
        date2 = data['form']['to']
        # NOTE(review): ids_list is computed but unused — the SQL below is
        # the actual data source.
        ids_list=prom_obj.search(self.cr,self.uid, [('approve_date', '>=', date1),('approve_date', '<=', date2)],context=self.context)
        #for l in ids_list:
        # degree_id=prom_obj.browse(self.cr,self.uid,l,context=self.context).reference.id
        self.cr.execute('''
SELECT ROW_NUMBER ( )
OVER (order by p.id) as no,e.emp_code as code,r.name as emp,p.approve_date as date,
d.name AS degree FROM hr_process_archive AS p
left join hr_employee AS e on (p.employee_id=e.id)
left join resource_resource AS r on (e.resource_id=r.id)
left join hr_salary_degree AS d on (e.degree_id=d.id)
where
e.employment_date < p.approve_date and
p.approve_date between %s and %s
''',(date1,date2))
        res = self.cr.dictfetchall()
        return res

# Register the parser with its RML template.
report_sxw.report_sxw('report.promotion.report', 'hr.process.archive','addons/hr_custom/report/promotion_report.rml', parser=promotion_report, header=True)
|
[
"bakry@exp-sa.com"
] |
bakry@exp-sa.com
|
ce3e467bee2432e67dcc978a34ac48c49a0424b6
|
466912406272829982f75854cf0104c6ce8c9814
|
/data/nlp/fund/gongshang.py
|
818b29f17f8deb811cc1922bc2142c0840229a65
|
[] |
no_license
|
logonmy/Codes
|
9631fa103fc499663361fa7eeccd7cedb9bb08e4
|
92723efdeccfc193f9ee5d0ab77203c254f34bc2
|
refs/heads/master
| 2021-09-21T18:07:22.985184
| 2018-08-30T05:53:26
| 2018-08-30T05:53:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,884
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'victor'
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '..'))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))
import db as dbcon
import loghelper
from common import dbutil
from datetime import datetime, timedelta
from bson.objectid import ObjectId
loghelper.init_logger("gsf", stream=True)
logger_gsf = loghelper.get_logger('gsf')


class GongshangFundEvent(object):
    # Scans recent topic-44 messages for gongshang (business-registry)
    # funding changes and fans them out as task.news documents in Mongo.

    def __init__(self, check_period=1):
        # check_period: look-back window in days.
        self.db = dbcon.connect_torndb()
        self.mongo = dbcon.connect_mongo()
        self.check_period = check_period

    def generate_gs_fund_event(self):
        global logger_gsf
        yesterday = datetime.now() - timedelta(days=self.check_period)
        logger_gsf.info('Gongshang Fund starts')
        for tpm in dbutil.get_topic_messages(self.db, 44, yesterday):
            logger_gsf.info('Processing %s' % tpm.id)
            change_date = tpm.get('comments')
            # update funding
            # Companies referenced by the source news article.
            cids = self.mongo.article.news.find_one({'_id': ObjectId(tpm.relateId)}).get('companyIds', [])
            for cid in cids:
                cprtid = dbutil.get_company_corporate_id(self.db, cid)
                dbutil.update_gongshang_funding(self.db, cid, cprtid, change_date)
            # generate task news
            # upsert=True (last positional arg): one task.news doc per article.
            self.mongo.task.news.update({'news_id': tpm.relateId},
                                        {'news_id': tpm.relateId, 'news_date': datetime.now(), 'type': 'fund',
                                         'createTime': datetime.utcnow(), 'processStatus': int(0),
                                         'source': 'gongshang', 'companyIds': cids}, True)


if __name__ == '__main__':
    gsfe = GongshangFundEvent()
    gsfe.generate_gs_fund_event()
|
[
"hush_guo@163.com"
] |
hush_guo@163.com
|
ed8d5a9e266a052caacfc08d036eb4aa9618228c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02886/s131934757.py
|
9c2208d916bb0fd3e3065d0aabe691293dfc81f2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
#!/usr/bin/env python3
def total_pairwise_product(values):
    """Return the sum of values[i] * values[j] over all pairs i < j.

    Uses the identity 2 * sum_{i<j} a_i*a_j = (sum a)^2 - sum a^2,
    replacing the original O(N^2) double loop with a single O(N) pass
    (exact for integers).
    """
    total = sum(values)
    return (total * total - sum(v * v for v in values)) // 2


if __name__ == '__main__':
    N = int(input())
    D = [int(s) for s in input().split()]
    # Only the first N values participate, matching the original loops.
    print(total_pairwise_product(D[:N]))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
36a6f635c959bf4b4b5b30b9756599e5d0831ffd
|
108db0d378354947b94b0d649fdb2779c8b7957f
|
/jwtauth/jwtauth/settings.py
|
51713e79093067e6196f4aa3a8c2f50e6eee1390
|
[] |
no_license
|
P-iyushRaj/DRF-WORK
|
25f2676f62694ea5619397a2e10aca0947dbe902
|
21ca80a6027f110e7213fe7ee3e783bcfe357089
|
refs/heads/master
| 2023-03-27T13:25:03.928779
| 2021-03-27T04:14:20
| 2021-03-27T04:14:20
| 346,010,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,372
|
py
|
"""
Django settings for jwtauth project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z5ubsq2gb+^a4@^!tucy@(bv7u#1z6ksja(wq724&i5__6+3im'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'knox',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jwtauth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jwtauth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
'knox.auth.TokenAuthentication',
]
}
|
[
"piyush@gmail.com"
] |
piyush@gmail.com
|
00d156d0f2a3e03443c4a9aa53137d649e2d9735
|
18a846d1d598d193976437fbefdf144a13e0404b
|
/mezzanine/utils/models.py
|
ff1d2ea8f18a782a0a0e607e836138d0e185fa25
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kowalej/mezzanine
|
8282195c99717625856510474f4f9583a36c7cf6
|
96915c33325fd74277a630c27069e4c92482e951
|
refs/heads/master
| 2021-01-17T22:25:25.495684
| 2012-04-16T11:59:21
| 2012-04-16T11:59:21
| 3,392,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,343
|
py
|
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model, Field
def base_concrete_model(abstract, instance):
    """
    Walk ``instance``'s MRO from the most basic class downward and return
    the first concrete (non-abstract) class that inherits from ``abstract``.

    This lets methods defined on an abstract model query data against the
    super-most concrete model in the chain — e.g. for custom content types
    subclassing ``Page``, fields like ``slug`` and ``_order`` must be
    resolved against ``Page`` itself rather than the subclass.

    Falls back to the instance's own class when no match is found.
    """
    for candidate in reversed(instance.__class__.__mro__):
        # issubclass is checked first so _meta is only touched on real models.
        if issubclass(candidate, abstract) and not candidate._meta.abstract:
            return candidate
    return instance.__class__
class ModelMixinBase(type):
    """
    Metaclass for ``ModelMixin`` which is used for injecting model
    fields and methods into models defined outside of a project.
    """

    def __new__(cls, name, bases, attrs):
        """
        Checks for an inner ``Meta`` class with a ``mixin_for``
        attribute containing the model that this model will be mixed
        into. Once found, copy over any model fields and methods onto
        the model being mixed into, and return it as the actual class
        definition for the mixin.
        """
        if name == "ModelMixin":
            # Actual ModelMixin class definition.
            return super(ModelMixinBase, cls).__new__(cls, name, bases, attrs)
        try:
            mixin_for = attrs.pop("Meta").mixin_for
            if not issubclass(mixin_for, Model):
                raise TypeError
        except (TypeError, KeyError, AttributeError):
            # BUGFIX: the '%s' placeholder was never interpolated, so the
            # error always literally printed "'%s'"; supply the class name.
            raise ImproperlyConfigured("The ModelMixin class '%s' requires "
                                       "an inner Meta class with the "
                                       "``mixin_for`` attribute defined, "
                                       "with a value that is a valid model."
                                       % name)
        # Copy fields and methods onto the model being mixed into, and
        # return it as the definition for the mixin class itself.
        for k, v in attrs.items():
            if isinstance(v, Field):
                # Model fields must be registered through Django's hook.
                v.contribute_to_class(mixin_for, k)
            elif k != "__module__":
                setattr(mixin_for, k, v)
        return mixin_for
class ModelMixin(object):
    """
    Used as a subclass for mixin models that inject their behaviour onto
    models defined outside of a project. The subclass should define an
    inner ``Meta`` class with a ``mixin_for`` attribute containing the
    model that will be mixed into.
    """
    # NOTE(review): Python 2 metaclass hook — under Python 3 this attribute
    # is inert and ``class ModelMixin(metaclass=ModelMixinBase)`` would be
    # required instead.
    __metaclass__ = ModelMixinBase
|
[
"steve@jupo.org"
] |
steve@jupo.org
|
b1cbfa625f10b9e309d35cfdf8103961d6a183cb
|
2dd4b89f60bd22d96ca6043666816069ba060875
|
/TPplots/circos_convert_contigs_coords.py
|
4458d301054967ca4be4740ced7df70f80b3440b
|
[] |
no_license
|
liaochenlanruo/TPplots
|
4d65b970f3a105c48a1a66aeb176e299f4bb3cea
|
4d0ed24f9b5b7fcd80942abb5f22167d1aba38c6
|
refs/heads/master
| 2023-07-19T13:22:25.278304
| 2021-09-07T14:13:01
| 2021-09-07T14:13:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,444
|
py
|
#!/usr/bin/env python
# python 2.7.5 requires biopython
########### promer2circos ############
def get_contig(location, contig_coordlist, contig_pos):
    """Locate the contig that contains *location*.

    Returns a 3-tuple ``(contig, remaining_contigs, position)`` where
    ``contig`` is the matching ``[name, start, end]`` entry,
    ``remaining_contigs`` is the list of entries after it, and
    ``position`` is the (possibly adjusted) coordinate.  When *location*
    falls in a gap between contigs, ``contig_pos`` ("start" or "end")
    decides whether it snaps forward to the next contig's start or back
    to the previous contig's end.  Returns ``(False, False, False)``
    when nothing matches.
    """
    for idx, contig in enumerate(contig_coordlist):
        lo, hi = contig[1], contig[2]
        if lo <= location <= hi:
            # Location lies inside this contig.
            return contig, contig_coordlist[idx + 1:], location
        if contig_coordlist[idx - 1][2] < location <= lo:
            # Location falls in the gap immediately before this contig.
            # (For idx == 0 the ``idx - 1`` index wraps to the last
            # contig, exactly as in the original implementation.)
            if contig_pos == 'start':
                # A range start snaps forward to this contig's start.
                return contig, contig_coordlist[idx + 1:], lo
            # A range end snaps back to the previous contig's end.
            previous = contig_coordlist[idx - 1]
            return previous, contig_coordlist[idx:], previous[2]
        # Partial overlap (e.g. with a gap region): keep scanning.
    return False, False, False
def rename_karyotype(contig_coords, data_list):
    '''
    Re-map genome-wide circos coordinates onto per-contig entries.

    :param contig_coords: list of ``[contig_name, start, end]`` entries
        from the reference karyotype (genome-wide coordinates), e.g. from
        ``chr - NZ_JSAM00000000_1 NZ_JSAM00000000_1 0 104228 ...``
    :param data_list: list of circos rows ``[contig_X, start, end, ...]``
    :return: rows whose column 0 is replaced by the containing contig
        name; a range spanning several contigs is split into one row per
        contig (first contig truncated at its end, middle contigs emitted
        whole, last contig started at its beginning)
    '''
    import copy
    renamed_data = []
    for i, data in enumerate(data_list):
        start = int(data[1])
        end = int(data[2])
        # Resolve which contig holds each endpoint; get_contig also snaps
        # endpoints that fall in inter-contig gaps.
        contig_start, following_contigs1, position1 = get_contig(start, contig_coords, contig_pos="start")
        contig_end, following_contigs2, position2 = get_contig(end, contig_coords, contig_pos="end")
        if (contig_start is False) or (contig_end is False):
            # Unresolved endpoint: if the range starts past the last
            # contig's start, clamp the whole range to the last contig.
            if start > contig_coords[-1][1]:
                contig_start, following_contigs1, position1 = contig_coords[-1], [], start
                contig_end, following_contigs2, position2 = contig_coords[-1], [], contig_coords[-1][2]
            if contig_end is False:
                # Still unresolved: skip this row entirely.
                continue
        # Rewrite the row's coordinates with the (snapped) positions.
        data[1] = position1
        data[2] = position2
        if contig_start[0] == contig_end[0]:
            # Entire range lies within a single contig.
            data[0] = contig_start[0]
            renamed_data.append(data)
        else:
            # Range spans two or more contigs: emit one row per contig.
            # First piece: from the snapped start to the first contig's end.
            data_1 = copy.copy(data)
            data_1[0] = contig_start[0]
            data_1[2] = contig_start[2]
            renamed_data.append(data_1)
            # Walk the following contigs until we reach the final one.
            for contig2 in following_contigs1:
                if contig2[0] == contig_end[0]:
                    # Final contig of the range: start from its beginning,
                    # then stop scanning.
                    data_2 = copy.copy(data)
                    data_2[0] = contig_end[0]
                    data_2[1] = contig_end[1]
                    renamed_data.append(data_2)
                    break
                else:
                    # Contig entirely contained in the range: add it whole.
                    renamed_data.append(contig2)
    '''
    for one_contig in contig_coords:
        # within contig
        if start >= one_contig[1] and end <=one_contig[2]:
            data[0] = one_contig[0]
            renamed_data.append(data)
        # overlap between two contigs
        elif start >= one_contig[1] and start <=one_contig[2] and end >one_contig[2]:
            data_1 = data
            data_2 = data
    '''
    return renamed_data
def read_circos_file(circos_file):
    """Parse a whitespace-delimited circos data file into a list of rows.

    Each line is split on single spaces; any line that yields fewer than
    three fields is re-split on tabs instead (same fallback as the
    karyotype reader).
    """
    rows = []
    with open(circos_file) as handle:
        for line in handle:
            fields = line.rstrip().split(' ')
            if len(fields) < 3:
                # Fall back to tab-delimited parsing.
                fields = line.rstrip().split('\t')
            rows.append(fields)
    return rows
if __name__ == '__main__':
    # Argument handling.
    import argparse

    arg_parser = argparse.ArgumentParser(description='')
    arg_parser.add_argument("-i", "--reference_karyotype", help="ref karyotype")
    arg_parser.add_argument("-t", "--target_karyotype", help="target karyotype")
    arg_parser.add_argument("-o", "--out_name", help="output name")
    args = arg_parser.parse_args()

    # Bug fix: the original assigned ``out_name`` only when -o was ABSENT,
    # so explicitly passing an output name crashed with NameError below.
    if args.out_name:
        out_name = args.out_name
    else:
        out_name = (args.target_karyotype.split('.')[0] + '_renamed.'
                    + args.target_karyotype.split('.')[1])

    # Reference karyotype: keep columns 3-5 (contig name, start, end),
    # with the same space-then-tab split fallback as read_circos_file.
    with open(args.reference_karyotype, 'r') as f:
        contig_coords = []
        for row in f:
            data = row.rstrip().split(' ')
            if len(data) < 3:
                data = row.rstrip().split('\t')
            contig_coords.append([data[3], int(data[4]), int(data[5])])

    data_list = read_circos_file(args.target_karyotype)
    renamed_data = rename_karyotype(contig_coords, data_list)

    # Write the renamed rows tab-delimited.
    with open(out_name, 'w') as new_circos_file:
        for row in renamed_data:
            row = [str(i) for i in row]
            new_circos_file.write('\t'.join(row) + '\n')
|
[
"trestan.pillonel@gmail.com"
] |
trestan.pillonel@gmail.com
|
8020130f940f8f456909b2b56cec135d85f0a20b
|
8bf56892667d732c67ed0ae43fe7b08923893c71
|
/version4.py
|
2d633cee8aa4adf9a88167e0e1eb32f2e21b3da3
|
[] |
no_license
|
erhan-orun/BuildTable
|
8a3ff82a979a19f7f2b975b88c690a5f769510a0
|
546e14a548d22d0ac237c2c8e544e152e33002bd
|
refs/heads/master
| 2023-09-01T16:27:32.695756
| 2021-11-02T06:22:15
| 2021-11-02T06:22:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,908
|
py
|
import datetime as dt
import tkinter as tk
from tkinter import *
from tkinter import ttk
class App(tk.Tk):
    """Main window: a ``ttk.Treeview`` sensor table plus an "add sensor" form.

    Table columns: ID (running counter), Sensor No, IP, Time.  The form
    below the table appends one row per Save click.
    """

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # Bug fix: the original created a SECOND ``tk.Tk()`` root here,
        # which opened an empty extra window.  Use this instance as the
        # root; the ``self.root`` attribute is kept so the rest of the
        # code (and any external users) remains unchanged.
        self.root = self

        self.tree = ttk.Treeview(self.root, selectmode='extended')
        self.tree["columns"] = ("1", "2", "3", "4")
        self.tree.column("1", width=125, minwidth=30, anchor='c')
        self.tree.column("2", width=125, minwidth=30, anchor='c')
        self.tree.column("3", width=125, minwidth=30, anchor='c')
        self.tree.column("4", width=125, minwidth=30, anchor='c')
        self.tree.bind('<ButtonRelease-1>', self.selectItem)
        self.tree.heading("1", text="ID")
        self.tree.heading("2", text="Sensor No")
        self.tree.heading("3", text="IP")
        self.tree.heading("4", text="Time")
        # Hide the implicit "#0" tree column; show only the headings above.
        self.tree['show'] = 'headings'
        self.root.geometry('540x400')
        self.root.title("Sensor Insert")
        # (Removed the original ``self.root.grid()`` call: ``grid`` is a
        # widget method, not valid on the root window.)
        self.tree.grid(row=1, column=1, columnspan=4, padx=20, pady=20)

        # Bug fix: a VERTICAL scrollbar must drive ``yscrollcommand``;
        # the original wired it to ``xscrollcommand``.
        verscrlbar = ttk.Scrollbar(self.root, orient="vertical",
                                   command=self.tree.yview)
        self.tree.configure(yscrollcommand=verscrlbar.set)

        # Kept for backward compatibility; rows now get a fresh timestamp
        # in add_data() instead of reusing this construction-time value.
        self.time_data = dt.datetime.now().strftime('%Y-%m-%d %X')

        self.add_label = tk.Label(self.root, text='Add Sensor',
                                  font=('Helvetica', 16), width=30, anchor="c")
        self.add_label.grid(row=2, column=1, columnspan=4)
        self.name_label = tk.Label(self.root, text='Sensor No: ', width=10, anchor="c")
        self.name_label.grid(row=3, column=1)
        self.t1 = tk.Text(self.root, height=1, width=16, bg='white')
        self.t1.grid(row=3, column=2)
        self.l3 = tk.Label(self.root, text='Sensor IP: ', width=10)
        self.l3.grid(row=5, column=1)
        self.t3 = tk.Text(self.root, height=1, width=16, bg='white')
        self.t3.grid(row=5, column=2)
        self.b1 = tk.Button(self.root, text='Save', width=10,
                            command=self.add_data)
        self.b1.grid(row=6, column=2)
        self.my_str = tk.StringVar()
        self.l5 = tk.Label(self.root, textvariable=self.my_str, width=10)
        self.l5.grid(row=8, column=1)
        # Running row counter used as the ID column.
        self.i = 0
        # self.root.mainloop()

    def selectItem(self, event):
        """Print the value of the table cell the user clicked on."""
        global cell_value
        curItem = self.tree.item(self.tree.focus())
        col = self.tree.identify_column(event.x)
        print('curItem = ', curItem)
        print('col = ', col)
        if col == '#0':
            cell_value = curItem['text']
        elif col == '#1':
            cell_value = curItem['values'][0]
        elif col == '#2':
            cell_value = curItem['values'][1]
        elif col == '#3':
            cell_value = curItem['values'][2]
        elif col == '#4':
            cell_value = curItem['values'][3]
        print('cell_value = ', cell_value)

    def add_data(self):
        """Append the form contents as a new table row, then clear the form."""
        sensor_name = self.t1.get("1.0", END)
        sensor_ip = self.t3.get("1.0", END)
        # (Removed the bogus ``global i`` - the counter lives on self.)
        self.i = self.i + 1
        # Bug fix: timestamp each row at insertion time; the original
        # reused a single timestamp captured in __init__ for every row.
        time_data = dt.datetime.now().strftime('%Y-%m-%d %X')
        self.tree.insert("", 'end',
                         values=(int(self.i), sensor_name, sensor_ip, str(time_data)))
        self.t1.delete('1.0', END)  # reset the text entry box
        self.t3.delete('1.0', END)  # reset the text entry box
        self.my_str.set("Sensor Added !")
        self.t1.focus()
        self.l5.after(3000, lambda: self.my_str.set(''))  # remove the message
# (Removed dead code: triple-quoted experiments with a MongoDB
# ``record_data`` helper and an alternative main() - recover them from
# version control if ever needed.)

if __name__ == "__main__":
    # Launch the GUI only when executed as a script; importing this
    # module no longer opens a window as a side effect.
    app = App()
    app.mainloop()
|
[
"erhan0494@gmail.com"
] |
erhan0494@gmail.com
|
1750d9bd2d2d74e5249e0afd3dbfb651013e01bd
|
cf7d6b1f45efe4d97389da2918b4f1b04673e66f
|
/utils/utils.py
|
310d39b1f1996389d1f52e5ffaffd0b292de8091
|
[] |
no_license
|
metehancekic/deep_noise_rejection
|
4d1379c16fe57ed95aa152d39f33bf36d1c501a9
|
fd8e260e489f421fe7bd30c7ab8e9d397305247a
|
refs/heads/master
| 2022-06-18T09:07:52.907752
| 2020-05-12T18:46:07
| 2020-05-12T18:46:07
| 262,714,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,431
|
py
|
"""
Utilities
PyTorch
Example Run
python -m deep_adv.utils.utils
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import os
from tqdm import tqdm
from deepillusion.torchattacks import PGD
def save_perturbed_images(args, model, device, data_loader, data_params, attack_params):
    """Attack each batch with PGD, report adversarial accuracy, and save
    the perturbed images to ``<args.directory>/data/attacked_images/<args.model>.npz``.

    Args:
        args: namespace providing ``directory`` and ``model`` attributes.
        model: torch module under attack (switched to eval mode here).
        device: torch device that data/target tensors are moved to.
        data_loader: iterable of ``(data, target)`` batches with a
            ``.dataset`` attribute (used for averaging).
        data_params: dict forwarded to PGD (e.g. pixel bounds).
        attack_params: dict forwarded to PGD; ``attack_params['norm']``
            is also used in the report string.
    """
    # Set phase to testing (freezes dropout/batch-norm behaviour).
    model.eval()
    test_loss = 0
    correct = 0
    all_images = []
    all_labels = []
    all_preds = []
    for data, target in tqdm(data_loader):
        data, target = data.to(device), target.to(device)
        # Craft adversarial perturbations with PGD and add them in-place.
        pgd_args = dict(net=model,
                        x=data,
                        y_true=target,
                        data_params=data_params,
                        attack_params=attack_params)
        perturbs = PGD(**pgd_args)
        data += perturbs
        output = model(data)
        # Sum per-sample losses; averaged over the dataset below.
        test_loss += F.cross_entropy(output, target, reduction="sum").item()
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        all_images.append(data.detach().cpu().numpy())
        all_labels.append(target.detach().cpu().numpy())
        all_preds.append(pred.detach().cpu().numpy())
    # Divide summed-up loss by the number of datapoints in dataset
    test_loss /= len(data_loader.dataset)
    # Print out Loss and Accuracy for test set
    print(
        f"\nAdversarial test set (l_{attack_params['norm']}): Average loss: {test_loss:.2f}, Accuracy: {correct}/{len(data_loader.dataset)} ({100. * correct / len(data_loader.dataset):.2f}%)\n"
    )
    # NOTE(review): stacking per-batch arrays with np.array assumes equal
    # batch sizes; a ragged final batch yields an object array - confirm
    # the loader drops the last batch or divides evenly.
    all_images = np.array(all_images)
    all_labels = np.array(all_labels)
    all_preds = np.array(all_preds)
    if not os.path.exists(args.directory + "data/attacked_images/"):
        os.makedirs(args.directory + "data/attacked_images/")
    np.savez_compressed(
        args.directory + "data/attacked_images/" + args.model,
        images=all_images,
        labels=all_labels,
        preds=all_preds,
    )
def main():
    """Load a ResNet34 CIFAR-10 checkpoint and save PGD adversarial test
    images using command-line arguments from ``get_arguments``."""
    from ..CIFAR10.read_datasets import cifar10
    from ..CIFAR10.parameters import get_arguments
    from ..CIFAR10.models.resnet import ResNet34

    args = get_arguments()

    # Get same results for each training with same parameters !!
    torch.manual_seed(args.seed)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    train_loader, test_loader = cifar10(args)
    # Pixel-value bounds passed to the attack via data_params.
    x_min = 0.0
    x_max = 1.0

    # Decide on which model to use
    if args.model == "ResNet":
        model = ResNet34().to(device)
    else:
        raise NotImplementedError

    # NOTE(review): ``device`` is a torch.device; whether comparing it to
    # the string "cuda" is True depends on the PyTorch version, so the
    # DataParallel/cudnn setup below may never run - verify intended.
    if device == "cuda":
        model = torch.nn.DataParallel(model)
        cudnn.benchmark = True

    model.load_state_dict(
        torch.load(args.directory + "checkpoints/" + args.model + ".pt")
    )

    data_params = {"x_min": x_min, "x_max": x_max}
    attack_params = {
        "norm": "inf",
        "eps": args.epsilon,
        "step_size": args.step_size,
        "num_steps": args.num_iterations,
        "random_start": args.rand,
        "num_restarts": args.num_restarts,
    }
    save_perturbed_images(
        args,
        model,
        device,
        test_loader,
        data_params=data_params,
        attack_params=attack_params,
    )
if __name__ == "__main__":
    # Script entry point: craft and save PGD-attacked test images.
    main()
|
[
"metehancekic@umail.ucsb.edu"
] |
metehancekic@umail.ucsb.edu
|
21d2f7e2323d617b60c05ef764ccd5f70ec6a1c2
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/34/usersdata/82/13368/submittedfiles/moedas.py
|
7c8cb901e522ee6f3565298cf8f75f60a478e842
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division

# Read coin values a, b and target amount c, then search for
# non-negative counts qa, qb with qa*a + qb*b == c.
a = int(input('Digite o valor de a:'))
b = int(input('Digite o valor de b:'))
c = int(input('Digite o valor de c:'))
qa = 0
qb = 0
contador = 0
while qa <= (c // a):
    # For each candidate count of coin ``a``, take as many ``b`` coins as
    # fit in the remainder and test for an exact match.
    qb = (c - qa * a) // b
    if qa * a + qb * b == c:
        contador = contador + 1
        break
    else:
        qa = qa + 1
if contador > 0:
    # Bug fix: ``print (%qa)`` / ``print (%qb)`` were syntax errors; the
    # intent (per the search above) is to print the two coin counts.
    print(qa)
    print(qb)
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
afaefeacf6a86365775689c2eb6cfda2d6b6b824
|
7f031e500bb73f084f932a166c3398672a3b8027
|
/config.py
|
0dbbc38f5b3b39dd4a87cfdac60b95503a2eff92
|
[] |
no_license
|
Guangzhan/nlp_demo
|
109fb0ed7f6bfc3469ac71cc59106449c1927ec5
|
4e88515968156461326dff3046c8bba14a12e32f
|
refs/heads/master
| 2020-09-13T16:38:58.744704
| 2019-11-20T03:37:56
| 2019-11-20T03:37:56
| 222,843,808
| 0
| 0
| null | 2019-11-20T03:35:08
| 2019-11-20T03:35:07
| null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
__author__ = 'yangbin1729'

import os

# Absolute path of the directory containing this config module; anchors
# the development model paths below.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all environments."""

    # Session-signing key.  NOTE(review): hard-coded secrets should be
    # moved to environment variables for real deployments.
    SECRET_KEY = '123@456#789$012%'
class DevelopmentConfig(Config):
    """Development settings: model files live under ``<basedir>/models``."""

    DEBUG = True
    MODEL_DIR = os.path.join(basedir, 'models')
    # Bug fix: the original path component 'word2vec\wiki_corpus_above200.model'
    # embedded a literal backslash (and the invalid escape sequence ``\w``),
    # which only resolves on Windows.  Let os.path.join choose the separator.
    Word2Vec_DIR = os.path.join(MODEL_DIR, 'word2vec',
                                'wiki_corpus_above200.model')
    LTP_DATA_DIR = os.path.join(MODEL_DIR, 'ltp')
    CLASSIFIER_DIR = os.path.join(MODEL_DIR, 'classifier')
    TOKENIZER = os.path.join(CLASSIFIER_DIR, 'tokenizer.pickle')
class ProductionConfig(Config):
    """Production settings: model files live at a fixed server path."""

    DEBUG = False
    MODEL_DIR = r'/home/student/project/project-01/noam/project01/models'
    Word2Vec_DIR = os.path.join(MODEL_DIR,
                                'word2vec/wiki_corpus_above200.model')
    LTP_DATA_DIR = os.path.join(MODEL_DIR, 'ltp')
    CLASSIFIER_DIR = os.path.join(MODEL_DIR, 'classifier')
    TOKENIZER = os.path.join(CLASSIFIER_DIR, 'tokenizer.pickle')
config = {'development': DevelopmentConfig, 'production': ProductionConfig, }
|
[
"360661716@qq.com"
] |
360661716@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.