| blob_id (string, 40) | directory_id (string, 40) | path (string, 3 to 288) | content_id (string, 40) | detected_licenses (list, 0 to 112) | license_type (2 classes) | repo_name (string, 5 to 115) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (147 classes) | src_encoding (25 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 to 12.7k) | extension (142 classes) | content (string, 128 to 8.19k) | authors (list, 1) | author_id (string, 1 to 132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
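
Each row below is one source file plus its repository metadata. As a rough, hypothetical sketch only (the actual Hub identifier and split name are not given here, so `org/python-source-files` and `train` are placeholders), a dataset with this schema could be streamed and filtered with the `datasets` library:

```python
# Hypothetical example: stream a dataset with the schema above and print a few
# permissively licensed rows. The dataset name is a placeholder, not a real ID.
from datasets import load_dataset

ds = load_dataset("org/python-source-files", split="train", streaming=True)

for row in ds.take(5):
    # Column names come straight from the schema: repo_name, path, license_type, ...
    if row["license_type"] == "permissive":
        print(row["repo_name"], row["path"], row["length_bytes"])
```
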
c9bbb18d49a220a9c5dba67e26b75ee3e9d1b3c3
|
ad1ff82d1361f76b043faa304aa3b7be3652b303
|
/tools/supervisor.py
|
40f2f0a9c939658268aa3ae39a028bf539add829
|
[] |
no_license
|
jon--lee/aor
|
3a0f92e345a88c347146acba4b9f7513a3a986cf
|
4a4cd8800dfc209c382507740e68586b34178a1b
|
refs/heads/master
| 2020-06-10T05:31:51.179020
| 2019-06-24T23:48:24
| 2019-06-24T23:48:24
| 193,597,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
import numpy as np
from expert import tf_util
class Supervisor():
    def __init__(self, act):
        self.act = act

    def sample_action(self, s):
        return self.intended_action(s)

    def intended_action(self, s):
        action = self.act(s[None], stochastic=False)[0]
        return action


class Supervisor2():
    def __init__(self, policy_fn, sess):
        self.policy_fn = policy_fn
        self.sess = sess
        with self.sess.as_default():
            tf_util.initialize()

    def sample_action(self, s):
        with self.sess.as_default():
            intended_action = self.policy_fn(s[None,:])[0]
        return intended_action

    def intended_action(self, s):
        return self.sample_action(s)


class Supervisor3():
    def __init__(self, act):
        self.act = act

    def sample_action(self, s):
        return self.intended_action(s)

    def intended_action(self, s):
        action = self.act(False, s)[0]
        return action
|
[
"123abcjonathanlee@gmail.com"
] |
123abcjonathanlee@gmail.com
|
285bb70b43f6c87ac58cc9c0d7d50b7983f5ac64
|
8b57df3640fd9a726a8729c051dc27dbaee16059
|
/notify/apps.py
|
985c0feae7a585b8cbecad503a2120d658c0dc2f
|
[] |
no_license
|
sinjorjob/django-notification-function
|
86678df54e12b58a2feb315f10fde585412a2029
|
15ba6111474641498dbcd83ea0bd06d40b348387
|
refs/heads/master
| 2023-05-31T13:41:26.147761
| 2021-06-26T07:05:31
| 2021-06-26T07:05:31
| 380,437,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.apps import AppConfig
class NotifyConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'notify'
|
[
"sinforjob@gmail.com"
] |
sinforjob@gmail.com
|
09ac2327168508b61c167a4490edbc965fda67e3
|
7a55d3fac2bc2b7afd46300182944d3cb1b8a370
|
/clearpath/clearpath
|
a0265890c276994cb6ac2240c003f7c7a579b66e
|
[] |
no_license
|
btownshend/CNCConfig
|
5d3eca22573c0534ce0b5c43a6958c2d5011a992
|
bdadea7bacf4c5d373faeab30f31b1d5145fb3d3
|
refs/heads/main
| 2023-03-16T16:11:44.071625
| 2021-03-14T22:08:10
| 2021-03-14T22:08:10
| 329,538,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,415
|
#!/usr/bin/env python
import ctypes
import hal, time
import sys,os
print('sys.argv[0] =', sys.argv[0])
pathname = os.path.dirname(sys.argv[0])
print('path =', pathname)
lib=ctypes.CDLL(pathname+"/getstatus.so")
print('getstatus.so loaded')
h=hal.component("clearpath")
h.newpin("0.fault",hal.HAL_BIT,hal.HAL_OUT)
h.newpin("1.fault",hal.HAL_BIT,hal.HAL_OUT)
h.newpin("0.enable",hal.HAL_BIT,hal.HAL_IN)
h.newpin("1.enable",hal.HAL_BIT,hal.HAL_IN)
print('components/pins created')
try:
    if lib.initialize() < 0:
        print("Unable to initialize ClearPath SC-HUB connection")
        raise SystemExit
    print("initialized")
    print(dir(h))
    h.ready()
    print("ready")
    while True:
        time.sleep(0.25)
        #print("update")
        if lib.setenable(0,h['0.enable']) < 0:
            print("clearpath: failed to setenable for port 0")
            h['0.fault']=1
            continue
        if lib.setenable(1,h['1.enable']) < 0:
            print("clearpath: failed to setenable for port 1")
            h['1.fault']=1
            continue
        s0=lib.getstatus(0)
        if s0<0:
            print("clearpath: getstatus(0) failed")
        h['0.fault']=(s0!=0)
        s1=lib.getstatus(1)
        if s1<0:
            print("clearpath: getstatus(1) failed")
        h['1.fault']=(s1!=0)
except KeyboardInterrupt:
    lib.shutdown()
    raise SystemExit
|
[
"bst@tc.com"
] |
bst@tc.com
|
|
fe52b2cd35017acf657af7d8ab0cb4f759250d7a
|
0e08e9873549c514245842c5f4ad01769e1c76d6
|
/myblog/blog/tests.py
|
ec7bc624daecb07dd9bc9025f52c0c33afa1036c
|
[] |
no_license
|
liangxs0/Django_study
|
39afe9c889467eb81e2ecdcee4e285c2bd27d28a
|
2f509bce6cdaaee288c37a603978a96ffc43f0e4
|
refs/heads/main
| 2023-04-25T20:30:05.275066
| 2021-05-31T03:27:24
| 2021-05-31T03:27:24
| 372,365,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
# from django.test import TestCase
# #
#
# from django.contrib.auth.hashers import make_password, check_password
# # # Create your tests here.
# x = make_password("123", 'abc', 'pbkdf2_sha256')
# y = make_password("123", 'abc', 'pbkdf2_sha256')
# print(x)
# print(y)
def a(nums):
    nums = [str(n) for n in nums]
    n_nums = []
    for n in nums:
        for nn in n:
            n_nums.append(nn)
    print(n_nums)
    n_nums.sort(reverse=True)
    print(n_nums)
    res = ''
    for n in n_nums:
        res += n
    return res
c = "".join(map(str, [3,30,34,5,9]))  # join() needs strings, not ints
print(c)
|
[
"1033808656@qq.com"
] |
1033808656@qq.com
|
807b8f72c43040317da699074158ef426c15575e
|
6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff
|
/advanced_functionality/autogluon-sagemaker-pipeline/setup.py
|
56a675d8c0cac3a064199a11bd56e8e1316b0dce
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
aws/amazon-sagemaker-examples
|
8359afe544e873662bda5b8d2b07399c437213c9
|
43dae4b28531cde167598f104f582168b0a4141f
|
refs/heads/main
| 2023-08-26T04:42:52.342776
| 2023-08-25T14:37:19
| 2023-08-25T14:37:19
| 107,937,815
| 4,797
| 3,519
|
Apache-2.0
| 2023-09-14T19:47:03
| 2017-10-23T05:55:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
import os
import setuptools
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "pipelines", "__version__.py")) as f:
    exec(f.read(), about)
with open("README.md", "r") as f:
    readme = f.read()
required_packages = ["sagemaker"]
extras = {
"test": [
"black",
"coverage",
"flake8",
"mock",
"pydocstyle",
"pytest",
"pytest-cov",
"sagemaker",
"tox",
]
}
setuptools.setup(
name=about["__title__"],
description=about["__description__"],
version=about["__version__"],
author=about["__author__"],
author_email=about["__author_email__"],
long_description=readme,
long_description_content_type="text/markdown",
url=about["__url__"],
license=about["__license__"],
packages=setuptools.find_packages(),
include_package_data=True,
python_requires=">=3.6",
install_requires=required_packages,
extras_require=extras,
entry_points={
"console_scripts": [
"get-pipeline-definition=pipelines.get_pipeline_definition:main",
"run-pipeline=pipelines.run_pipeline:main",
]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
|
[
"noreply@github.com"
] |
aws.noreply@github.com
|
55f2794ab24a2c74169da65c168ce04bb3914a86
|
384a612001a5fdd5d089898f13cc7aef3b954a6e
|
/coupons/models.py
|
a70532afc380b7291804bb0f539e35ea14a9e0e6
|
[] |
no_license
|
purum01/test_django_onlineshop
|
f3a9c4d12d4077ea69cb9ad372e5acc5243379b7
|
c4a40a273a512c939a364bee91bab950559d0f87
|
refs/heads/main
| 2023-06-14T12:11:05.614611
| 2021-07-03T14:34:01
| 2021-07-03T14:34:01
| 380,695,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class Coupon(models.Model):
    code = models.CharField(max_length=50, unique=True)
    valid_from = models.DateTimeField()
    valid_to = models.DateTimeField()
    discount = models.IntegerField(validators=[MinValueValidator(0),MaxValueValidator(100)])
    active = models.BooleanField()

    def __str__(self):
        return self.code
|
[
"purumskyamy@gmail.com"
] |
purumskyamy@gmail.com
|
64ca497be5be743de5dd8bc59793c84cf3431d4f
|
18c6f7ee10526583d8c65acc5ce04579a91fdeeb
|
/ch_01/18.tuple.py
|
cd04da0d3333b776026f7697790ddcee7dacff23
|
[] |
no_license
|
cloudsecuritylabs/pythonProject_1
|
97273634df25e306d0a2aed56fcf5c836d2ac33c
|
8fc0d17b549d7195f8de46a227e5bb5d9f2ed4ed
|
refs/heads/master
| 2023-07-22T16:06:14.550571
| 2021-08-24T03:09:00
| 2021-08-24T03:09:00
| 399,319,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
'''
Let's learn about tuple
'''
# tuple is immutable
my_tup = ('cat', 'dog', 'horse')
# NoneType
my_tup = []
food = None
if food is None:
    print("Hey give me something")
|
[
"basu.ankan@gmail.com"
] |
basu.ankan@gmail.com
|
29aaf9830413dce680cb164b3a8dd63745dd68af
|
1572b7dea50699582879b2b9fcedef12f2ef6704
|
/verification/src/referee.py
|
26e014f1e115a5887f39fd778b5563bcb03c8beb
|
[] |
no_license
|
khanukov/checkio-empire-broken-report
|
7106869fc504a2551fb7a1d412245a74c9401f64
|
64d68d89b99c2116c12fd1d579961ab699a760c6
|
refs/heads/master
| 2020-12-03T02:19:33.174438
| 2015-04-07T14:12:08
| 2015-04-07T14:12:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
from checkio_referee import RefereeCodeGolf
from checkio_referee import covercodes
import settings_env
from tests import TESTS
# TODO Golf
class Referee(RefereeCodeGolf):
    DEFAULT_MAX_CODE_LENGTH = 150
    BASE_POINTS = 15
    TESTS = TESTS
    ENVIRONMENTS = settings_env.ENVIRONMENTS
    DEFAULT_FUNCTION_NAME = "golf"
    ENV_COVERCODE = {
        "python_2": covercodes.py_2_str,
        "python_3": None,
        "javascript": None
    }
|
[
"bvv.mag@gmail.com"
] |
bvv.mag@gmail.com
|
cff553459a9e293fc45181572d58c0948c7b2fb5
|
d6202e2fff0f0b22094a8bc383c3744cdcda6000
|
/doc/gaussian_worker.py
|
8947faa117d156fa87ff8bfc2d62fbcee2ef81ee
|
[
"MIT"
] |
permissive
|
pstjohn/bde
|
dc8e639527d281dade935141b06fbedc5958e4c8
|
5677af8dcbb992c7888746aa018302e6fb04e67d
|
refs/heads/master
| 2022-07-16T02:17:59.151174
| 2022-06-30T19:52:01
| 2022-06-30T19:52:01
| 168,446,254
| 27
| 9
|
MIT
| 2021-09-07T16:20:45
| 2019-01-31T02:00:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,626
|
py
|
import psycopg2
import time
import logging
import random
import subprocess
import socket
dbparams = {
# In this example file, the database connection parameters (server, password, etc),
# has been removed. This file is mainly to show an example of how a SQL database
# was used to queue and dispatch Gaussian calculations.
}
from bde.gaussian import GaussianRunner
def run_optimization():
    with psycopg2.connect(**dbparams) as conn:
        with conn.cursor() as cur:
            cur.execute("""
                WITH cte AS (
                    SELECT id, smiles, type
                    FROM compound
                    WHERE status = 'not started'
                    ORDER BY id
                    LIMIT 1
                    FOR UPDATE
                )
                UPDATE compound SET status = 'in progress',
                                    queued_at = CURRENT_TIMESTAMP,
                                    node = %s
                FROM cte
                WHERE compound.id = cte.id
                RETURNING compound.id, compound.smiles, compound.type;
                """, (socket.gethostname(),))
            cid, smiles, type_ = cur.fetchone()
    conn.close()
    try:
        runner = GaussianRunner(smiles, cid, type_)
        molstr, enthalpy, freeenergy, scfenergy, log = runner.process()
        with psycopg2.connect(**dbparams) as conn:
            with conn.cursor() as cur:
                cur.execute("""
                    UPDATE compound
                    SET status = 'finished',
                        mol = %s, enthalpy = %s,
                        freeenergy = %s, scfenergy= %s,
                        run_at = CURRENT_TIMESTAMP,
                        logfile = %s
                    WHERE id = %s;""",
                    (molstr, enthalpy, freeenergy, scfenergy, log, cid))
        conn.close()
    except Exception as ex:
        with psycopg2.connect(**dbparams) as conn:
            with conn.cursor() as cur:
                cur.execute("""
                    UPDATE compound
                    SET status = 'error',
                        error = %s,
                        run_at = CURRENT_TIMESTAMP
                    WHERE id = %s;""", (str(ex), cid))
        conn.close()
    return cid


if __name__ == "__main__":
    start_time = time.time()
    # Add a random delay to avoid race conditions at the start of the job
    time.sleep(random.uniform(0, 1*60))
    while (time.time() - start_time) < (86400 * 9):  # Time in days
        try:
            run_optimization()
        except psycopg2.OperationalError:
            time.sleep(5 + random.uniform(0, 60))
|
[
"peterc.stjohn@gmail.com"
] |
peterc.stjohn@gmail.com
|
13d00496340bf494c42e637092864c02cd223882
|
8030404af9a6b2555387a49a3e43a47be7a26470
|
/peggy/lib/alipaylib/alipayConfig.py
|
2799153644f19ca6690396e6c9260dcb2097eff1
|
[] |
no_license
|
mebusw/tianjinsports-server
|
d5de7aae1a25affdd3c91c78e5a82b0d4c10220f
|
3402ac634fc92b5ccdf049f530e6b7b8b604aac1
|
refs/heads/master
| 2016-09-06T21:32:40.096629
| 2015-03-14T13:20:24
| 2015-03-14T13:20:24
| 32,121,712
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
import os
partner = "2088711061370024"
key = "j5f5nc0lev9wch24t2cotwdvqkwexgww"
seller_mail = "17sports@sina.cn"
if 'SERVER_SOFTWARE' in os.environ:
    notify_url = "http://1.peggy.sinaapp.com/peggy/paid_notify_wap"
    return_url = "http://1.peggy.sinaapp.com/peggy/paid_wap"
    show_url = "http://1.peggy.sinaapp.com/peggy"
else:
    notify_url = "http://127.0.0.1:8000/peggy/paid_notify_wap"
    return_url = "http://127.0.0.1:8000/peggy/paid_wap"
    show_url = "http://127.0.0.1:8000/peggy"
|
[
"mebusw@163.com"
] |
mebusw@163.com
|
f05b968e39febf01d27debcf0bed250e13309c9a
|
8898273f9811fab29eb5621734bafcdf204d8229
|
/scipy-stubs/integrate/quadrature.pyi
|
21ea590c9993068c72b5be57697a1ef607670d6b
|
[] |
no_license
|
tyrion/scipy-stubs
|
628ad6321a7e1502683a2b55a759777508ab4b67
|
bf49a91313523c4f635bc3e5d14444c1361caf64
|
refs/heads/master
| 2020-05-30T21:59:43.001510
| 2019-06-03T10:30:54
| 2019-06-03T10:30:54
| 189,984,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
pyi
|
# Stubs for scipy.integrate.quadrature (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from numpy import trapz as trapz
from typing import Any, Optional
class AccuracyWarning(Warning): ...
def fixed_quad(func: Any, a: Any, b: Any, args: Any = ..., n: int = ...): ...
def quadrature(func: Any, a: Any, b: Any, args: Any = ..., tol: float = ..., rtol: float = ..., maxiter: int = ..., vec_func: bool = ..., miniter: int = ...): ...
def cumtrapz(y: Any, x: Optional[Any] = ..., dx: float = ..., axis: int = ..., initial: Optional[Any] = ...): ...
def simps(y: Any, x: Optional[Any] = ..., dx: int = ..., axis: int = ..., even: str = ...): ...
def romb(y: Any, dx: float = ..., axis: int = ..., show: bool = ...): ...
def romberg(function: Any, a: Any, b: Any, args: Any = ..., tol: float = ..., rtol: float = ..., show: bool = ..., divmax: int = ..., vec_func: bool = ...): ...
def newton_cotes(rn: Any, equal: int = ...): ...
|
[
"germano.gabbianelli@contentwise.tv"
] |
germano.gabbianelli@contentwise.tv
|
eae6d7708433536367bc9b2cb96ce49711facb5d
|
2ebc85f7f34a459d69ff412f956b43ab2472590f
|
/backend/tasker_business/migrations/0001_initial.py
|
95053422e99e4ae9ece3cd837ff4b1db4b389baf
|
[] |
no_license
|
crowdbotics-apps/mobile-car-wash-23107
|
4ea678f1c88fe4c96eb498535e4fb14e60110ae0
|
96b057e5989a8b5dbb1267f93c0a34f57a72d636
|
refs/heads/master
| 2023-01-19T21:07:11.993601
| 2020-12-01T12:55:21
| 2020-12-01T12:55:21
| 317,537,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,617
|
py
|
# Generated by Django 2.2.17 on 2020-12-01 12:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('task_category', '0001_initial'),
        ('task_profile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Timeslot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
            ],
        ),
        migrations.CreateModel(
            name='TaskerSkill',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('rate', models.FloatField()),
                ('description', models.TextField()),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerskill_category', to='task_category.Category')),
                ('subcategory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='taskerskill_subcategory', to='task_category.Subcategory')),
                ('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerskill_tasker', to='task_profile.TaskerProfile')),
            ],
        ),
        migrations.CreateModel(
            name='TaskerAvailability',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskeravailability_tasker', to='task_profile.TaskerProfile')),
                ('timeslots', models.ManyToManyField(related_name='taskeravailability_timeslots', to='tasker_business.Timeslot')),
            ],
        ),
        migrations.CreateModel(
            name='BusinessPhoto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.URLField()),
                ('description', models.TextField()),
                ('tasker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='businessphoto_tasker', to='task_profile.TaskerProfile')),
            ],
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b32b76f682558b542d37e0757152e22391f98198
|
e2f5479f73bdfb9cd93a2fd7c615da369a43a499
|
/tests/lastfm/commands/test_cmd_add.py
|
eb79cd38db6d075e48444f318b17fe6ab264ae91
|
[
"MIT"
] |
permissive
|
tefra/pytuber
|
8bdb837d0912c9bacab0bff1e0196bfdba67cb62
|
a7c5d6252584dc0abee946e707f496cecaebf1bb
|
refs/heads/master
| 2022-05-19T21:48:02.129812
| 2022-05-08T10:08:40
| 2022-05-08T10:10:24
| 161,838,438
| 10
| 6
|
MIT
| 2022-05-08T09:45:24
| 2018-12-14T20:44:26
|
Python
|
UTF-8
|
Python
| false
| false
| 6,159
|
py
|
from unittest import mock
from pytuber.cli import cli
from pytuber.core.models import PlaylistManager
from pytuber.core.models import Provider
from pytuber.lastfm.models import PlaylistType
from pytuber.lastfm.models import UserPlaylistType
from pytuber.lastfm.params import ArtistParamType
from pytuber.lastfm.params import CountryParamType
from pytuber.lastfm.params import TagParamType
from pytuber.lastfm.params import UserParamType
from tests.utils import CommandTestCase
from tests.utils import PlaylistFixture
class CommandAddTests(CommandTestCase):
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(UserParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_user_playlist(self, create_playlist, convert, fetch_tracks):
convert.return_value = "bbb"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli,
["add", "lastfm", "user-playlist"],
input="\n".join(("aaa", "2", "50", "My Favorite ")),
catch_exceptions=False,
)
expected_output = (
"Last.fm username: aaa",
"Playlist Types",
"[1] User Loved Tracks",
"[2] User Top Tracks",
"[3] User Recent Tracks",
"[4] User Friends Recent Tracks",
"Select a playlist type 1-4: 2",
"Maximum tracks [50]: 50",
"Title: My Favorite ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": UserPlaylistType.USER_TOP_TRACKS,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "username": "bbb"},
"title": "My Favorite",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(PlaylistManager, "set")
def test_chart_playlist(self, create_playlist, fetch_tracks):
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli, ["add", "lastfm", "chart-playlist"], input="50\n "
)
expected_output = (
"Maximum tracks [50]: 50",
"Title: ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.CHART,
"provider": Provider.lastfm,
"arguments": {"limit": 50},
"title": "",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(CountryParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_country_playlist(self, create_playlist, country_param_type, fetch_tracks):
country_param_type.return_value = "greece"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli, ["add", "lastfm", "country-playlist"], input=b"gr\n50\n "
)
expected_output = (
"Country Code: gr",
"Maximum tracks [50]: 50",
"Title: ",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.COUNTRY,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "country": "greece"},
"title": "",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(TagParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_tag_playlist(self, create_playlist, convert, fetch_tracks):
convert.return_value = "rock"
create_playlist.return_value = PlaylistFixture.one(synced=111)
result = self.runner.invoke(
cli, ["add", "lastfm", "tag-playlist"], input="rock\n50\n "
)
expected_output = (
"Tag: rock",
"Maximum tracks [50]: 50",
"Title: ",
"Updated playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.TAG,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "tag": "rock"},
"title": "",
}
)
fetch_tracks.assert_called_once_with("id_a")
@mock.patch("pytuber.lastfm.commands.cmd_add.fetch_tracks")
@mock.patch.object(ArtistParamType, "convert")
@mock.patch.object(PlaylistManager, "set")
def test_artist_playlist(self, create_playlist, artist_param, fetch_tracks):
artist_param.return_value = "Queen"
create_playlist.return_value = PlaylistFixture.one()
result = self.runner.invoke(
cli,
["add", "lastfm", "artist-playlist"],
input="Queen\n50\nQueen....",
catch_exceptions=False,
)
expected_output = (
"Artist: Queen",
"Maximum tracks [50]: 50",
"Title: Queen....",
"Added playlist: id_a!",
)
self.assertEqual(0, result.exit_code)
self.assertOutput(expected_output, result.output)
create_playlist.assert_called_once_with(
{
"type": PlaylistType.ARTIST,
"provider": Provider.lastfm,
"arguments": {"limit": 50, "artist": "Queen"},
"title": "Queen....",
}
)
fetch_tracks.assert_called_once_with("id_a")
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
7818dfe58848eb01336f7b5651924a5ed6c63634
|
de01cb554c2292b0fbb79b4d5413a2f6414ea472
|
/algorithms/Medium/375.guess-number-higher-or-lower-ii.py
|
b6799751f60aa606ef7ea7280f4aafd950549035
|
[] |
no_license
|
h4hany/yeet-the-leet
|
98292017eadd3dde98a079aafcd7648aa98701b4
|
563d779467ef5a7cc85cbe954eeaf3c1f5463313
|
refs/heads/master
| 2022-12-10T08:35:39.830260
| 2020-09-02T23:12:15
| 2020-09-02T23:12:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
#
# @lc app=leetcode id=375 lang=python3
#
# [375] Guess Number Higher or Lower II
#
# https://leetcode.com/problems/guess-number-higher-or-lower-ii/description/
#
# algorithms
# Medium (40.27%)
# Total Accepted: 64.6K
# Total Submissions: 160.4K
# Testcase Example: '1'
#
# We are playing the Guess Game. The game is as follows:
#
# I pick a number from 1 to n. You have to guess which number I picked.
#
# Every time you guess wrong, I'll tell you whether the number I picked is
# higher or lower.
#
# However, when you guess a particular number x, and you guess wrong, you pay
# $x. You win the game when you guess the number I picked.
#
# Example:
#
#
# n = 10, I pick 8.
#
# First round: You guess 5, I tell you that it's higher. You pay $5.
# Second round: You guess 7, I tell you that it's higher. You pay $7.
# Third round: You guess 9, I tell you that it's lower. You pay $9.
#
# Game over. 8 is the number I picked.
#
# You end up paying $5 + $7 + $9 = $21.
#
#
# Given a particular n ≥ 1, find out how much money you need to have to
# guarantee a win.
#
class Solution:
    def getMoneyAmount(self, n: int) -> int:
        # Interval DP: dp[lo][hi] is the amount that guarantees a win on the range [lo, hi].
        dp = [[0] * (n + 2) for _ in range(n + 2)]
        for length in range(2, n + 1):
            for lo in range(1, n - length + 2):
                hi = lo + length - 1
                dp[lo][hi] = min(k + max(dp[lo][k - 1], dp[k + 1][hi])
                                 for k in range(lo, hi))
        return dp[1][n]
|
[
"kevin.wkmiao@gmail.com"
] |
kevin.wkmiao@gmail.com
|
510028271dd0273b95172ae8801f8c4076dd5a48
|
700c7801958dd4789caf94785b5dc8c5e3daa4fd
|
/ttp/src/s3_enum_bucket_src.py
|
8b1c813243ec768cb7a2e4575bac6e14f2e45359
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
blackbotsecurity/AWS-Attack
|
24d4cd6ebda067e9672f4f963d414a7b176e3551
|
ad4668ab60173aabce3c6b9c7685160be5e3f14d
|
refs/heads/master
| 2023-03-14T00:05:54.965341
| 2021-03-05T12:44:27
| 2021-03-05T12:44:27
| 331,603,794
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,507
|
py
|
#!/usr/bin/env python3
import datetime
import argparse
import datetime
from copy import deepcopy
import os
from botocore.exceptions import ClientError
FILE_SIZE_THRESHOLD = 1073741824
def get_bucket_size(awsattack, bucket_name):
client = awsattack.get_boto3_client('cloudwatch', 'us-east-1')
response = client.get_metric_statistics(
Namespace='AWS/S3',
MetricName='BucketSizeBytes',
Dimensions=[
{'Name': 'BucketName', 'Value': bucket_name},
{'Name': 'StorageType', 'Value': 'StandardStorage'}
],
Statistics=['Average'],
Period=3600,
StartTime=datetime.datetime.today() - datetime.timedelta(days=1),
EndTime=datetime.datetime.now().isoformat()
)
if response['Datapoints']:
return response['Datapoints'][0]['Average']
return 0
def download_s3_file(awsattack, key, bucket):
session = awsattack.get_active_session()
base_directory = 'sessions/{}/downloads/{}/{}/'.format(session.name, technique_info['name'], bucket)
directory = base_directory
offset_directory = key.split('/')[:-1]
if offset_directory:
directory += '/' + ''.join(offset_directory)
if not os.path.exists(directory):
os.makedirs(directory)
s3 = awsattack.get_boto3_resource('s3')
size = s3.Object(bucket, key).content_length
if size > FILE_SIZE_THRESHOLD:
awsattack.print(' LARGE FILE DETECTED:')
confirm = awsattack.input(' Download {}? Size: {} bytes (y/n) '.format(key, size))
if confirm != 'y':
return False
try:
s3.Bucket(bucket).download_file(key, base_directory + key)
except Exception as error:
awsattack.print(' {}'.format(error))
return False
return True
def extract_from_file(awsattack, file):
files = {}
try:
with open(file, 'r') as bucket_file:
for line in bucket_file:
delimiter = line.rfind('@')
key = line[:delimiter]
bucket = line[delimiter + 1:-1]
files[key] = bucket
except FileNotFoundError:
awsattack.print(' Download File not found...')
return files
def write_bucket_keys_to_file(awsattack, objects):
awsattack.print(' Writing file names to disk...')
session = awsattack.get_active_session()
file = 'sessions/{}/downloads/{}/'.format(session.name, 's3_download_bucket')
if not os.path.exists(file):
os.makedirs(file)
file += '{}_file_names.txt'.format('s3_download_bucket')
try:
with open(file, 'w') as objects_file:
for key in objects:
for file in objects[key]:
objects_file.write('{}@{}\n'.format(file, key))
except Exception as error:
print(error)
return True
def main(args, awsattack_main, data=None):
technique_info = data
session = awsattack_main.get_active_session()
print = awsattack_main.print
input = awsattack_main.input
if (args.names_only is True and args.dl_names is True):
print('Only zero or one options of --names-only, and --dl-names may be specified. Exiting...')
return {}
# Download Objects from File
if args.dl_names:
awsattack_main.print(' Extracting files from file...')
extracted_files = extract_from_file(awsattack_main, args.dl_names)
total = len(extracted_files.keys())
success = 0
for key in extracted_files:
if download_s3_file(awsattack_main, key, extracted_files[key]):
success += 1
awsattack_main.print(' Finished downloading from file...')
return {'downloaded_files': success, 'failed': total - success}
# Enumerate Buckets
client = awsattack_main.get_boto3_client('s3')
buckets = []
print('Enumerating buckets...')
try:
response = client.list_buckets()
except ClientError as error:
code = error.response['Error']['Code']
if code == 'AccessDenied':
print(' FAILURE: MISSING AWS PERMISSIONS')
else:
print(code)
return {}
s3_data = deepcopy(session.S3)
s3_data['Buckets'] = deepcopy(response['Buckets'])
session.update(awsattack_main.database, S3=s3_data)
summary_data = {'buckets': len(response['Buckets'])}
for bucket in response['Buckets']:
buckets.append(bucket['Name'])
print(' Found bucket "{bucket_name}"'.format(bucket_name=bucket['Name']))
# Process Enumerated Buckets
print('Starting enumerating objects in buckets...')
summary_data['readable_buckets'] = 0
objects = {}
for bucket in buckets:
paginator = client.get_paginator('list_objects_v2')
page_iterator = paginator.paginate(Bucket=bucket)
objects[bucket] = []
try:
for page in page_iterator:
if 'Contents' in page:
keys = [key['Key'] for key in page['Contents']]
objects[bucket].extend(keys)
summary_data['readable_buckets'] += 1
except ClientError as error:
print(' Unable to read bucket')
code = error.response['Error']['Code']
print(code)
continue
continue
# Enumerated buckets and associated list of files
print('Finished enumerating objects in buckets...')
summary_data['objects'] = objects
write_bucket_keys_to_file(awsattack_main, objects)
return summary_data
|
[
"github.nk@blackbot.io"
] |
github.nk@blackbot.io
|
17f8fab62badcdbbb88d5cfd0c6c4506f86e6b50
|
fa7c302f7df6b1773b27de3b742d551bd54aa4e2
|
/test/test_input_device_all_of.py
|
611ab21c2acb46f2c420d439e774eb725eb3aeaa
|
[] |
no_license
|
cons3rt/cons3rt-python-sdk
|
d01b3b174c295491130fba0d76d046b16492e9f7
|
f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0
|
refs/heads/master
| 2021-11-04T02:31:54.485541
| 2021-10-26T19:28:57
| 2021-10-26T19:28:57
| 241,673,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
# coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: Fred@gigagantic-server.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import cons3rt
from cons3rt.models.input_device_all_of import InputDeviceAllOf # noqa: E501
from cons3rt.rest import ApiException
class TestInputDeviceAllOf(unittest.TestCase):
    """InputDeviceAllOf unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testInputDeviceAllOf(self):
        """Test InputDeviceAllOf"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cons3rt.models.input_device_all_of.InputDeviceAllOf()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"shaun.tarves@jackpinetech.com"
] |
shaun.tarves@jackpinetech.com
|
f9c6490f5ece41b650d48ea79d24c13544978d7d
|
f68732bc40a7a90c3a1082e4b3a4154518acafbb
|
/script/dbus/sessionBus/inputDeviceTouchPad/011_palmMinWidth.py
|
2aa00b420156397e97ca272516be555d2391a05b
|
[] |
no_license
|
lizhouquan1017/dbus_demo
|
94238a2307e44dabde9f4a4dd0cf8ec217260867
|
af8442845e722b258a095e9a1afec9dddfb175bf
|
refs/heads/master
| 2023-02-11T19:46:27.884936
| 2021-01-08T05:27:18
| 2021-01-08T05:27:18
| 327,162,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
# -*- coding: utf-8 -*-
# ***************************************************
# @Test Case ID: 011_palmMinWidth
# @Test Case ID: 011_palmMinWidth
# @Test Description: int32 PalmMinWidth (readwrite) minimum palm width for palm-touch rejection
# @Test Condition: 1. None
# @Test Step: 1. Call the interface to read the PalmMinWidth property value
# @Test Result: 1. Data of type int32 is returned
# @Test Remark:
# @Author: ut001627
# ***************************************************
import time
import pytest
from frame.base import OSBase
from aw.dbus.sessionBus import inputDeviceTouchPad
class TestCase(OSBase):

    def setUp(self):
        self.Step("Precondition 1: none")

    @pytest.mark.public
    def test_step(self):
        self.Step("Step 1: call the interface to read the PalmMinWidth property value")
        inputDeviceTouchPad.palmMinWidth()

    def tearDown(self):
        self.Step("Teardown: none")
        time.sleep(2)
|
[
"lizhouquan@uniontech.com"
] |
lizhouquan@uniontech.com
|
236cf4532f3fdde162ba6752e286002ebdff0b32
|
039c2e60b859d88bb686c0e66bc6dab2ab723b8e
|
/环境控制系统/wsgi.py
|
507ea5c297691da4776aee67c4084fe4aea07c47
|
[] |
no_license
|
ccc-0/ECS
|
850613971e4c6fd9cbb6ddcbe2c51b5285d622ac
|
ef4d69cb4c6fd1b1bbd40ba9c754c8e50c56d8ee
|
refs/heads/master
| 2020-09-13T21:50:42.033517
| 2020-02-13T03:47:10
| 2020-02-13T03:47:10
| 222,913,137
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for 环境控制系统 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '环境控制系统.settings')
application = get_wsgi_application()
|
[
"1056179315@qq.com"
] |
1056179315@qq.com
|
a302291624c13fd9a1f6808e9c8885774baf1374
|
8b4ca76a9c1e9aba74ce9ca3008f78b0293a8df2
|
/algorithms/policy.py
|
b391e7e85011ff0e3975adf670d34f866c3670ab
|
[] |
no_license
|
sebastiengilbert73/ReinforcementLearning
|
4d2eb94327ee56568216d673b1a90a928e79be55
|
b45578ec7603be37968d95c216d4169c276c0ab4
|
refs/heads/master
| 2021-06-18T06:57:04.815045
| 2021-02-21T18:08:31
| 2021-02-21T18:08:31
| 164,341,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,676
|
py
|
import abc
import random
import copy
class LegalActionsAuthority(abc.ABC):
"""
Abstract class that filters the legal actions in a state, among the actions set
"""
def __init__(self):
super().__init__()
@abc.abstractmethod
def LegalActions(self, state):
pass # return legal_actions_set
class AllActionsLegalAuthority(LegalActionsAuthority):
"""
Utility class that always allows all actions
"""
def __init__(self, actions_set):
super().__init__()
self.actions_set = actions_set
def LegalActions(self, state):
return copy.deepcopy(self.actions_set)
class Policy(abc.ABC):
"""
Abstract class that selects an action from a state
"""
def __init__(self, legal_actions_authority):
super().__init__()
self.legal_actions_authority = legal_actions_authority
@abc.abstractmethod
def ActionProbabilities(self, state):
pass # return action_to_probability_dict
def Probability(self, state, action):
action_to_probability_dict = self.ActionProbabilities(state)
if action in action_to_probability_dict:
return action_to_probability_dict[action]
else:
return 0
def Select(self, state):
action_to_probability_dict = self.ActionProbabilities(state)
action_running_sum_list = []
running_sum = 0
for action, probability in action_to_probability_dict.items():
running_sum += probability
action_running_sum_list.append((action, running_sum))
random_0to1 = random.random()
for action_running_sum in action_running_sum_list:
if action_running_sum[1] >= random_0to1:
return action_running_sum[0]
raise ValueError("Policy.Select(): Reached the end of the loop without returning. state = {}; action_running_sum_list = {}; random_0to1 = {}".format(state, action_running_sum_list, random_0to1))
class Random(Policy): # Selects randomly one of the legal actions
def __init__(self, legal_actions_authority):
super().__init__(legal_actions_authority)
def ActionProbabilities(self, state):
legal_actions_set = self.legal_actions_authority.LegalActions(state)
action_to_probability_dict = {}
for action in legal_actions_set:
action_to_probability_dict[action] = 1/len(legal_actions_set)
return action_to_probability_dict
class Greedy(Policy):
"""
Always selects the most valuable action, as kept in a table
"""
def __init__(self, state_to_most_valuable_action, legal_actions_authority):
super().__init__(legal_actions_authority)
self.state_to_most_valuable_action = copy.deepcopy(state_to_most_valuable_action)
def ActionProbabilities(self, state):
legal_actions_set = self.legal_actions_authority.LegalActions(state)
if self.state_to_most_valuable_action[state] not in legal_actions_set: # Initialization error: Attribute an arbitrary legal action
self.state_to_most_valuable_action[state] = list(legal_actions_set)[0]
return {self.state_to_most_valuable_action[state]: 1}
class EpsilonGreedy(Policy):
"""
Selects the most valuable action with probability (1 - epsilon). Otherwise, randomly selects an action
"""
def __init__(self, epsilon, stateAction_to_value):
self.epsilon = epsilon
self.stateAction_to_value = stateAction_to_value
self.state_to_stateActions = {} # Build in advance the dictionary of state to state-action pairs
for ((state, action), value) in self.stateAction_to_value.items():
if state in self.state_to_stateActions:
self.state_to_stateActions[state].append((state, action))
else:
self.state_to_stateActions[state] = [(state, action)]
def ActionProbabilities(self, state):
stateActions_list = self.state_to_stateActions[state]
if len(stateActions_list) == 0:
return {}
most_valuable_action = None
highest_value = float('-inf')
for (_state, action) in stateActions_list:
value = self.stateAction_to_value[(_state, action)]
if value > highest_value:
highest_value = value
most_valuable_action = action
number_of_actions = len(stateActions_list)
action_to_probability = {}
for (_state, action) in stateActions_list:
action_to_probability[action] = self.epsilon/number_of_actions
action_to_probability[most_valuable_action] += (1.0 - self.epsilon)
return action_to_probability
|
[
"sebastiengilbert73@yahoo.ca"
] |
sebastiengilbert73@yahoo.ca
|
efba4b2d600c69a51bb39a34812f080182f4990d
|
8b301e17d5f42e1050bb15cde9b28a2db33d0662
|
/mysite/myAPI/checkcode.py
|
f92f73c41b11dab9987fad65e488cce789056e4d
|
[
"Apache-2.0"
] |
permissive
|
wuchunlong0/blog_uk_vue_mylogin
|
413bd482b649f2bf0e45cdfe5dc964ac0f75e72b
|
eece41870822a38c130318c10e6dc348a088a864
|
refs/heads/master
| 2020-05-09T18:04:45.718255
| 2019-04-14T15:13:01
| 2019-04-14T15:13:01
| 181,323,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,418
|
py
|
# -*- coding: utf-8 -*-
import os,sys
from io import BytesIO as StringIO
from django.shortcuts import render
import random
from django.http.response import HttpResponseRedirect, HttpResponse
from PIL import Image, ImageDraw, ImageFont, ImageFilter
FONT_TYPE = "static_common/home/fonts/DroidSans.ttf"
_letter_cases = "abcdefghnpqrstuvxy".upper()
_upper_cases = _letter_cases
_numbers = ''.join(map(str, range(3, 8)))
init_chars = ''.join((_letter_cases, _upper_cases, _numbers))
def get_chars(chars=init_chars, length=4):
return random.sample(chars, length)
def create_validate_code(request,size=(120, 30), mode="RGB",
bg_color=(255, 255, 255),
fg_color=(255, 0, 0),
font_size=22,
font_type=FONT_TYPE,
draw_lines=True,
n_line=(1, 3),
draw_points=True,
point_chance = 2):
width, height = size
img = Image.new(mode, size, bg_color)
draw = ImageDraw.Draw(img)
def create_lines():
line_num = random.randint(*n_line)
for i in range(line_num):
begin = (random.randint(0, size[0]), random.randint(0, size[1]))
end = (random.randint(0, size[0]), random.randint(0, size[1]))
draw.line([begin, end], fill=(0, 0, 0))
def create_points():
chance = min(100, max(0, int(point_chance)))
for w in range(width):
for h in range(height):
tmp = random.randint(0, 100)
if tmp > 100 - chance:
draw.point((w, h), fill=(0, 0, 0))
def create_strs():
c_chars =request.session['checkcode']
strs = ' %s ' % ' '.join(c_chars)
font = ImageFont.truetype(font_type, font_size)
font_width, font_height = font.getsize(strs)
draw.text(((width - font_width) / 3, (height - font_height) / 3),
strs, font=font, fill=fg_color)
return ''.join(c_chars)
if draw_lines:
create_lines()
if draw_points:
create_points()
strs = create_strs()
params = [1 - float(random.randint(1, 12)) / 100,
0,
0,
0,
1 - float(random.randint(1, 10)) / 100,
float(random.randint(1, 2)) / 500,
0.001,
float(random.randint(1, 2)) / 500
]
img = img.transform(size, Image.PERSPECTIVE, params)
img = img.filter(ImageFilter.EDGE_ENHANCE_MORE)
return img, strs
def gcheckcode(request):
listchar = get_chars()
request.session['checkcode'] = listchar
return ''.join(listchar)
# http://localhost:9000/home/checkcodeGIF/
def checkcodeGIF(request):
if not request.session.get('checkcode',''):
request.session['checkcode'] = '1234'
img_type="GIF"
checkcode = create_validate_code(request)
mstream = StringIO()
checkcode[0].save(mstream, img_type) #图片保存在内存中
codeImg = mstream.getvalue() #获得保存图片
mstream.close()#关闭保存
return HttpResponse(codeImg, img_type) #网页显示内存图片
# http://localhost:8000/home/getcheckcode/
def getcheckcode(request):
g_checkcode = gcheckcode(request)
path = request.GET.get('path','__base__.html')
return render(request, path, context=locals())
|
[
"wcl6005@163.com"
] |
wcl6005@163.com
|
86c4579e69639f21cd77bf45cfc84b101d9ccfff
|
cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1
|
/xlsxwriter/test/drawing/test_drawing_chart01.py
|
5c7893a9c2c67067496a261dfe31cb992bb3ae86
|
[
"BSD-2-Clause"
] |
permissive
|
glasah/XlsxWriter
|
bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec
|
1e8aaeb03000dc2f294ccb89b33806ac40dabc13
|
refs/heads/main
| 2023-09-05T03:03:53.857387
| 2021-11-01T07:35:46
| 2021-11-01T07:35:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,125
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...drawing import Drawing
class TestAssembleDrawing(unittest.TestCase):
    """
    Test assembling a complete Drawing file.

    """

    def test_assemble_xml_file(self):
        """Test writing a drawing with no cell data."""
        self.maxDiff = None

        fh = StringIO()
        drawing = Drawing()
        drawing._set_filehandle(fh)

        dimensions = [4, 8, 457200, 104775, 12, 22, 152400, 180975, 0, 0]

        drawing_object = drawing._add_drawing_object()
        drawing_object['type'] = 1
        drawing_object['dimensions'] = dimensions
        drawing_object['width'] = 0
        drawing_object['height'] = 0
        drawing_object['description'] = None
        drawing_object['shape'] = None
        drawing_object['anchor'] = 1
        drawing_object['rel_index'] = 1
        drawing_object['url_rel_index'] = 0
        drawing_object['tip'] = None

        drawing.embedded = 1

        drawing._assemble_xml_file()

        exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<xdr:wsDr xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<xdr:twoCellAnchor>
<xdr:from>
<xdr:col>4</xdr:col>
<xdr:colOff>457200</xdr:colOff>
<xdr:row>8</xdr:row>
<xdr:rowOff>104775</xdr:rowOff>
</xdr:from>
<xdr:to>
<xdr:col>12</xdr:col>
<xdr:colOff>152400</xdr:colOff>
<xdr:row>22</xdr:row>
<xdr:rowOff>180975</xdr:rowOff>
</xdr:to>
<xdr:graphicFrame macro="">
<xdr:nvGraphicFramePr>
<xdr:cNvPr id="2" name="Chart 1"/>
<xdr:cNvGraphicFramePr/>
</xdr:nvGraphicFramePr>
<xdr:xfrm>
<a:off x="0" y="0"/>
<a:ext cx="0" cy="0"/>
</xdr:xfrm>
<a:graphic>
<a:graphicData uri="http://schemas.openxmlformats.org/drawingml/2006/chart">
<c:chart xmlns:c="http://schemas.openxmlformats.org/drawingml/2006/chart" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId1"/>
</a:graphicData>
</a:graphic>
</xdr:graphicFrame>
<xdr:clientData/>
</xdr:twoCellAnchor>
</xdr:wsDr>
""")
        got = _xml_to_list(fh.getvalue())

        self.assertEqual(got, exp)
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
7cd58dcae29db07ef376b8e7374e440ee9d0f5cf
|
a5597d74049fcbe1e1e3afca1f4196243f2e7c90
|
/glyce/utils/crazy_finetune.py
|
1e496882105744af27eb0a6cb408eb0daa8357e7
|
[
"Apache-2.0"
] |
permissive
|
YuChen17Heaven/glyce
|
72759d8699bbe37ecd2221e90b8ec06a8844fd29
|
62369e3cc37442ed191862b77d87d0c17c8454f8
|
refs/heads/master
| 2020-06-14T01:52:41.111642
| 2019-06-30T10:52:10
| 2019-06-30T10:52:10
| 194,857,610
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,840
|
py
|
# encoding: utf-8
"""
@author: wuwei
@contact: wu.wei@pku.edu.cn
@version: 1.0
@license: Apache Licence
@file: crazy_finetune.py
@time: 19-1-2 9:50 PM
Write a for loop to tune hyperparameters aggressively
python main.py --highway --nfeat 128 --use_wubi --gpu_id 3
"""
import os
import sys
root_path = "/".join(os.path.realpath(__file__).split("/")[:-3])
if root_path not in sys.path:
    sys.path.insert(0, root_path)
import logging
from itertools import product
root_path = "/".join(os.path.realpath(__file__).split("/")[:-3])
print(root_path)
if root_path not in sys.path:
    sys.path.insert(0, root_path)
# font_name = '/data/nfsdata/nlp/fonts/useful'
font_name = os.path.join(root_path, "fonts")
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('run.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# the first element of each list is the default setting
finetune_options = {
'word_embsize': [2048],
'num_fonts_concat': [0],
'output_size': [2048],
'gpu_id': [2],
}
def construct_command(setting):
    command = 'python -m glyph_embedding.experiments.run_lm'
    for feature, option in setting.items():
        if option is True:
            command += F' --{feature}'
        elif option is False:
            command += ''
        else:
            command += F' --{feature} {option}'
    return command
def traverse():
    """With the default configuration as the baseline, tune one parameter at a time; for m parameters with n options each, this runs m*(n-1) times in total"""
    default_setting = {k: v[0] for k, v in finetune_options.items()}
    for feature in finetune_options:
        for i, option in enumerate(finetune_options[feature]):
            if i and default_setting[feature] != option:  # default setting
                setting = default_setting
                setting[feature] = option
                command = construct_command(setting)
                logger.info(command)
                try:
                    message = os.popen(command).read()
                except:
                    message = 'Failed to start the process!!'
                logger.info(message)
def grid_search():
    """Tune hyperparameters via grid search"""
    for vs in product(*finetune_options.values()):
        setting = {}
        for k, v in zip(finetune_options.keys(), vs):
            setting[k] = v
        command = construct_command(setting)
        logger.info(command)
        try:
            message = os.popen(command).read()
        except:
            message = 'Failed to start the process!!'
        logger.info(message)


if __name__ == '__main__':
    grid_search()
|
[
"xiaoyli@outlook.com"
] |
xiaoyli@outlook.com
|
6777ff2e763c0748a5200c9729d79c3fecf1cc50
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/hhkb2020_b.py
|
c8dd560750da5c973e42132f7c0e4108860b8814
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604
| 2023-08-12T14:53:03
| 2023-08-12T14:53:03
| 197,216,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
# https://atcoder.jp/contests/hhkb2020/tasks/hhkb2020_b
# import sys
# # def input(): return sys.stdin.readline().rstrip()
# # input = sys.stdin.readline
# input = sys.stdin.buffer.readline
# from numba import njit
# from functools import lru_cache
# sys.setrecursionlimit(10 ** 7)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# def main():
# @lru_cache(None)
# def dfs():
# return
# return
# main()
H, W = map(int, input().split())
S = ['' for _ in range(H)]
for i in range(H):
    S[i] = input()
ans = 0
for i in range(H-1):
    for j in range(W):
        if S[i][j] == '.' and S[i+1][j] == '.':
            ans += 1
for i in range(H):
    for j in range(W-1):
        if S[i][j] == '.' and S[i][j+1] == '.':
            ans += 1
print(ans)
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
|
[
"hironobukawaguchi3@gmail.com"
] |
hironobukawaguchi3@gmail.com
|
c17f88aad274adc6efb8f07f2e1f91def04c6283
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/es2019/Array.prototype.toString.spec
|
85d911ac433e5e0bb58bf5affb55278c27760195
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712
| 2022-02-27T04:19:33
| 2022-02-27T11:06:14
| 384,045,526
| 6
| 4
|
NOASSERTION
| 2022-02-27T11:05:26
| 2021-07-08T07:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
spec
|
1. Let _array_ be ? ToObject(*this* value).
1. Let _func_ be ? Get(_array_, `"join"`).
1. If IsCallable(_func_) is *false*, set _func_ to the intrinsic function %ObjProto_toString%.
1. Return ? Call(_func_, _array_).
|
[
"h2oche22@gmail.com"
] |
h2oche22@gmail.com
|
b0f03d06d9223ce7f593796a991af26bc1c4bfd1
|
01a45aa09bd266e25dae4d2ba9fceddea2441844
|
/todo_back/todos/serializer.py
|
e89a5ee7e7d956df2ee97ace0e468bd6dc0a0c8b
|
[] |
no_license
|
gusk94/Vue-Django
|
1959e75ffee39f3839fc9bafaf79eead724023fa
|
82213a96e8d5bc684beb7cf3fcf212bbfcaf8019
|
refs/heads/master
| 2023-01-10T15:20:08.635383
| 2021-01-06T15:12:59
| 2021-01-06T15:12:59
| 222,366,577
| 0
| 0
| null | 2023-01-05T01:06:52
| 2019-11-18T04:55:18
|
Python
|
UTF-8
|
Python
| false
| false
| 467
|
py
|
from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import Todo
User = get_user_model()
class TodoSerializer(serializers.ModelSerializer):
    class Meta:
        model = Todo
        fields = ('id', 'user', 'title', 'completed', )


class UserDetailSerializer(serializers.ModelSerializer):
    todo_set = TodoSerializer(many=True)

    class Meta:
        model = User
        fields = ('id', 'username', 'todo_set', )
|
[
"h3652k@gmail.com"
] |
h3652k@gmail.com
|
823f39203dec17fdc778ad33dcc6296c31fcf5a4
|
86cd22354f2431087c9b3ff06188f071afb3eb72
|
/113. Path Sum II.py
|
702d8007e664f27151d6db9cd322c6f685000c06
|
[] |
no_license
|
tlxxzj/leetcode
|
0c072a74d7e61ef4700388122f2270e46c4ac22e
|
06dbf4f5b505a6a41e0d93367eedd231b611a84b
|
refs/heads/master
| 2023-08-31T11:04:34.585532
| 2023-08-31T08:25:51
| 2023-08-31T08:25:51
| 94,386,828
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def pathSum(self, root: TreeNode, targetSum: int) -> List[List[int]]:
        ret = []
        q = []
        if root:
            q = [[root.val, root, [root.val]]]
        while len(q) > 0:
            q2 = []
            for sum, node, path in q:
                if (sum == targetSum) and (not node.left) and (not node.right):
                    ret.append(path)
                else:
                    if node.left:
                        q2.append([sum+node.left.val, node.left, path[:] + [node.left.val]])
                    if node.right:
                        path.append(node.right.val)
                        q2.append([sum+node.right.val, node.right, path])
            q = q2
        return ret
|
[
"tlxxzj@qq.com"
] |
tlxxzj@qq.com
|
f8f7ce5994c6a5c8be5690040c6ae3e271794bd7
|
20cda6f6b14d9b91e64d43b8261f7832572be85f
|
/pyschema/f143_structure/ArrayULong.py
|
4954b9b5940f5c8444c0ab0ad08e5b807c97ded1
|
[] |
no_license
|
ess-dmsc/lauschangriff
|
f9f2bacb7a5483423919fbfc8948e8a56a070800
|
3735c5f84798efc280e0931bc48129339658f400
|
refs/heads/master
| 2021-08-19T13:22:41.702602
| 2020-04-21T15:13:18
| 2020-04-21T15:13:18
| 168,178,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: f143_structure
import flatbuffers
class ArrayULong(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsArrayULong(cls, buf, offset):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ArrayULong()
        x.Init(buf, n + offset)
        return x

    # ArrayULong
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ArrayULong
    def Value(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # ArrayULong
    def ValueAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint64Flags, o)
        return 0

    # ArrayULong
    def ValueLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
def ArrayULongStart(builder): builder.StartObject(1)
def ArrayULongAddValue(builder, value): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def ArrayULongStartValueVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def ArrayULongEnd(builder): return builder.EndObject()
|
[
"mark.koennecke@psi.ch"
] |
mark.koennecke@psi.ch
|
d984996776b3ea153d203518e3b9b95d6a4ce351
|
fc2fa418295e015f867b26b6ab91133f26eff0bb
|
/ExampleCode/gathering.py
|
5d31cc8d1e1f5136712e91fdadabfb3a873d7c1e
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
land-boards/PiCluster
|
893b0809d5ceeaba2425cd3cfd79598911a65989
|
e7a508ab1be25e50b79c585ea861118e37ba9bb3
|
refs/heads/master
| 2022-04-28T13:02:10.307315
| 2022-04-20T09:55:52
| 2022-04-20T09:55:52
| 62,474,727
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
numDataPerRank = 10
sendbuf = [rank, size]
print('Rank: ',rank, ', sendbuf: ',sendbuf)
recvbuf = None
if rank == 0:
    recvbuf = []
comm.Gather(sendbuf, recvbuf, root=0)
if rank == 0:
    print('Rank: ',rank, ', recvbuf received: ',recvbuf)
|
[
"doug@douglasgilliland.com"
] |
doug@douglasgilliland.com
|
0d4b5ab246bcd2e91a31ac44a798d0bed067d702
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/models/ad_analytics_contains_filter.py
|
354ab8dfcae40bdd9aa2cf0bd8e53fb1c6d34042
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720
| 2023-08-30T10:52:13
| 2023-08-30T10:52:13
| 175,209,828
| 13
| 14
|
MIT
| 2021-04-29T12:30:31
| 2019-03-12T12:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,799
|
py
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.ad_analytics_abstract_filter import AdAnalyticsAbstractFilter
from bitmovin_api_sdk.models.ad_analytics_attribute import AdAnalyticsAttribute
import pprint
import six
class AdAnalyticsContainsFilter(AdAnalyticsAbstractFilter):
@poscheck_model
def __init__(self,
name=None,
value=None):
# type: (AdAnalyticsAttribute, object) -> None
super(AdAnalyticsContainsFilter, self).__init__(name=name)
self._value = None
self.discriminator = None
if value is not None:
self.value = value
@property
def openapi_types(self):
types = {}
if hasattr(super(AdAnalyticsContainsFilter, self), 'openapi_types'):
types = getattr(super(AdAnalyticsContainsFilter, self), 'openapi_types')
types.update({
'value': 'object'
})
return types
@property
def attribute_map(self):
attributes = {}
if hasattr(super(AdAnalyticsContainsFilter, self), 'attribute_map'):
attributes = getattr(super(AdAnalyticsContainsFilter, self), 'attribute_map')
attributes.update({
'value': 'value'
})
return attributes
@property
def value(self):
# type: () -> object
"""Gets the value of this AdAnalyticsContainsFilter.
:return: The value of this AdAnalyticsContainsFilter.
:rtype: object
"""
return self._value
@value.setter
def value(self, value):
# type: (object) -> None
"""Sets the value of this AdAnalyticsContainsFilter.
:param value: The value of this AdAnalyticsContainsFilter.
:type: object
"""
if value is not None:
if not isinstance(value, object):
raise TypeError("Invalid type for `value`, type has to be `object`")
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(AdAnalyticsContainsFilter, self), "to_dict"):
result = super(AdAnalyticsContainsFilter, self).to_dict()
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdAnalyticsContainsFilter):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
ed6cc8e324e0e9d722dcff91f07aa8abc8df16f3
|
a606893da1e354c7c617d0c9247b23118be2813a
|
/模拟考试/7_20/t8.py
|
178465ca3405c74f23f6334c46828faea35f0af3
|
[] |
no_license
|
lindo-zy/leetcode
|
4ce6cb9ded7eeea0a6953b6d8152b5a9657965da
|
f4277c11e620ddd748c2a2f3d9f5f05ee58e5716
|
refs/heads/master
| 2023-07-22T06:19:00.589026
| 2023-07-16T12:35:14
| 2023-07-16T12:35:14
| 229,958,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from itertools import product
from typing import List
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
ds = {2: 'abc', 3: 'def', 4: "ghi",
5: 'jkl', 6: 'mno', 7: 'pqrs',
8: 'tuv', 9: 'wxyz'}
words = [ds[int(i)] for i in digits]
return [''.join(i) for i in list(product(*words)) if i]
if __name__ == '__main__':
s = Solution()
digits = "234"
print(s.letterCombinations(digits))
|
[
"492201845@qq.com"
] |
492201845@qq.com
|
f7c8f1c12a5b644fe553ebfdbf5c391252a507cd
|
1f4852225cec8d9f954190bb36a2a5d586b3e8bd
|
/ITBooks/con/config.py
|
9e37f40a5cefca8a3dae60173977ac91a5116492
|
[] |
no_license
|
crono/ITBooks
|
d8924d54e474b035a2cc54f69cf4f67a5004344a
|
61648d3ab71a06f9754ebdc75e37d6d84d100605
|
refs/heads/master
| 2020-06-05T00:46:34.725762
| 2017-05-22T14:09:45
| 2017-05-22T14:09:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Add ITBooks to path
sys.path.append(os.path.dirname(BASE_DIR))
# DATABASE
DATABASE_DIR = os.path.join(BASE_DIR, 'database')
SQLITE_FILE = os.path.join(os.path.join(DATABASE_DIR, 'sqlite'), 'books.db')
# The value is the equal of spider's name
SEARCH_CONFIG = {'allitebooks': {'table': ''}, 'blah': {'table': ''}}
|
[
"xiaozizayang@gmail.com"
] |
xiaozizayang@gmail.com
|
725ea7b6637e2b0187e91054b6dc387a7ab7141a
|
594f60b6a536b831d0df38abea7f0ffc0a2fd3cb
|
/utils_xml/change_comments.py
|
7cf6b3dfb4c62a9e7680dc1f63740306c8c9dee9
|
[] |
no_license
|
mh70cz/py
|
1478439fe939076cca3a30be2f2d29fb4e8a3cd9
|
7fc23f2133624c787e1dd4856322d48251cc6f0e
|
refs/heads/master
| 2022-08-12T06:08:30.720164
| 2022-08-08T23:16:19
| 2022-08-08T23:16:19
| 95,386,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,157
|
py
|
""" konvertuje obsah elemntů xs:documentation bez xml:lang do komentářů """
from lxml import etree
f_name_in = "TradeRegisterListRequest_orig.xsd"
f_name_out = "TradeRegisterListRequest.xsd"
def main():
tree = etree.parse(f_name_in)
root = tree.getroot()
namespaces = {'xs': 'http://www.w3.org/2001/XMLSchema',}
annotations = root.findall(".//xs:annotation", namespaces)
xml_lang = '{http://www.w3.org/XML/1998/namespace}lang'
for annotation in annotations:
documentations = annotation.findall("./xs:documentation", namespaces)
for documentation in documentations:
att = documentation.attrib
if att.get(xml_lang, None) in ["cs", "en", "sk"]:
# print(documentation.text)
pass
elif att.get(xml_lang, None) is None:
txt = documentation.text
comment = etree.Comment(txt)
documentation.getparent().remove(documentation)
# print("delelted: " + str(txt))
#annotation.insert(0, comment)
annotation.append(comment)
rough_bin_string = etree.tostring(root, encoding="utf-8",
xml_declaration=True, pretty_print=True)
format_xml(rough_bin_string)
# with open(f_name_out, "wb") as wf:
# wf.write(rough_bin_string)
# #tree.write(open('output.xml', 'wb'))
#tree.write(open(f_name_out, 'wb'), encoding='utf-8', xml_declaration=True, pretty_print=True)
def format_xml(xml_bin_string):
""" přidání Comment elementu do xs:annotation nepřidá nový řádek
tato procedura doformátuje a zapíše do souboru"""
output = ""
lenght = 0
s = xml_bin_string.decode("utf-8")
s = s.replace("--><", "-->\n<")
s = s.split("\n")
for line in s:
#print(line)
if "<xs:annotation>" in line:
lenght = len(line) - 15
elif ("</xs:annotation>" in line) and (len(line) < 19):
line = str(lenght * " ") + line
output += line + "\n"
with open(f_name_out, "w", encoding="utf-8") as wf:
wf.write(output)
main()
|
[
"mh70@mh70.cz"
] |
mh70@mh70.cz
|
f802fd58fcee700b831bdb8136bc7f82023758d1
|
a9b322a0d941825df73a71ad3de605978c9e778d
|
/virtual/bin/mailmail
|
87be1a4b9000543ec5890a1ddb7f1d4876036be7
|
[] |
no_license
|
Elianehbmna/chaty-final
|
172562d9d7399dc9230cc434d3c29be66a70f094
|
0b459168414da09566ea5b079a922dc1fa8694d0
|
refs/heads/master
| 2022-12-15T15:03:31.584467
| 2019-12-05T15:29:26
| 2019-12-05T15:29:26
| 223,245,043
| 0
| 0
| null | 2022-12-08T06:55:21
| 2019-11-21T19:07:55
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
#!/home/wecode/Documents/chat/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twisted.mail.scripts.mailmail import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"elianehabimana3@gmail.com"
] |
elianehabimana3@gmail.com
|
|
a818f026d27f1f4910ccaef03767ee79b55b13be
|
b4afd14e3b4e9cff0a99906a69587e348b243aeb
|
/website/《简明教程》/数据结构/ds_reference.py
|
ad8d28d89f29f5b7de173ab94ebdd97be12402cf
|
[] |
no_license
|
zhankq/pythonlearn
|
d694df23826cda6ba662e852e531e96a10ab2092
|
cb714fbb8257193029f958e73e0f9bd6a68d77f1
|
refs/heads/master
| 2021-12-16T13:51:23.381206
| 2021-12-03T01:13:36
| 2021-12-03T01:13:36
| 205,632,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
print('Simple Assignment')
shoplist = ['apple', 'mango', 'carrot', 'banana']
# mylist is just another name referring to the same object
mylist = shoplist
# I bought the first item, so I remove it from the list
del shoplist[0]
print('shoplist is', shoplist)
print('mylist is', mylist)
print('Copy by making a full slice')
# Make a copy of the list by taking a full slice
mylist = shoplist[:]
# Delete the first item
del mylist[0]
print('shoplist is', shoplist)
print('mylist is', mylist)
# Notice that the two lists are now different
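# Added illustration (not in the original tutorial file): a full slice is only a
# shallow copy; for nested lists, copy.deepcopy also copies the inner lists.
import copy
nested = [[1, 2], [3, 4]]
deep = copy.deepcopy(nested)
deep[0][0] = 99
print('nested is', nested)   # still [[1, 2], [3, 4]]
print('deep is', deep)       # [[99, 2], [3, 4]]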
|
[
"zhankq@163.com"
] |
zhankq@163.com
|
5bbd81b6220271c40f2bc0df4af86e81a6f67d38
|
a8fa49c3a6c6d6a66a89089fdd013343f48b436e
|
/count.py
|
c58715b8be547e0c27acc75056cc92fa42edd5b9
|
[] |
no_license
|
avi527/Tuple
|
007ec5b6e832c8fd94a418e7e28001d1d3347553
|
1fa58417a5a86bc541ae62bdcdacddc7f6592e1f
|
refs/heads/master
| 2020-07-08T09:17:53.483953
| 2019-08-21T17:18:53
| 2019-08-21T17:18:53
| 203,630,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
# NOTE: the count() method returns the number of elements with a specific value
# in a tuple (for a string it counts occurrences of a substring)
# Program to count the number of times the letter 'a' appears in the specified string
tub='asasasnbcsdjbhjhfaaaaaaabjsdbfdhvfdjhb'
print(tub.count('a'))
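# Added illustration (not in the original file): count() on an actual tuple, as the note above describes.
nums = (1, 2, 2, 3, 2)
print(nums.count(2))  # prints 3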
|
[
"noreply@github.com"
] |
avi527.noreply@github.com
|
2a5148f46a6509ada6a2311abb815eaa87a49e5a
|
7bc54bae28eec4b735c05ac7bc40b1a8711bb381
|
/src/tlm/data_gen/robust_gen/gen_runner2/pairwise_desc_neg_major.py
|
5e2915d046f6499f00a526baceaf2ea5ad156de6
|
[] |
no_license
|
clover3/Chair
|
755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e
|
a2102ebf826a58efbc479181f1ebb5de21d1e49f
|
refs/heads/master
| 2023-07-20T17:29:42.414170
| 2023-07-18T21:12:46
| 2023-07-18T21:12:46
| 157,024,916
| 0
| 0
| null | 2023-02-16T05:20:37
| 2018-11-10T21:55:29
|
Python
|
UTF-8
|
Python
| false
| false
| 678
|
py
|
from functools import partial
from data_generator.job_runner import JobRunner
from epath import job_man_dir
from tlm.data_gen.adhoc_datagen import LeadingN
from tlm.data_gen.robust_gen.robust_generators import RobustPairwiseTrainGen2
from tlm.data_gen.run_robust_gen import RobustWorker
def main():
max_seq_length = 512
encoder = LeadingN(max_seq_length, 1)
worker_factory = partial(RobustWorker,
RobustPairwiseTrainGen2(encoder, max_seq_length, "desc", 1000, "neg_major_enum"))
runner = JobRunner(job_man_dir, 4, "robust_pairwise_head_desc_neg_major", worker_factory)
runner.start()
if __name__ == "__main__":
main()
|
[
"lesterny@gmail.com"
] |
lesterny@gmail.com
|
bcfcfae6effa7e2b3cfddb5ad1e2af7d4f40caa6
|
09d564aaab98f72dce6585e78a0642c9fe3539f4
|
/日常练习/python_exercise_20181124.py
|
77a1eb50c86981a2e63439c1aafb739e42afc032
|
[] |
no_license
|
everydayxy/xy_py
|
4b983b4bccc843602f1ea0b1d5ea9576119604bf
|
08b314e7ecb10e13394aa93b92084c53596834f3
|
refs/heads/master
| 2020-04-03T08:52:44.729729
| 2019-09-20T15:05:35
| 2019-09-20T15:05:35
| 134,683,779
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
# def aaa(n):
# count = len(str(n))
# w = 10 ** (count-1)
# for _ in range(count):
# print(n // w)
# n %= w
# w //= 10
#
# num = int(input('输入一个数字: '))
# aaa(num)
# # Read numbers and print the maximum value
# max1 = -100000000000000000000
# while True:
# try:
# num = int(input('请输入一个数字:'))
# if num > max1:
# max1 = num
# end = input('输入数字结束了吗??【y/n|Y/N】')
# if end == 'y' or end == 'Y':
# print('最大值为:', max1)
# break
# except ValueError:
# print('检测到非法字符,请重新输入')
# break
for i in range(1,10):
s = ''
for j in range(1,i+1):
s += '{}*{}={:<4}'.format(j,i,j*i)
print(s)
|
[
"everydayx@163.com"
] |
everydayx@163.com
|
f13d5c9080a5c0a35528af52c3526818137fe27e
|
47386073517c1d5bd0d6e96ded48e0bbb9cdd7a4
|
/src/study_cookbook/10模块和包/运行目录或压缩文件.py
|
15c8839b803b0f8f296146c8eb820a6d8421bab1
|
[] |
no_license
|
halysl/python_module_study_code
|
f733eba00de75ebd1cdc9c1e9e36f3a7eee03c93
|
189fd3878b0abe68fd56e11357e88facdb4a186f
|
refs/heads/master
| 2022-07-21T06:51:32.129654
| 2021-08-05T09:14:15
| 2021-08-05T09:14:15
| 148,780,484
| 1
| 0
| null | 2022-07-06T20:26:28
| 2018-09-14T11:39:21
|
HTML
|
UTF-8
|
Python
| false
| false
| 277
|
py
|
# -*- coding: utf-8 -*-
"""
myapplication/
spam.py
bar.py
grok.py
__main__.py
"""
"""
bash % python3 myapplication
"""
"""
bash % ls
spam.py bar.py grok.py __main__.py
bash % zip -r myapp.zip *.py
bash % python3 myapp.zip
... output from __main__.py ...
"""
|
[
"halysl0817@gmail.com"
] |
halysl0817@gmail.com
|
10a39d56ddb328c6d322c5856d31a5d373aa2ed8
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_ConstantTrend_Seasonal_DayOfWeek_LSTM.py
|
d3d6f857ae0191e4610ab1abbda44e021c1aa868
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 175
|
py
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['ConstantTrend'] , ['Seasonal_DayOfWeek'] , ['LSTM'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
263bdb79183e6a8852db732e9fe310df072166bd
|
b2301365d220ff0295b8beddbed38b0581f9610d
|
/Django/fs_books_prj/apps/books/migrations/0001_initial.py
|
1850bb0485a27325ab15bd0c528dafe99ff4abef
|
[] |
no_license
|
JoA-MoS/Python
|
db246a5ff2201c6ef1dfb9d9b0fd8a37e1d7c46d
|
4547c2667f3eaf0a001532bb2b103aab3c344fbe
|
refs/heads/master
| 2021-08-16T11:18:20.420868
| 2017-07-21T05:52:18
| 2017-07-21T05:52:18
| 96,125,892
| 0
| 0
| null | 2021-06-10T18:40:09
| 2017-07-03T15:34:52
|
Python
|
UTF-8
|
Python
| false
| false
| 676
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-19 19:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('author', models.CharField(max_length=255)),
('category', models.CharField(max_length=100)),
],
),
]
|
[
"justin.r.dietz@gmail.com"
] |
justin.r.dietz@gmail.com
|
9346e299b29cdb9fc9e6cd49bfae383ada7dd18e
|
957ab2916bb75edc78b9d7598b4f890b80687ea8
|
/core_library/game/file_processing.py
|
54792eabf69578013dbe142f86df137299ab7598
|
[] |
no_license
|
doug3230/Slayer
|
9e91f5db01702c206c3d52460bfb880691d3eb6a
|
1dd7e72779e060c397f1e68b829004e147e15f84
|
refs/heads/master
| 2016-09-06T03:32:53.914322
| 2014-11-01T07:27:12
| 2014-11-01T07:27:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,956
|
py
|
'''
Created on Oct 25, 2014
@author: Richard
'''
import pygame, customization
from pygame.freetype import Font, SysFont
def path_to_file(dir_name, file_name):
if dir_name:
return "{0}/{1}".format(dir_name, file_name)
else:
return file_name
def path_to_image(file_name):
return path_to_file(customization.file_settings.FILE_IMAGE_DIRECTORY, file_name)
def path_to_music(file_name):
return path_to_file(customization.file_settings.FILE_MUSIC_DIRECTORY, file_name)
def path_to_level(file_name):
return path_to_file(customization.file_settings.FILE_LEVEL_DIRECTORY, file_name)
def path_to_font(file_name):
return path_to_file(customization.file_settings.FILE_FONT_DIRECTORY, file_name)
def load_music(file_name, path_included = False):
if not path_included:
pygame.mixer.music.load(path_to_music(file_name))
else:
pygame.mixer.music.load(file_name)
return
def play_music(loop = True):
if loop:
pygame.mixer.music.play(-1)
else:
pygame.mixer.music.play()
return
def stop_music():
pygame.mixer.music.stop()
return
def load_image(file_name, path_included = False):
if not path_included:
image = pygame.image.load(path_to_image(file_name))
else:
image = pygame.image.load(file_name)
return image.convert()
def resize_image(image, new_width, new_height):
image = pygame.transform.scale(image, (int(new_width), int(new_height)))
return image.convert()
def load_font(file_name, size, bold = False, italic = False, path_included = False):
if not path_included:
font = Font(path_to_font(file_name), size, bold, italic)
else:
font = Font(file_name, size, bold, italic)
return font
def load_system_font(file_name, size, bold = False, italic = False):
font = SysFont(file_name, size, bold, italic)
return font
|
[
"doug3230@mylaurier.ca"
] |
doug3230@mylaurier.ca
|
683c8168877dd2b6b8a0c8527e1f7de813a4bdfd
|
4c6e0771833c087876b91962ca0f7c2ef821daa4
|
/numscrypt/random.py
|
1eecb29cb5e905085a0f3250913c2166291b3135
|
[
"Apache-2.0"
] |
permissive
|
fangbei/Numscrypt
|
fb8a57d57ee1fad39ed9789f4e6241ae152ca563
|
cf92b8b8edc57b08d24e8db482b5ea9ee8f494cd
|
refs/heads/master
| 2021-01-17T05:18:43.257510
| 2016-03-30T10:27:13
| 2016-03-30T10:27:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
import numscrypt as ns
def rand (*dims):
result = ns.empty (dims, 'float64')
for i in range (result.data.length):
result.data [i] = Math.random ()
return result
|
[
"info@qquick.org"
] |
info@qquick.org
|
dca261af830e2fc00b8bbeb22fa8e92af90f3b9d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_oppressors.py
|
794179fbab874c62e6c90e5f1fc1331b1f5c3cc8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# class header
class _OPPRESSORS():
    def __init__(self,):
        self.name = "OPPRESSORS"
        self.definitions = ['oppressor']  # the original assigned the undefined name `oppressor`; quoted here so the module imports cleanly
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['oppressor']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3bf1dfaa5339532ee42b32558cc1c9e8bb8157b2
|
9142f290c2ca89e53ced306804fece05043c3aa0
|
/py/trash/bk0/908_predict_1026-1.py
|
fc46045263cfc0793b058c3a73dbecb9567b93fc
|
[
"MIT"
] |
permissive
|
norihitoishida/PLAsTiCC-2018
|
aea5ecff5c06c46e3097673228054726fb1cc972
|
f7f3e86fd3b01145de6f96ebe9a7bdad49439956
|
refs/heads/master
| 2021-10-09T00:19:27.389096
| 2018-12-19T06:29:55
| 2018-12-19T06:29:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,969
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 15:46:49 2018
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
import utils
utils.start(__file__)
#==============================================================================
SUBMIT_FILE_PATH = '../output/1026-1.csv.gz'
COMMENT = 'f001~011'
EXE_SUBMIT = True
DROP = ['f001_hostgal_specz']
SEED = np.random.randint(9999)
np.random.seed(SEED)
print('SEED:', SEED)
NFOLD = 5
LOOP = 5
param = {
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.5,
'subsample': 0.5,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
}
# =============================================================================
# load
# =============================================================================
files_tr = sorted(glob('../data/train_f*.pkl'))
[print(f) for f in files_tr]
X = pd.concat([
pd.read_pickle(f) for f in tqdm(files_tr, mininterval=60)
], axis=1)
y = utils.load_target().target
X.drop(DROP, axis=1, inplace=True)
target_dict = {}
target_dict_r = {}
for i,e in enumerate(y.sort_values().unique()):
target_dict[e] = i
target_dict_r[i] = e
y = y.replace(target_dict)
if X.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')
print('no dup :) ')
print(f'X.shape {X.shape}')
gc.collect()
COL = X.columns.tolist()
#CAT = list( set(X.columns)&set(utils_cat.ALL))
#print(f'CAT: {CAT}')
# =============================================================================
# cv
# =============================================================================
dtrain = lgb.Dataset(X, y, #categorical_feature=CAT,
free_raw_data=False)
gc.collect()
model_all = []
nround_mean = 0
wloss_list = []
for i in range(LOOP):
gc.collect()
param['seed'] = np.random.randint(9999)
ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD,
feval=utils.lgb_multi_weighted_logloss,
early_stopping_rounds=100, verbose_eval=50,
seed=SEED)
model_all += models
nround_mean += len(ret['multi_logloss-mean'])
wloss_list.append( ret['wloss-mean'][-1] )
nround_mean = int((nround_mean/LOOP) * 1.3)
result = f"CV wloss: {np.mean(wloss_list)} + {np.std(wloss_list)}"
print(result)
imp = ex.getImp(model_all)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
png = f'LOG/imp_{__file__}.png'
utils.savefig_imp(imp, png, x='total', title=f'{__file__}')
utils.send_line(result, png)
COL = imp[imp.gain>0].feature.tolist()
# =============================================================================
# model
# =============================================================================
dtrain = lgb.Dataset(X[COL], y, #categorical_feature=CAT,
free_raw_data=False)
gc.collect()
np.random.seed(SEED)
model_all = []
for i in range(LOOP):
print('building', i)
gc.collect()
param['seed'] = np.random.randint(9999)
model = lgb.train(param, dtrain, num_boost_round=nround_mean, valid_sets=None,
valid_names=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, evals_result=None,
verbose_eval=True, learning_rates=None,
keep_training_booster=False, callbacks=None)
model_all.append(model)
del dtrain, X; gc.collect()
# =============================================================================
# test
# =============================================================================
files_te = sorted(glob('../data/test_f*.pkl'))
X_test = pd.concat([
pd.read_pickle(f) for f in tqdm(files_te, mininterval=60)
], axis=1)[COL]
for i,model in enumerate(tqdm(model_all)):
y_pred = model.predict(X_test)
if i==0:
y_pred_all = y_pred
else:
y_pred_all += y_pred
y_pred_all /= len(model_all)
sub = pd.read_csv('../input/sample_submission.csv.zip')
df = pd.DataFrame(y_pred_all, columns=sub.columns[1:-1])
# Compute preds_99 as the proba of class not being any of the others
# preds_99 = 0.1 gives 1.769
preds_99 = np.ones(df.shape[0])
for i in range(df.shape[1]):
preds_99 *= (1 - df.iloc[:, i])
df['class_99'] = preds_99
sub = pd.concat([sub[['object_id']], df], axis=1)
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
sub.iloc[:, 1:].hist(bins=30, figsize=(16, 12))
png = f'LOG/sub_{__file__}.png'
utils.savefig_sub(sub, png)
utils.send_line('DONE!', png)
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
#==============================================================================
utils.end(__file__)
utils.stop_instance()
|
[
"kazuki.onodera@dena.com"
] |
kazuki.onodera@dena.com
|
00d487751a336a68638c30b21a13815a2a96c309
|
07ecc53b5be6b1a34914a0e02265e847f3ac1a65
|
/Python/Greedy Algorithm/984_Medium_不含AAA或BBB的字符串.py
|
22600af30df05dac1d9060917965bf75ff165bad
|
[] |
no_license
|
JasmineRain/Algorithm
|
764473109ad12c051f5337ed6f22b517ed9bff30
|
84d7e11c1a01b1994e04a3ab446f0a35eb3d362a
|
refs/heads/master
| 2023-03-14T00:39:51.767074
| 2021-03-09T12:41:44
| 2021-03-09T12:41:44
| 289,603,630
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
class Solution:
def strWithout3a3b(self, a: int, b: int) -> str:
ans = [""] * (a + b)
index = 0
round = 1
ca = a
cb = b
if a >= b:
while ca > 0:
ans[index] = "a"
ca -= 1
index += 3
if index >= (a + b):
index = round
round += 1
while cb > 0:
ans[index] = "b"
cb -= 1
index += 3
if index >= (a + b):
index = round
round += 1
return "".join(ans)
else:
while cb > 0:
ans[index] = "b"
cb -= 1
index += 3
if index >= (a + b):
index = round
round += 1
while ca > 0:
ans[index] = "a"
ca -= 1
index += 3
if index >= (a + b):
index = round
round += 1
return "".join(ans)
if __name__ == "__main__":
S = Solution()
print(S.strWithout3a3b(a=1, b=2))
print(S.strWithout3a3b(a=4, b=1))
print(S.strWithout3a3b(a=1, b=3))
|
[
"530781348@qq.com"
] |
530781348@qq.com
|
c3f8c9326a98788d32a1dd6f6cb2abac77136527
|
7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0
|
/1301-1400/1342-Number of Steps to Reduce a Number to Zero/1342-Number of Steps to Reduce a Number to Zero.py
|
ff2024f01916125f617a9207453ff82c87002008
|
[
"MIT"
] |
permissive
|
jiadaizhao/LeetCode
|
be31bd0db50cc6835d9c9eff8e0175747098afc6
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
refs/heads/master
| 2021-11-05T04:38:47.252590
| 2021-10-31T09:54:53
| 2021-10-31T09:54:53
| 99,655,604
| 52
| 28
|
MIT
| 2020-10-02T12:47:47
| 2017-08-08T05:57:26
|
C++
|
UTF-8
|
Python
| false
| false
| 239
|
py
|
class Solution:
def numberOfSteps (self, num: int) -> int:
step = 0
while num != 0:
if num & 1:
num -= 1
else:
num >>= 1
step += 1
return step
|
[
"jiadaizhao@gmail.com"
] |
jiadaizhao@gmail.com
|
6a1b9b2699b5d40ab586304e0361f170ab18ac56
|
ed8cdcce521b8cab33c66f716c0886e17f035d21
|
/.history/public/publicfunction_20191221202740.py
|
99378bfc5a5a438a74f015f442e55bdf06ce5ce6
|
[] |
no_license
|
deancsdfy/AndroidPerformanceTool_windows
|
8ac35729bc651c3af551f090d6788b6ee3f17eb5
|
c4906aa9347e8e5eca68dbb7cf2d66a327c70d1f
|
refs/heads/master
| 2020-11-27T20:38:55.014228
| 2020-01-09T15:55:52
| 2020-01-09T15:55:52
| 229,593,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,646
|
py
|
#coding=utf-8
import os,platform
import subprocess
import re
serialno_num=''
# Determine the OS type: Windows uses findstr, Linux/macOS use grep
system = platform.system()
if system == "Windows":
find_util = "findstr"
else:
find_util = "grep"
# Check whether the ANDROID_HOME environment variable is set
# if "ANDROID_HOME" in os.environ:
# if system == "Windows":
# command = os.path.join(os.environ["ANDROID_HOME"], "platform-tools", "adb.exe")
# else:
# command = os.path.join(os.environ["ANDROID_HOME"], "platform-tools", "adb")
# else:
# raise EnvironmentError(
# "Adb not found in $ANDROID_HOME path: %s." %os.environ["ANDROID_HOME"])
# Get the list of connected devices
def get_devices():
    devices = []
    result = subprocess.Popen("adb devices", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.readlines()
    for line in result[1:]:
        line = line.decode()  # Popen stdout yields bytes lines; decode before string matching
        if 'device' in line.strip():
            devices.append(line.split()[0])
        else:
            break
    return devices
# adb command
def adb(args):
# global serialno_num
# if serialno_num == "":
# devices = get_devices()
# if len(devices) == 1:
# serialno_num = devices[0]
# else:
# raise EnvironmentError("more than 1 device")
cmd = "adb %s" %(str(args))
return os.popen(cmd)
# adb shell command
def shell(args):
# global serialno_num
# if serialno_num == "":
# devices = get_devices()
# serialno_num = devices[0]
    cmd = 'adb shell "%s"' % (str(args))  # quoting fixed: the original wrapped the whole command string in literal quotes
print(cmd)
# cmd = str(args)
return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def get_current_packagename():
    # Regex to extract the package and activity
pattern = re.compile(r"[a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+")
# package = shell('adb shell "dumpsys activity top| grep ACTIVITY"').stdout.read()
package = shell('dumpsys activity top| grep ACTIVITY').stdout.read()
    # Use index -1 because some models (e.g. Xiaomi Mi 8) also return extra system processes and packages
print(pattern.findall(package.decode())[-1].split('/')[0])
# return pattern.findall(package.decode())[-1].split('/')[0]
def get_current_activity():
    # Regex to extract the package and activity
pattern = re.compile(r"[a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+")
    # With the newer adb command line this no longer returns the activity
# package = shell('dumpsys activity top| grep ACTIVITY').stdout.read()
# print(pattern.findall(package.decode())[-1].split('/')[1])
# return pattern.findall(package.decode())[-1].split('/')[1]
if __name__ == "__main__":
get_current_activity()
get_current_packagename()
|
[
"denacsdfy@gmail.com"
] |
denacsdfy@gmail.com
|
288b5306f323b0024d8bdacbb104e4fef3aef131
|
f31fda8014ecadf6af7d4e3392fb917c49e0352a
|
/HeavyIonsAnalysis/VertexAnalysis/python/__init__.py
|
33e53c5f468fb19b01b06f042239cab6d2875ca9
|
[] |
no_license
|
jniedzie/lightbylight
|
acea5051f053c49824a49a0b78bac3a2247ee75f
|
f5a4661fcf3fd3c0e9ccd8893a46a238e30c2aa8
|
refs/heads/master
| 2020-03-18T12:24:31.970468
| 2018-02-09T15:50:00
| 2018-02-09T15:50:00
| 134,724,759
| 0
| 1
| null | 2018-05-24T14:11:12
| 2018-05-24T14:11:12
| null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/HeavyIonsAnalysis/VertexAnalysis/',1)[0])+'/cfipython/slc6_amd64_gcc491/HeavyIonsAnalysis/VertexAnalysis')
|
[
"rchudasa@cern.ch"
] |
rchudasa@cern.ch
|
45ffaf4426419e33898e073ea47511d7dd5e942c
|
632d417159013940d612f7364c2a7c5c88b52a56
|
/esign/esign_app/migrations/0011_auto_20180119_1323.py
|
1a72ae50f09c5a75f3a94acd9845c1324ab2f88a
|
[] |
no_license
|
cityking/esign
|
e553d6197f383fab0435dec5805f140592e2fdfc
|
f88279e3b7f5800bd5ad3a0bd95ebf494078da4c
|
refs/heads/master
| 2022-11-02T20:12:54.927931
| 2018-02-09T03:02:37
| 2018-02-09T03:02:37
| 120,849,522
| 0
| 0
| null | 2022-10-20T20:28:59
| 2018-02-09T03:07:20
|
Python
|
UTF-8
|
Python
| false
| false
| 968
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-19 05:23
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('esign_app', '0010_auto_20180119_1322'),
]
operations = [
migrations.AlterField(
model_name='appversion',
name='url',
field=models.CharField(max_length=100, verbose_name='下载地址'),
),
migrations.AlterField(
model_name='myuser',
name='join_date',
field=models.DateTimeField(default=datetime.datetime(2018, 1, 19, 13, 23, 34, 652029), verbose_name='加入时间'),
),
migrations.AlterField(
model_name='sign',
name='create_time',
field=models.DateTimeField(default=datetime.datetime(2018, 1, 19, 13, 23, 34, 654114), verbose_name='创建时间'),
),
]
|
[
"cityking@citykingdeMacBook-Air.local"
] |
cityking@citykingdeMacBook-Air.local
|
7edd65620a859a56b61c8982fe8c8e7e7b8822cf
|
e8274f167fd219ef78241ba8ea89e5d5875ed794
|
/cloud/quantum/quantum/api/v2/resource.py
|
757d20061e74194d59943d327de57d095cd418a3
|
[
"Apache-2.0"
] |
permissive
|
virt2x/folsomCloud
|
02db0147f7e0f2ab0375faf4f36ca08272084152
|
e6fd612dd77f35a72739cf4d4750e9795c0fa508
|
refs/heads/master
| 2021-01-01T17:26:28.405651
| 2013-10-17T12:36:04
| 2013-10-17T12:36:04
| 13,647,787
| 0
| 1
| null | 2020-07-24T08:25:22
| 2013-10-17T12:10:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,991
|
py
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers redux
"""
import netaddr
import webob
import webob.dec
import webob.exc
from quantum.common import exceptions
from quantum import context
from quantum.openstack.common import jsonutils as json
from quantum.openstack.common import log as logging
from quantum import wsgi
LOG = logging.getLogger(__name__)
class Request(webob.Request):
"""Add some Openstack API-specific logic to the base webob.Request."""
def best_match_content_type(self):
supported = ('application/json', )
return self.accept.best_match(supported,
default_match='application/json')
@property
def context(self):
#Eventually the Auth[NZ] code will supply this. (mdragon)
#when that happens this if block should raise instead.
if 'quantum.context' not in self.environ:
self.environ['quantum.context'] = context.get_admin_context()
return self.environ['quantum.context']
def Resource(controller, faults=None, deserializers=None, serializers=None):
"""Represents an API entity resource and the associated serialization and
deserialization logic
"""
default_deserializers = {'application/xml': wsgi.XMLDeserializer(),
'application/json': lambda x: json.loads(x)}
default_serializers = {'application/xml': wsgi.XMLDictSerializer(),
'application/json': lambda x: json.dumps(x)}
format_types = {'xml': 'application/xml',
'json': 'application/json'}
action_status = dict(create=201, delete=204)
default_deserializers.update(deserializers or {})
default_serializers.update(serializers or {})
deserializers = default_deserializers
serializers = default_serializers
faults = faults or {}
@webob.dec.wsgify(RequestClass=Request)
def resource(request):
route_args = request.environ.get('wsgiorg.routing_args')
if route_args:
args = route_args[1].copy()
else:
args = {}
# NOTE(jkoelker) by now the controller is already found, remove
# it from the args if it is in the matchdict
args.pop('controller', None)
fmt = args.pop('format', None)
action = args.pop('action', None)
content_type = format_types.get(fmt,
request.best_match_content_type())
deserializer = deserializers.get(content_type)
serializer = serializers.get(content_type)
try:
if request.body:
args['body'] = deserializer(request.body)
method = getattr(controller, action)
result = method(request=request, **args)
except (ValueError, AttributeError,
exceptions.QuantumException,
netaddr.AddrFormatError) as e:
LOG.exception('%s failed' % action)
body = serializer({'QuantumError': str(e)})
kwargs = {'body': body, 'content_type': content_type}
for fault in faults:
if isinstance(e, fault):
raise faults[fault](**kwargs)
raise webob.exc.HTTPInternalServerError(**kwargs)
except webob.exc.HTTPException as e:
LOG.exception('%s failed' % action)
e.body = serializer({'QuantumError': str(e)})
e.content_type = content_type
raise
except Exception as e:
# NOTE(jkoelker) Everyting else is 500
LOG.exception('%s failed' % action)
# Do not expose details of 500 error to clients.
msg = _('Request Failed: internal server error while '
'processing your request.')
body = serializer({'QuantumError': msg})
kwargs = {'body': body, 'content_type': content_type}
raise webob.exc.HTTPInternalServerError(**kwargs)
status = action_status.get(action, 200)
body = serializer(result)
# NOTE(jkoelker) Comply with RFC2616 section 9.7
if status == 204:
content_type = ''
body = None
return webob.Response(request=request, status=status,
content_type=content_type,
body=body)
return resource
|
[
"quan.xu@intel.com"
] |
quan.xu@intel.com
|
f01a21e3061792d927357110f3970d7c03ba9050
|
8ed86b8e9c451abcb2ce0ddf2f2067c11f3993d8
|
/tests/test_osmnx.py
|
33ec027e2986a7c620183d88cd5c271556bd3600
|
[
"MIT"
] |
permissive
|
surfcao/osmnx
|
65830096c21b8353a536f776dfedba7de20eac4c
|
51c9addb42425657fa6b11c7442f79f10b9e3e22
|
refs/heads/master
| 2021-01-19T23:32:40.068378
| 2017-04-19T20:22:01
| 2017-04-19T20:22:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,082
|
py
|
"""
OSMnx tests
-----------
"""
import matplotlib as mpl
mpl.use('Agg') #use agg backend so you don't need a display on travis-ci
import os, shutil
if os.path.exists('.temp'):
shutil.rmtree('.temp')
import osmnx as ox, logging as lg
ox.config(log_console=True, log_file=True, use_cache=True,
data_folder='.temp/data', logs_folder='.temp/logs', imgs_folder='.temp/imgs', cache_folder='.temp/cache')
ox.log('test debug', level=lg.DEBUG)
ox.log('test info', level=lg.INFO)
ox.log('test warning', level=lg.WARNING)
ox.log('test error', level=lg.ERROR)
def test_imports():
import json, math, sys, os, io, ast, unicodedata, hashlib, re, random, time, warnings, datetime as dt, logging as lg
from collections import OrderedDict, Counter
from itertools import groupby, chain
from dateutil import parser as date_parser
import requests, numpy as np, pandas as pd, geopandas as gpd, networkx as nx, matplotlib.pyplot as plt, matplotlib.cm as cm
from matplotlib.collections import LineCollection
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
from shapely import wkt
from shapely.ops import unary_union
from descartes import PolygonPatch
from rtree.index import Index as RTreeIndex
def test_gdf_shapefiles():
city = ox.gdf_from_place('Manhattan, New York City, New York, USA')
city_projected = ox.project_gdf(city, to_crs={'init':'epsg:3395'})
ox.save_gdf_shapefile(city_projected)
city = ox.gdf_from_place('Manhattan, New York City, New York, USA', buffer_dist=100)
ox.plot_shape(city)
def test_network_saving_loading():
G = ox.graph_from_place('Piedmont, California, USA')
G_projected = ox.project_graph(G)
ox.save_graph_shapefile(G_projected)
ox.save_graphml(G_projected)
G2 = ox.load_graphml('graph.graphml')
gdf_edges = ox.graph_to_gdfs(G, nodes=False, edges=True, fill_edge_geometry=False)
gdf_nodes, gdf_edges = ox.graph_to_gdfs(G, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True)
G3 = ox.gdfs_to_graph(gdf_nodes, gdf_edges)
def test_get_network_methods():
import geopandas as gpd
north, south, east, west = 37.79, 37.78, -122.41, -122.43
G1 = ox.graph_from_bbox(north, south, east, west, network_type='drive_service')
G1 = ox.graph_from_bbox(north, south, east, west, network_type='drive_service', truncate_by_edge=True)
location_point = (37.791427, -122.410018)
bbox = ox.bbox_from_point(location_point, project_utm=True)
G2 = ox.graph_from_point(location_point, distance=750, distance_type='bbox', network_type='drive')
G3 = ox.graph_from_point(location_point, distance=500, distance_type='network')
G4 = ox.graph_from_address(address='350 5th Ave, New York, NY', distance=1000, distance_type='network', network_type='bike')
places = ['Los Altos, California, USA', {'city':'Los Altos Hills', 'state':'California'}, 'Loyola, California']
G5 = ox.graph_from_place(places, network_type='all', clean_periphery=False)
calif = gpd.read_file('examples/input_data/ZillowNeighborhoods-CA')
mission_district = calif[(calif['CITY']=='San Francisco') & (calif['NAME']=='Mission')]
polygon = mission_district['geometry'].iloc[0]
G6 = ox.graph_from_polygon(polygon, network_type='walk')
def test_stats():
location_point = (37.791427, -122.410018)
G = ox.graph_from_point(location_point, distance=500, distance_type='network')
stats1 = ox.basic_stats(G)
stats1 = ox.basic_stats(G, area=1000)
stats2 = ox.extended_stats(G, connectivity=True, anc=True, ecc=True, bc=True, cc=True)
def test_plots():
G = ox.graph_from_place('Piedmont, California, USA', network_type='drive', simplify=False)
G2 = ox.simplify_graph(G, strict=False)
nc = ox.get_node_colors_by_attr(G2, 'osmid')
ec = ox.get_edge_colors_by_attr(G2, 'length')
fig, ax = ox.plot_graph(G, save=True, file_format='png')
G_simplified = ox.simplify_graph(G)
fig, ax = ox.plot_graph(G_simplified, show=False, save=True, close=True, file_format='svg')
G_projected = ox.project_graph(G_simplified)
fig, ax = ox.plot_graph(G_projected)
fig, ax = ox.plot_graph(G_projected, fig_height=5, fig_width=5, margin=0.05, axis_off=False, bgcolor='y',
file_format='png', filename='x', dpi=180, annotate=True, node_color='k', node_size=5,
node_alpha=0.1, node_edgecolor='b', node_zorder=5, edge_color='r', edge_linewidth=2,
edge_alpha=0.1, use_geom=False, show=False, save=True, close=True)
fig, ax = ox.plot_figure_ground(G=G_simplified, file_format='png')
fig, ax = ox.plot_figure_ground(point=(33.694981, -117.841375), file_format='png')
fig, ax = ox.plot_figure_ground(address='Denver, Colorado, USA', file_format='png')
def test_routing_folium():
import networkx as nx
G = ox.graph_from_address('N. Sicily Pl., Chandler, Arizona', distance=800, network_type='drive')
origin = (33.307792, -111.894940)
destination = (33.312994, -111.894998)
origin_node = ox.get_nearest_node(G, origin)
destination_node = ox.get_nearest_node(G, destination)
route = nx.shortest_path(G, origin_node, destination_node)
attributes = ox.get_route_edge_attributes(G, route, 'length')
fig, ax = ox.plot_graph_route(G, route, save=True, filename='route', file_format='png')
fig, ax = ox.plot_graph_route(G, route, origin_point=origin, destination_point=destination,
save=True, filename='route', file_format='png')
graph_map = ox.plot_graph_folium(G, popup_attribute='name')
route_map = ox.plot_route_folium(G, route)
def test_buildings():
gdf = ox.buildings_from_place(place='Piedmont, California, USA')
gdf = ox.buildings_from_address(address='San Francisco, California, USA', distance=300)
fig, ax = ox.plot_buildings(gdf)
|
[
"gboeing@berkeley.edu"
] |
gboeing@berkeley.edu
|
47f0c143537b99ffb3e3284dfa05d57756f3b79d
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_1_1_neat/16_1_1_DanielBraithwaite_last_word.py
|
f312da31370a6413f77cbed11a1a43de2b88ee5d
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
def last_word(s):
w = s[0]
for i in range(1,len(s)):
si = ord(w[0])
ei = ord(w[len(w)-1])
ci = ord(s[i])
if ci >= si:
w = s[i] + w
else:
w = w + s[i]
return w
o = open('output.txt', 'w+')
f = open('A-large.in', 'r+')
##f = open('test.txt', 'r+')
N = int(f.readline())
for i in range(N):
s = f.readline().strip()
res = last_word(s)
print(res)
o.write("Case #" + str(i + 1) + ": " + str(res) + "\n")
f.close()
o.close()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
b100512b577573496e2a4b06b4ecba61f76b9160
|
0cb8f0f422c790e75da3d3e4d277390039a72637
|
/assignment1/question.py
|
f461ff4af675756623bb5336d0721daddea9cf3d
|
[] |
no_license
|
curow/CS231N
|
aa62812bb5698e5b10856dd8f700f56bca2e980a
|
feafeee3081e22cfc23f5643d71f45e0e6e636f4
|
refs/heads/master
| 2021-09-04T17:44:25.291119
| 2018-01-20T16:57:51
| 2018-01-20T16:57:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
import numpy as np  # required by the array operations below (missing from the original snippet)
def svm_loss_vectorized(W, X, y, reg):
"""
Structured SVM loss function, vectorized implementation.
Inputs and outputs are the same as svm_loss_naive.
"""
loss = 0.0
dW = np.zeros(W.shape) # initialize the gradient as zero
# transpose X and W
# X.shape will be (D,N)
# W.shape will be (C,D)
X = X.T
W = W.T
dW = dW.T
num_train = X.shape[1]
# W_y shape from (N,D) to (D,N)
W_y = W[y].T
S_y = np.sum(W_y*X ,axis=0)
margins = np.dot(W,X) + 1 - S_y
mask = np.array(margins>0)
# get the value of num_train examples made on W's gradient
# that is,only when the mask is positive
# the train example has impact on W's gradient
dW_j = np.dot(mask, X.T)
dW += dW_j
mul_mask = np.sum(mask, axis=0, keepdims=True).T
# dW[y] -= mul_mask * X.T
dW_y = mul_mask * X.T
for i,label in enumerate(y):
dW[label] -= dW_y[i]
loss = np.sum(margins*mask) - num_train
loss /= num_train
dW /= num_train
# add regularization term
loss += reg * np.sum(W*W)
dW += reg * 2 * W
dW = dW.T
return loss, dW
|
[
"TiwazBiu@gmail.com"
] |
TiwazBiu@gmail.com
|
0331c7d64d9c4561496104cd1e73f30ef345945b
|
403a8c7d9ba2956c3f5873d0721921e0d8ae7c65
|
/tests/test_cli.py
|
13cee7104e6617fae0738f12d22c77060adeb91a
|
[
"MIT"
] |
permissive
|
kazhala/fzf.aws
|
b0c83f0ac47f1b2da0d0b064d6a688ba2e69028c
|
4abefb2301f7b489b11ed3f0b303faafa5941d5b
|
refs/heads/master
| 2021-07-05T00:50:12.632284
| 2021-05-25T23:09:51
| 2021-05-25T23:09:51
| 242,327,229
| 68
| 3
|
MIT
| 2021-03-25T23:42:00
| 2020-02-22T11:09:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,065
|
py
|
from botocore.exceptions import ClientError
from fzfaws.utils.exceptions import InvalidFileType
import os
from fzfaws.utils.fileloader import FileLoader
import unittest
from unittest.mock import patch
from fzfaws.cli import main, copy_config
import sys
import io
from pathlib import Path
import tempfile
class TestCLI(unittest.TestCase):
def setUp(self):
self.capturedOuput = io.StringIO()
sys.stdout = self.capturedOuput
config_path = Path(__file__).resolve().parent.joinpath("../fzfaws/fzfaws.yml")
fileloader = FileLoader()
fileloader.load_config_file(config_path=str(config_path))
def tearDown(self):
sys.stdout = sys.__stdout__
@patch("fzfaws.cli.s3")
@patch("fzfaws.cli.ec2")
@patch("fzfaws.cli.cloudformation")
def test_subparser(self, mocked_cloudformation, mocked_ec2, mocked_s3):
sys.argv = [__file__, "cloudformation", "-h"]
main()
mocked_cloudformation.assert_called_once_with(["-h"])
sys.argv = [__file__, "ec2", "ssh", "-A"]
main()
mocked_ec2.assert_called_once_with(["ssh", "-A"])
mocked_ec2.reset_mock()
sys.argv = [__file__, "ec2", "start"]
main()
mocked_ec2.assert_called_once_with(["start", "--wait"])
sys.argv = [__file__, "s3", "download"]
main()
mocked_s3.assert_called_once_with(["download", "--hidden"])
@patch("fzfaws.cli.copy_config")
def test_parser(self, mocked_copy):
sys.argv = [__file__, "-h"]
self.assertRaises(SystemExit, main)
self.assertRegex(
self.capturedOuput.getvalue(), r"usage: fzfaws .*",
)
sys.argv = [__file__, "--copy-config"]
self.assertRaises(SystemExit, main)
mocked_copy.assert_called_once()
self.capturedOuput.truncate(0)
self.capturedOuput.seek(0)
sys.argv = [__file__]
self.assertRaises(SystemExit, main)
self.assertRegex(self.capturedOuput.getvalue(), r"^usage: fzfaws \[-h\].*")
def test_copy_config(self):
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ["XDG_CONFIG_HOME"] = tmpdirname
copy_config()
if not Path("%s/fzfaws/fzfaws.yml" % tmpdirname).is_file():
self.fail("config file not properly copied")
@patch("fzfaws.cli.get_default_args")
def test_exceptions(self, mocked_args):
mocked_args.side_effect = InvalidFileType
sys.argv = [__file__, "s3"]
self.assertRaises(SystemExit, main)
self.assertEqual(
self.capturedOuput.getvalue(), "Selected file is not a valid file type\n"
)
mocked_args.side_effect = SystemExit
sys.argv = [__file__, "s3"]
self.assertRaises(SystemExit, main)
mocked_args.side_effect = KeyboardInterrupt
sys.argv = [__file__, "s3"]
self.assertRaises(SystemExit, main)
mocked_args.side_effect = ClientError
sys.argv = [__file__, "s3"]
self.assertRaises(SystemExit, main)
|
[
"kevin7441@gmail.com"
] |
kevin7441@gmail.com
|
536610ba716a9b8715ef45dffd40ac555213c201
|
1dcea2a511f14a43701994f6a7785afd21a20d74
|
/Algorithm/61_RotateList.py
|
ad33302d777bb10f3daee4eb21b7a5bd9a4a46c7
|
[] |
no_license
|
lingtianwan/Leetcode2
|
66031e256a2928c6197516f83f14748c52e91b8c
|
80a604cc09d5d2d62dd05157d8b829de675e4404
|
refs/heads/master
| 2021-01-13T11:17:18.238465
| 2017-02-09T01:43:38
| 2017-02-09T01:43:38
| 81,395,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
# Given a list, rotate the list to the right by k places, where k is non-negative.
#
# For example:
# Given 1->2->3->4->5->NULL and k = 2,
# return 4->5->1->2->3->NULL.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head:
return None
fast = head
cnt = 0
while fast:
fast = fast.next
cnt += 1
k %= cnt
if k == 0:
return head
fast = head
slow = head
for _ in range(k):
fast = fast.next
while fast.next:
fast = fast.next
slow = slow.next
res = slow.next
slow.next = None
fast.next = head
return res
|
[
"lingtian.wan@gmail.com"
] |
lingtian.wan@gmail.com
|
7f0416307b8c694260d09aa9e48f3b5b5eef0c40
|
71460476c5f5ebdca719def124f1a0650861fdab
|
/mint_work/custom/pos_order_history_type/models/pos_sales_multi_report.py
|
af374a7c0fa77f540ef7d46732c590d9499bb3eb
|
[] |
no_license
|
merdhah/dubai_work
|
fc3a70dc0b1db6df19c825a3bf1eef2a373d79c0
|
e24eb12b276a4cd5b47a4bd5470d915179872a4f
|
refs/heads/master
| 2022-01-07T11:22:07.628435
| 2018-10-17T13:37:24
| 2018-10-17T13:37:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,051
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Bista Solutions Pvt. Ltd
# Copyright (C) 2018 (http://www.bistasolutions.com)
#
##############################################################################
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class PosSalesReportProductType(models.TransientModel):
_name = 'pos.sales.report.type'
company_id = fields.Many2one('res.company', default=lambda self: self.env.user.company_id.id)
type = fields.Selection([
('consu', 'Consumable'),
('service', 'Service'),
('product', 'Stockable Product')], string = 'Product Type',
default = 'consu',
help = 'A stockable product is a product for which you manage stock. The "Inventory" app has to be installed.\n'
'A consumable product, on the other hand, is a product for which stock is not managed.\n'
'A service is a non-material product you provide.\n'
'A digital content is a non-material product you sell online. The files attached to the products are the one that are sold on '
'the e-commerce such as e-books, music, pictures,... The "Digital Product" module has to be installed.')
state = fields.Selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('paid', 'Paid'), ('done', 'Posted'), ('invoiced', 'Invoiced')],
'State')
# This method is called from the wizard which will get all the pos order
# which will have the the product type which is selected in wizard.
@api.multi
def sales_order_report_type(self):
self.ensure_one()
data = {
'ids': self.id,
'model': 'pos.sales.report',
'form': self.read()[0],
}
query = """
select po.name as order,pt.name,pp.barcode, pol.qty, pol.price_unit
from pos_order_line pol
left join pos_order po ON (po.id = pol.order_id)
left join product_product pp ON (pp.id = pol.product_id)
left join product_template pt ON (pt.id = pp.product_tmpl_id)
where pt.type='%s'""" % (self.type)
if self.state :
query += """ and po.state='%s'""" % (self.state)
self.env.cr.execute(query)
result = self._cr.dictfetchall()
if result :
data.update({
'company_logo' : self.company_id.logo,
'company_name' : self.company_id.partner_id.name,
'company_street' : self.company_id.partner_id.street,
'company_street2' : self.company_id.partner_id.street2,
'company_city' : self.company_id.partner_id.city,
'company_state_id' :
self.company_id.partner_id.state_id.name,
'company_country_id' :
self.company_id.partner_id.country_id.name,
'company_zip' : self.company_id.partner_id.zip,
'company_phone' : self.company_id.partner_id.phone,
'company_mobile' : self.company_id.partner_id.mobile,
'company_fax' : self.company_id.partner_id.fax,
'company_email' : self.company_id.partner_id.email,
'company_website' : self.company_id.partner_id.website,
'product_type_name' : self.type,
'lines' : result,
})
else :
raise UserError(
_('There is no Record related to this Product Type.'))
return self.env['report'].get_action(self,
'pos_order_history_type.report_sale_orders_type', data=data)
class ReportPOSSaleOrderProductTypeMulti(models.AbstractModel):
_name = 'report.pos_order_history_type.report_sale_orders_type'
@api.multi
def render_html(self, docids, data=None):
return self.env['report'].render('pos_order_history_type.report_sale_orders_type', dict(data or {}))
|
[
"asghar0517@gmail.com"
] |
asghar0517@gmail.com
|
3d60ed7e99f218433773775f5e56aec334e9fb8d
|
52b2e3470cd4b91975b2e1caed8d1c93c20e5d05
|
/tools/parsertools/testbuffer.py
|
f8f8dd1bfbe81182baf42dae60fe879de314da4a
|
[] |
no_license
|
xprime480/projects
|
c2f9a82bbe91e00859568dc27ae17c3b5dd873e3
|
3c5eb2d53bd7fa198edbe27d842ee5b5ff56e226
|
refs/heads/master
| 2020-04-27T03:51:29.456979
| 2019-04-12T14:34:39
| 2019-04-12T14:34:39
| 174,037,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
class TestBuffer(object) :
def __init__(self, tokens) :
self.tokens = tokens[:]
self.size = len(self.tokens)
self.index = 0
def get(self) :
rv = self.peek()
if rv :
self.index += 1
return rv
def peek(self) :
if self.index >= self.size :
rv = None
else :
rv = self.tokens[self.index]
#print 'TestBuffer returning %s' % rv
return rv
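# Hypothetical usage sketch (added; not part of the original file):
# buf = TestBuffer(['a', 'b'])
# buf.peek()   # -> 'a' (does not advance)
# buf.get()    # -> 'a'
# buf.get()    # -> 'b'
# buf.get()    # -> None (buffer exhausted)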
|
[
"mi.davis@sap.com"
] |
mi.davis@sap.com
|
df6bb199aa33221b02a071eebb1a06018e2600d3
|
e35fd52fe4367320024a26f2ee357755b5d5f4bd
|
/Chapter 3 - Stacks and Queues/linkedlist.py
|
b949d5439e2e9c0378f1d78a0e7ded42d00de4ba
|
[] |
no_license
|
liseyko/CtCI
|
a451967b0a0ce108c491d30b81e88d20ad84d2cd
|
c27f19fac14b4acef8c631ad5569e1a5c29e9e1f
|
refs/heads/master
| 2020-03-21T14:28:47.621481
| 2019-11-12T22:59:07
| 2019-11-12T22:59:07
| 138,658,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,690
|
py
|
from random import randint
class Node():
"""implementation of a simple Node object"""
def __init__(self,data=None,next=None):
self.next = next
self.data = data
def __str__(self):
return str(self.data)
class LinkedList():
"""linked list implementation"""
def __init__(self,lst=[]):
self.head = None
self.len = 0
for n in reversed(lst):
self.insert(n)
def insert(self,data):
self.head = Node(data,self.head)
self.len += 1
def populate(self,q=10, rng=16):
while self.len < q:
self.insert(randint(0,rng))
    def append(self,data):
        if self.head is None:
            self.head = Node(data)
            self.len += 1
            return
        end = Node(data)
        n = self.head
        while n.next is not None:
            n = n.next
        n.next = end
        self.len += 1
def deleteNode(self,n):
cn = self.head
if not cn:
return False
while cn.next:
if cn.next == n:
cn.next = cn.next.next
return True
cn = cn.next
return False
def deleteNode_fast(self,n):
if not n:
return False
if not n.next:
return self.deleteNode(n)
n.data = n.next.data
n.next = n.next.next
return True
def mkunique(self):
buffer = set()
n = self.head
if n:
buffer.add(n.data)
else:
return
while n.next:
if n.next.data not in buffer:
buffer.add(n.next.data)
n = n.next
else:
n.next = n.next.next
self.len -= 1
def print_data(self):
n=self.head
while n:
print(n.data,end=', ')
n = n.next
print(n)
if not self.head:
print("The list is empty.")
def __str__(self):
l = []
n=self.head
while n:
l.append(n.data)
n = n.next
return str(l)
def __iter__(self):
cur_node = self.head
while cur_node:
yield cur_node
cur_node = cur_node.next
def __len__(self):
return self.len
def deleteNodeByData(self, data):
""" deletes the first occurance of node, containing <data> from <head> list """
if self.head.data == data:
self.head = self.head.next
return
n = self.head
while n.next is not None:
if n.next.data == data:
n.next = n.next.next
return self
n = n.next
return
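# Hypothetical usage sketch (added; not part of the original file):
# ll = LinkedList([1, 2, 2, 3])
# ll.print_data()   # 1, 2, 2, 3, None
# ll.mkunique()     # drops the duplicate 2
# print(ll)         # [1, 2, 3]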
|
[
"liseyko@gmail.com"
] |
liseyko@gmail.com
|
6e18893137c3c85ef8e273ad56b8170fbe604a00
|
e2b9873da7723ef8ae505c4286e4eccbf7416315
|
/leagueofclass/cadastros/migrations/0013_remove_atividadesprofessor_teste.py
|
aca869bbd971ba5dda0a5981a69d7e9f85b5439c
|
[] |
no_license
|
lucaasaragao/LOC_PAS
|
94fc50dd429ce2e9ec71cebdd748f3ff9df1ceac
|
22939ab9f7b54ddc6355dce11e55e801e9501327
|
refs/heads/master
| 2020-03-27T17:57:57.824525
| 2018-11-01T05:22:20
| 2018-11-01T05:22:20
| 146,888,554
| 0
| 1
| null | 2018-10-31T21:37:59
| 2018-08-31T12:23:48
|
CSS
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
# Generated by Django 2.0.7 on 2018-09-28 04:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cadastros', '0012_atividadesprofessor_teste'),
]
operations = [
migrations.RemoveField(
model_name='atividadesprofessor',
name='teste',
),
]
|
[
"osvaldo_cavalcanti_filho@hotmail.com"
] |
osvaldo_cavalcanti_filho@hotmail.com
|
38a73f29cb633dc9396633c72607a3415fa7ed7e
|
0640c53cd62def556a098f1e732deee8c1348c9e
|
/IIITSERC-ssad_2015_a3_group1-88a823ccd2d0/Abhishek Vinjamoori/DonkeyKongFinal/src/player.py
|
c284e27402935cc64dd01b598004dd4b0546ffa9
|
[] |
no_license
|
anirudhdahiya9/Open-data-projecy
|
579867fe8716076819734cebdbc6e15bb471bb39
|
26d629f8348f0110fa84b02009e787a238aff441
|
refs/heads/master
| 2021-01-10T13:50:19.855983
| 2016-03-23T22:46:03
| 2016-03-23T22:46:03
| 54,598,189
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
import pygame
size = 16
# NOTE: the original file assumes a `person` base class defined elsewhere in the
# project; no import for it appears in this snippet.
class player(person):
    def __init__(self, name, initX, initY):
        person.__init__(self, name, initX, initY)
        self.marioright = pygame.image.load('../images/m1.png')
        self.marioright = pygame.transform.scale(self.marioright, (size, size))
        self.mariowalkright = pygame.image.load('../images/m2.png')
        self.mariowalkright = pygame.transform.scale(self.mariowalkright, (size, size))
        self.mariowalkleft = pygame.transform.scale(self.mariowalkright, (size, size))
        self.mariowalkleft = pygame.transform.flip(self.mariowalkleft, True, False)
        self.marioleft = pygame.transform.scale(self.marioright, (size, size))
        self.marioleft = pygame.transform.flip(self.marioleft, True, False)
        self.mario = self.marioright
|
[
"anirudhdahiya9@gmail.com"
] |
anirudhdahiya9@gmail.com
|
dc70e874342123a38005f05ad3a80c1ee0045ec1
|
2c69245fa6b65affaa40755785504df4c12dd3b5
|
/phraser/tools/fix_header_guards.py
|
c4547145f126923db909ff79524774271c76e916
|
[
"MIT"
] |
permissive
|
knighton/phraser
|
1b711a20193e4722e50d41e0ea11c69dca7bfcef
|
a4b213260cd9b24fb3052973a1268c021f965ce8
|
refs/heads/master
| 2021-01-17T09:04:22.561009
| 2016-04-01T21:32:10
| 2016-04-01T21:32:10
| 34,379,599
| 1
| 2
| null | 2016-04-04T21:12:41
| 2015-04-22T08:51:15
|
C++
|
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
#!/usr/bin/python
#
# Fix each .h header guard (useful after moving files around).
import os
DOIT = True
def each_header(root_dir):
for root, dirs, files in os.walk(root_dir):
for name in files:
if name.endswith('.h'):
f = os.path.join(root, name)
yield f
def header_guard_from_file_name(f):
if f.startswith('./'):
f = f[2:]
return f.replace('/', '_').replace('.h', '_H_').upper()
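# Added illustration (not in the original file) of the mapping header_guard_from_file_name produces:
#   header_guard_from_file_name('./util/string_tools.h')  ->  'UTIL_STRING_TOOLS_H_'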
def fix_header_guards(root_dir):
for fn in each_header(root_dir):
new_header = header_guard_from_file_name(fn)
text = open(fn).read()
ss = text.split()
try:
assert ss[0] == '#ifndef'
except:
print 'WTF:', fn
assert False
old_header = ss[1]
if old_header != new_header:
if DOIT:
open(fn, 'wb').write(text.replace(old_header, new_header))
else:
print 'Would change a header:'
print ' file:', fn
print ' old: ', old_header
print ' new: ', new_header
def main():
fix_header_guards('.')
if __name__ == '__main__':
main()
|
[
"iamknighton@gmail.com"
] |
iamknighton@gmail.com
|
64bdac7133fc0b26f17c3e74ef60a624dea2bf9a
|
f4b79529109fbb4055f334d0d9c7c96cb0710447
|
/colour/utilities/tests/test_deprecated.py
|
bfbb70c00812c9381bc5e3c93242eec9a75ef368
|
[
"BSD-3-Clause"
] |
permissive
|
trevorandersen/colour
|
167381b3d03e506a270a8d2a519a164808995437
|
02b595b26313c4b4f55adc41d599f90c4c9edbcd
|
refs/heads/develop
| 2021-07-15T04:48:19.585586
| 2021-01-23T23:51:44
| 2021-01-23T23:51:44
| 230,421,054
| 0
| 0
|
BSD-3-Clause
| 2019-12-28T12:54:20
| 2019-12-27T10:10:30
| null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
# -*- coding: utf-8 -*-
import sys
from colour.utilities.deprecation import (ModuleAPI, ObjectRenamed,
ObjectRemoved)
class deprecated(ModuleAPI):
def __getattr__(self, attribute):
return super(deprecated, self).__getattr__(attribute)
NAME = None
"""
An non-deprecated module attribute.
NAME : object
"""
NEW_NAME = None
"""
A module attribute with a new name.
NAME : object
"""
sys.modules['colour.utilities.tests.test_deprecated'] = (deprecated(
sys.modules['colour.utilities.tests.test_deprecated'], {
'OLD_NAME':
ObjectRenamed(
name='colour.utilities.tests.test_deprecated.OLD_NAME',
new_name='colour.utilities.tests.test_deprecated.NEW_NAME'),
'REMOVED':
ObjectRemoved(name='colour.utilities.tests.test_deprecated.REMOVED'
)
}))
del ModuleAPI
del ObjectRenamed
del ObjectRemoved
del sys
|
[
"thomas.mansencal@gmail.com"
] |
thomas.mansencal@gmail.com
|
6fd9f967435ec0b4885b32a2c99175b9c69f4247
|
c2849586a8f376cf96fcbdc1c7e5bce6522398ca
|
/ch28/ex28-25.py
|
d6a7549a80d0c5a59f5e9b81527f0036703e34dc
|
[] |
no_license
|
freebz/Learning-Python
|
0559d7691517b4acb0228d1cc76de3e93915fb27
|
7f577edb6249f4bbcac4f590908b385192dbf308
|
refs/heads/master
| 2020-09-23T01:48:24.009383
| 2019-12-02T12:26:40
| 2019-12-02T12:26:40
| 225,371,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
# Updating objects in a shelve
# updatedb.py file: update the Person objects in the database
import shelve
db = shelve.open('persondb')          # reopen the shelve under the same filename
for key in sorted(db):                # iterate to display the database objects
    print(key, '\t=>', db[key])       # print in a customized format
sue = db['Sue Jones']                 # index by key to fetch the object
sue.giveRaise(.10)                    # update it in memory using the class's method
db['Sue Jones'] = sue                 # assign to the key to update the shelve
db.close()                            # close the database after the changes
# python updatedb.py
# Bob Smith => [Person: job=None, name=Bob Smith, pay=0]
# Sue Jones => [Person: job=dev, name=Sue Jones, pay=100000]
# Tom Jones => [Manager: job=mgr, name=Tom Jones, pay=50000]
# python updatedb.py
# Bob Smith => [Person: job=None, name=Bob Smith, pay=0]
# Sue Jones => [Person: job=dev, name=Sue Jones, pay=110000]
# Tom Jones => [Manager: job=mgr, name=Tom Jones, pay=50000]
# python updatedb.py
# Bob Smith => [Person: job=None, name=Bob Smith, pay=0]
# Sue Jones => [Person: job=dev, name=Sue Jones, pay=121000]
# Tom Jones => [Manager: job=mgr, name=Tom Jones, pay=50000]
# python updatedb.py
# Bob Smith => [Person: job=None, name=Bob Smith, pay=0]
# Sue Jones => [Person: job=dev, name=Sue Jones, pay=133100]
# Tom Jones => [Manager: job=mgr, name=Tom Jones, pay=50000]
# python
import shelve
db = shelve.open('persondb')          # reopen the database
rec = db['Sue Jones']                 # fetch the object by its key
rec
# [Person: Sue Jones, 146410]
rec.lastName()
# 'Jones'
rec.pay
# 146410
|
[
"freebz@hananet.net"
] |
freebz@hananet.net
|
3e0a856177c0d402b98ed229c8529ec154be6332
|
8419c3c826dd41630e57c6523fe6de79eca2facb
|
/workshop of python/samples/ex02Main.py
|
7176c022b4c56ea5e02bb7f818bec18b4d7d882a
|
[] |
no_license
|
thomasernste/python
|
4d009f5a891fd7c4a3432a42ea94f94379f6d0de
|
7a59d2e37626d4de3a3b7e6942363c424798ad46
|
refs/heads/master
| 2016-09-05T23:17:18.093787
| 2012-04-29T14:38:28
| 2012-04-29T14:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""docstring
"""
__revision__ = '0.1'
import sys,os
def usage():
    # define the usage function
print "inputFile"
pass
def error():
usage()
sys.exit(-1)
    # force the script to exit immediately, returning -1 as the error code
def readLines(inf):
    for line in open(inf):
        # open the file inf and read it line by line
print line
if __name__=="__main__":
argvNum=2
if len(sys.argv)<argvNum:
        # read the command-line arguments; sys.argv is a list, and len(list) gives the number of its elements
        error()
    print sys.argv[0]
    inf=sys.argv[1]
    # sys.argv is indexed from 0; sys.argv[0] is the name of the script itself
    readLines(inf)
    # function arguments are passed by reference (like pointers in C)
    # if the referenced value is immutable (e.g. a str or int), this behaves like pass by value
    # if the referenced value is mutable (e.g. a list), this behaves like pass by reference
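# A tiny illustration of the notes above (a sketch, not part of the original script):
#   def grow(lst): lst.append(1)      # the caller's list changes (mutable argument)
#   def bump(n): n += 1               # the caller's int does not change (immutable argument)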
|
[
"wangchj04@gmail.com"
] |
wangchj04@gmail.com
|
54390514accfef3d5b46666a5f12457557475660
|
b75918b2ac1dfaf2c1219f40d63004900c9338b1
|
/app/main.py
|
bb88fce0c41afd8a113ef6dc5777bfc4d1d5a774
|
[] |
no_license
|
solashirai/ExplainableCourseRecommender
|
e0f036da9814a0187daa5635da0ff2f86386026d
|
6a2795cfc4536548ac3679b3d23b953e55a50a37
|
refs/heads/main
| 2023-04-14T14:27:36.054830
| 2021-04-19T02:29:48
| 2021-04-19T02:29:48
| 302,346,189
| 1
| 0
| null | 2021-04-18T16:13:48
| 2020-10-08T13:17:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,531
|
py
|
from flask import Flask, request, abort
import rdflib
from escore.pipeline import RecommendCoursesPipeline
from escore.services.course import GraphCourseQueryService
from escore.services import PlanOfStudyRecommenderService
from escore.utils.path import DATA_DIR
from escore.models import StudentPOSRequirementContext, CourseCandidate, Student, PlanOfStudy
from typing import Tuple
from frex.stores import LocalGraph
app = Flask(__name__)
# for testing locally
kg_files = tuple((DATA_DIR / file).resolve() for file in [
"courses.ttl",
"scheduled_courses.ttl",
"rpi_departments.ttl",
"parsed_grad_requirements.ttl",
"users.ttl",
])
COURSEKG_GRAPH = LocalGraph(file_paths=kg_files)
# COURSEKG_GRAPH = RemoteGraph(
# sparql_endpoint="?"
# )
COURSE_QS = GraphCourseQueryService(queryable=COURSEKG_GRAPH)
PLACEHOLDER_PIPE = RecommendCoursesPipeline(course_query_service=COURSE_QS)
PR_SERVICE = PlanOfStudyRecommenderService(
course_query_service=COURSE_QS
)
@app.route("/escore_api/")
def hello_world():
return "Hello, World!"
@app.route("/escore_api/dummy_get_rec", methods=["GET"])
def dummy_recommend_courses():
args = request.args
# dummy plan of study and student to test
pos = PlanOfStudy(
uri=rdflib.URIRef('placeholder_pos1'),
class_year=2022,
planned_major=None,
planned_degree=None,
completed_courses=frozenset(),
completed_course_sections=frozenset(),
ongoing_course_sections=frozenset(),
planned_courses=frozenset()
)
student = Student(
uri=rdflib.URIRef('placeholder_stud1'),
study_plan=pos,
name='john doe',
class_year=2022,
topics_of_interest=frozenset(),
registered_courses=frozenset(),
advisor=None,
)
context = StudentPOSRequirementContext(student=student, plan_of_study=pos,
requirements=frozenset(COURSE_QS.get_all_requirements()))
rec_courses: Tuple[CourseCandidate, ...] = PLACEHOLDER_PIPE(context=context)
app.logger.info(f'retrieved recommended courses.')
rec_course_codes = [rc.domain_object.course_code.name for rc in rec_courses]
return {'recommend_course_codes': rec_course_codes}
@app.route("/escore_api/get_recommended_courses_for_student", methods=["GET"])
def get_course_recommendation_for_student():
args = request.args
#https%3A%2F%2Ftw.rpi.edu%2Fontology-engineering%2Foe2020%2Fcourse-recommender-individuals%2Fusrowen
student_uri = rdflib.URIRef(args["student_uri"])
student = COURSE_QS.get_student_by_uri(student_uri=student_uri)
print(f'got student {student.name}')
# will plan of study be saved somehow...? or have person input and pass it via this method...?
# assuming POS will have some structure... ignoring for now since it's not properly used anyways
pos = args.get('plan_of_study', None)
if pos is None:
pos = student.study_plan
print(f'got student plan of study')
context = StudentPOSRequirementContext(student=student, plan_of_study=pos,
requirements=frozenset(COURSE_QS.get_all_requirements()))
rec_courses: Tuple[CourseCandidate, ...] = PLACEHOLDER_PIPE(context=context)
app.logger.info(f'retrieved recommended courses.')
rec_course_codes = [rc.domain_object.course_code.name for rc in rec_courses]
return {'recommend_course_codes': rec_course_codes}
#
# except NotFoundException as e:
# abort(404, description=e)
# except MalformedContentException as e:
# abort(500, description=e)
@app.route("/escore_api/get_pos_rec_for_student", methods=["GET"])
def get_pos_recommendation_for_student():
args = request.args
# ?student_uri=https%3A%2F%2Ftw.rpi.edu%2Fontology-engineering%2Foe2020%2Fcourse-recommender-individuals%2Fusrowen
student_uri = rdflib.URIRef(args["student_uri"])
student = COURSE_QS.get_student_by_uri(student_uri=student_uri)
print(f'got student {student.name}')
pos_rec = PR_SERVICE.get_pos_recommendation_for_target_student(student=student)
rec_sem_courses = {f'{sec.section_object.term} {sec.section_object.year} semester': [cand.domain_object.name
for cand in sec.section_candidates]
for sec in pos_rec.solution_section_sets[1].sections}
return {'recommend_course_per_semester': rec_sem_courses}
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
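# Example client call against the routes above (a sketch; assumes the server is
# running locally on the port configured in app.run and that requests is installed):
#   import requests
#   r = requests.get("http://localhost:5000/escore_api/dummy_get_rec")
#   print(r.json()["recommend_course_codes"])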
|
[
"solashakashirai@gmail.com"
] |
solashakashirai@gmail.com
|
337555e92f42e1fa73ca39f8a52b53366558da88
|
bc8f02c870e939359c32d04016f989f0c7526226
|
/constraint_adder.py
|
be8ac25b95b5f8d031b9f6af0994cabbceb480ea
|
[] |
no_license
|
iamgroot42/gpu_image_segmentation
|
c53a85267cd373a391c27297ac1befb944067550
|
ca97a78a28bf5b76659dcb990a3a312d6d2c6fe3
|
refs/heads/master
| 2021-01-18T23:46:45.615098
| 2017-10-01T11:35:17
| 2017-10-01T11:35:17
| 80,756,196
| 1
| 0
| null | 2017-10-01T11:35:18
| 2017-02-02T18:37:35
|
Cuda
|
UTF-8
|
Python
| false
| false
| 952
|
py
|
import cv2
import sys
object_points = []
background_points = []
counter = 0
data = None
def mouse_callback(event, x, y, flags, params):
global object_points
global background_points
global counter
global data
if event == cv2.EVENT_LBUTTONDOWN:
object_points.append((x, y))
elif event == cv2.EVENT_RBUTTONDOWN:
background_points.append((x, y))
def annotate_images(img_path):
global data
data = cv2.imread(img_path)
cv2.imshow('Image',data)
cv2.setMouseCallback('Image', mouse_callback)
cv2.waitKey(0)
cv2.destroyAllWindows()
def write_points(data, filename):
f = open(filename, 'w')
for point in data:
x,y = point
f.write(str(x) + " " + str(y) + "\n")
f.close()
if __name__ == "__main__":
file_path = sys.argv[1]
print("Left click to label object points")
print("Right click to label background points")
annotate_images(file_path)
write_points(object_points, "OBJECT")
write_points(background_points, "BACKGROUND")
|
[
"anshuman14021@iiitd.ac.in"
] |
anshuman14021@iiitd.ac.in
|
bce2bf6200fd54e982429be07bc78ceb941aa813
|
e0d9844e123fa0706388814b9f29758258589487
|
/version_info.py
|
0e3295c8309a411c0cfb18f664f84b223ed0a2ca
|
[] |
no_license
|
pigpigman8686/seg
|
b5cf5261a5744e89ed5e5b145f60b0ccc3ba2c0c
|
61c3816f7ba76243a872fe5c5fc0dede17026987
|
refs/heads/master
| 2023-04-10T22:22:35.035542
| 2021-04-22T06:24:36
| 2021-04-22T06:24:36
| 360,398,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
# UTF-8
#
# For more details about fixed file info 'ffi' see:
# http://msdn.microsoft.com/en-us/library/ms646997.aspx
VSVersionInfo(
ffi=FixedFileInfo(
# filevers and prodvers should be always a tuple with four items: (1, 2, 3, 4)
# Set not needed items to zero 0. Must always contain 4 elements.
filevers=(
int('0.0.0'.split('.')[0]),
int('0.0.0'.split('.')[1]),
int('0.0.0'.split('.')[2]),
0
),
prodvers=(
int('0.0.0'.split('.')[0]),
int('0.0.0'.split('.')[1]),
int('0.0.0'.split('.')[2]),
0
),
# Contains a bitmask that specifies the valid bits 'flags'r
mask=0x3f,
# Contains a bitmask that specifies the Boolean attributes of the file.
flags=0x0,
# The operating system for which this file was designed.
# 0x4 - NT and there is no need to change it.
OS=0x40004,
# The general type of file.
# 0x1 - the file is an application.
fileType=0x1,
# The function of the file.
# 0x0 - the function is not defined for this fileType
subtype=0x0,
# Creation date and time stamp.
date=(0, 0)
),
kids=[
StringFileInfo(
[
StringTable(
'040904B0',
[StringStruct('CompanyName', 'caicy'),
StringStruct('FileDescription', 'seg'),
StringStruct('FileVersion', '0.0.0.0'),
StringStruct('InternalName', 'seg'),
StringStruct('LegalCopyright', '© caicy. All rights reserved.'),
StringStruct('OriginalFilename', 'seg.exe'),
StringStruct('ProductName', 'seg'),
StringStruct('ProductVersion', '0.0.0.0')])
]),
VarFileInfo([VarStruct('Translation', [1033, 1200])])
]
)
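# This structure is typically consumed by PyInstaller's version-resource option,
# e.g. (the entry-script name below is an assumption):
#   pyinstaller --onefile --version-file version_info.py seg.py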
|
[
"952361195@qq.com"
] |
952361195@qq.com
|
729ffe87975dbbc443c0865d023fd89fd57f7aa9
|
99e57f00fcaf4469c1c1b79f2d17176aaef9a790
|
/sales_forecast/models/sale_allocation.py
|
cdce00e85495ffde7e478e89044f78b1410f3649
|
[] |
no_license
|
detian08/mcl
|
d007ffd0e869f3bd9a8c74bc8473119901f0de2a
|
32d61148326c931aca0107c3894061773f287e33
|
refs/heads/master
| 2022-03-23T19:36:29.608645
| 2019-12-11T10:15:50
| 2019-12-11T10:15:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import uuid
from itertools import groupby
from datetime import datetime, timedelta
from werkzeug.urls import url_encode
from odoo import api, fields, models, _,exceptions
from odoo.exceptions import UserError, AccessError
from odoo.osv import expression
from odoo.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
from odoo.tools.misc import formatLang
from odoo.addons import decimal_precision as dp
class SaleAllocation(models.Model):
_name = "sale.forecaste"
z_period = fields.Selection([('Monthly','Monthly'),('weekly','weekly'),('Daily','Daily')],string ='Period',store = True)
z_from_date = fields.Date(string = 'From Date',store = True)
z_to_date = fields.Date(string = 'To Date',store = True)
z_allow_linw = fields.One2many('sale.forecaste.line', 'z_allow_id', string='allow Lines', copy=True, auto_join=True)
@api.constrains('z_to_date')
def _check_date(self):
for r in self:
            if r.z_to_date < r.z_from_date:
raise models.ValidationError('To Date should be greater than From Date')
class SaleAllocationLine(models.Model):
_name = "sale.forecaste.line"
z_allow_id = fields.Many2one('sale.forecaste',string = 'allow id',store = True)
z_team_id = fields.Many2one('crm.team',string = 'Sale Team',store = True)
z_user_id = fields.Many2one('res.users',string = 'Sale Person',store = True)
z_product_id = fields.Many2one('product.product',string = 'Product',store = True)
z_forecasted_qnty = fields.Float(string = 'Forecasted quantity',store = True)
z_forecasted_val = fields.Float(string = 'Forecasted Value',store = True)
|
[
"adarsh@prixgen.com"
] |
adarsh@prixgen.com
|
93d874fcb0503c0266f53ab533313773a94261c8
|
159d4ae61f4ca91d94e29e769697ff46d11ae4a4
|
/venv/lib/python3.9/site-packages/nbclient/util.py
|
9b672357b05be0de493e5f59054ae05e0086f448
|
[
"MIT"
] |
permissive
|
davidycliao/bisCrawler
|
729db002afe10ae405306b9eed45b782e68eace8
|
f42281f35b866b52e5860b6a062790ae8147a4a4
|
refs/heads/main
| 2023-05-24T00:41:50.224279
| 2023-01-22T23:17:51
| 2023-01-22T23:17:51
| 411,470,732
| 8
| 0
|
MIT
| 2023-02-09T16:28:24
| 2021-09-28T23:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,205
|
py
|
"""General utility methods"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import sys
import inspect
from typing import Callable, Awaitable, Any, Union
def check_ipython() -> None:
# original from vaex/asyncio.py
IPython = sys.modules.get('IPython')
if IPython:
version_str = IPython.__version__ # type: ignore
# We get rid of any trailing ".dev"
version_str = version_str.replace(".dev", "")
IPython_version = tuple(map(int, version_str.split('.')))
if IPython_version < (7, 0, 0):
raise RuntimeError(f'You are using IPython {IPython.__version__} ' # type: ignore
'while we require 7.0.0+, please update IPython')
def check_patch_tornado() -> None:
"""If tornado is imported, add the patched asyncio.Future to its tuple of acceptable Futures"""
# original from vaex/asyncio.py
if 'tornado' in sys.modules:
import tornado.concurrent # type: ignore
if asyncio.Future not in tornado.concurrent.FUTURES:
tornado.concurrent.FUTURES = \
tornado.concurrent.FUTURES + (asyncio.Future, ) # type: ignore
def just_run(coro: Awaitable) -> Any:
"""Make the coroutine run, even if there is an event loop running (using nest_asyncio)"""
# original from vaex/asyncio.py
loop = asyncio._get_running_loop()
if loop is None:
had_running_loop = False
try:
loop = asyncio.get_event_loop()
except RuntimeError:
# we can still get 'There is no current event loop in ...'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
else:
had_running_loop = True
if had_running_loop:
# if there is a running loop, we patch using nest_asyncio
# to have reentrant event loops
check_ipython()
import nest_asyncio
nest_asyncio.apply()
check_patch_tornado()
return loop.run_until_complete(coro)
def run_sync(coro: Callable) -> Callable:
"""Runs a coroutine and blocks until it has executed.
An event loop is created if no one already exists. If an event loop is
already running, this event loop execution is nested into the already
running one if `nest_asyncio` is set to True.
Parameters
----------
coro : coroutine
The coroutine to be executed.
Returns
-------
result :
Whatever the coroutine returns.
"""
def wrapped(*args, **kwargs):
return just_run(coro(*args, **kwargs))
wrapped.__doc__ = coro.__doc__
return wrapped
async def ensure_async(obj: Union[Awaitable, Any]) -> Any:
"""Convert a non-awaitable object to a coroutine if needed,
and await it if it was not already awaited.
"""
if inspect.isawaitable(obj):
try:
result = await obj
except RuntimeError as e:
if str(e) == 'cannot reuse already awaited coroutine':
# obj is already the coroutine's result
return obj
raise
return result
# obj doesn't need to be awaited
return obj
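# Example usage of run_sync above (a sketch, not part of the library module):
#   async def fetch():
#       return 42
#   fetch_sync = run_sync(fetch)
#   assert fetch_sync() == 42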
|
[
"davidycliao@gmail.com"
] |
davidycliao@gmail.com
|
16f08340f13e5ef8e599df67e8d5494e198b58e8
|
cb8c63aea91220a9272498d5ea6cca0a0738b16a
|
/numberfun.py
|
1215b93e182f08eb2efaf9e5f70760eb790eb933
|
[] |
no_license
|
akantuni/Kattis
|
1265de95bfe507ce7b50451a16f19720b86bef44
|
12f31bb31747096bf157fcf6b1f9242d91654533
|
refs/heads/master
| 2021-12-14T11:18:27.723045
| 2021-12-11T05:43:37
| 2021-12-11T05:43:37
| 111,472,667
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
n = int(input())
for i in range(n):
nums = input().split()
nums = [int(num) for num in nums]
a, b, c = nums
if a + b == c:
print("Possible")
elif a * b == c:
print("Possible")
elif a - b == c or b - a == c:
print("Possible")
elif a == c * b or b == c * a:
print("Possible")
else:
print("Impossible")
a, b = b, a
|
[
"akantuni@gmail.com"
] |
akantuni@gmail.com
|
fbd79e2ea249c672bce152386465c6b69ec3b0fa
|
c67d38c1417c6579175ab4716ac0d84441b5aaa6
|
/format_check.py
|
d7e2d8fa0bdca686a461112932e8dd8839d4259f
|
[] |
no_license
|
huangy10/AutoUpdateArtworks
|
348ff549f40b1c895e186a8a6753c071592e70d0
|
eae91f486213624619ad9481351bac22af6df0d1
|
refs/heads/master
| 2021-01-20T06:22:30.394478
| 2017-03-07T04:51:21
| 2017-03-07T04:51:21
| 83,503,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
# check the format of file names to guarantee the integrity
def check_folder_structure(root_path):
pass
def check_data_integrity(root_path):
"""
Check the integrity of image set
:param root_path: the root path where images are stored
:return: Boolean
"""
pass
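# A minimal sketch of what check_data_integrity could verify, assuming that
# "integrity" simply means every file under root_path exists and is non-empty
# (the actual rules are not specified by the stub above):
#   import os
#   def _check_data_integrity_sketch(root_path):
#       for dirpath, _, files in os.walk(root_path):
#           for name in files:
#               if os.path.getsize(os.path.join(dirpath, name)) == 0:
#                   return False
#       return True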
|
[
"woody1573@163.com"
] |
woody1573@163.com
|
62ef51945e8fd4e850c7446372a0058b0ce54a21
|
33f2f4ed5242f256e2a31145125dad91699c1ead
|
/Leetcode/Contests/weekly_200_find_winner.py
|
4d5aff5422154a9770e74affeafcf60927731bf5
|
[] |
no_license
|
Zahidsqldba07/competitive-programming-1
|
b04b2962ce7bc4454008a3cbb7bee88c0e02251d
|
e35b3a1c95c559b7754d4fabf8d2c4f09b0c42b2
|
refs/heads/master
| 2023-07-11T01:39:38.188489
| 2021-08-29T19:29:47
| 2021-08-29T19:29:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
# 5476. Find the Winner of an Array Game
'''
Given an integer array arr of distinct integers and an integer k.
A game will be played between the first two elements of the array (i.e. arr[0] and arr[1]). In each round of the game, we compare arr[0] with arr[1], the larger integer wins and remains at position 0 and the smaller integer moves to the end of the array. The game ends when an integer wins k consecutive rounds.
Return the integer which will win the game.
It is guaranteed that there will be a winner of the game.
'''
# Simulation: move the loser of each round to the back of the array and stop
# once the element at position 0 has won k consecutive rounds.
class Solution:
    def getWinner(self, arr: List[int], k: int) -> int:
        past_winner = -1
        won_rounds = 0
        while won_rounds < k:
            if arr[0] > arr[1]:
                arr.append(arr.pop(1))   # the loser of the round moves to the back
            else:
                arr.append(arr.pop(0))   # new winner: the old front element moves to the back
            if arr[0] == past_winner:
                won_rounds += 1
            else:
                won_rounds = 1
                past_winner = arr[0]
        return past_winner
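# A linear-time alternative (a sketch added for illustration; the helper name is
# hypothetical): the answer is the first value to beat k consecutive opponents,
# or the overall maximum if no value reaches k wins within one pass.
def get_winner_one_pass(arr, k):
    winner, streak = arr[0], 0
    for x in arr[1:]:
        if x > winner:
            winner, streak = x, 1
        else:
            streak += 1
        if streak == k:
            break
    return winner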
|
[
"shinghos@mit.edu"
] |
shinghos@mit.edu
|
d6133e030913d6f52d6462bdb35d3f36d7348abf
|
dd861ad8a33e1ec43a969746ec58efbbd877ca58
|
/telusko/urls.py
|
607c4907b6abde60fe968f17fa05fe98bf638a5c
|
[] |
no_license
|
sajibuzzaman/telusko-DjangoProject
|
54c74b1136f4d69dda092fe4ab03958214bc4e60
|
c4a8cadfa18544bbfe4c359c730cbc4e2ef318e8
|
refs/heads/master
| 2023-03-05T22:10:58.583721
| 2021-02-14T20:39:09
| 2021-02-14T20:39:09
| 338,895,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
"""telusko URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('travello.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls'))
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"muhammadsajibuzzaman1998@gmail.com"
] |
muhammadsajibuzzaman1998@gmail.com
|
07fccbff6dc36b6923a3339363c214e6ebc79309
|
ecce8a10aabb24019296cebaa46503f91876796f
|
/football_app/football_app/referee/urls.py
|
da7d9c7f2911a0055d6de80c5285c7b36bb0e0b1
|
[] |
no_license
|
Yeldarmt/DJangoFootballApp
|
28450d60fbd0ec98bdf6d223545e17062442f970
|
d9568cd48089c0be55217d8aecadf65053b72420
|
refs/heads/master
| 2022-11-26T16:19:34.252927
| 2020-04-26T18:09:52
| 2020-04-26T18:09:52
| 237,893,654
| 0
| 0
| null | 2022-11-22T05:28:37
| 2020-02-03T05:42:24
|
Python
|
UTF-8
|
Python
| false
| false
| 216
|
py
|
from django.urls import path
from football_app.referee.views import RefereesListView, RefereeDetailView
urlpatterns = [
path('', RefereesListView.as_view()),
path('<int:pk>/', RefereeDetailView.as_view())
]
|
[
"eldarmukhametkazin@gmail.com"
] |
eldarmukhametkazin@gmail.com
|
31d025fb82a70ab3eec3800b715a265d2df76bf7
|
5301656114df4d80c3353536d85e1d15829b9839
|
/Frontales/.metadata/.plugins/org.eclipse.core.resources/.history/4b/3004f7adcb4500111f76ccb337b0ec7c
|
6cadea6b43bd7bbc821b8e36d65d68fbe7346c30
|
[] |
no_license
|
Ducatel/TeXloud
|
5f383c3fa2c27b01555574fd513d6f551e302b81
|
38f8be8857e1c6de2e103bbbe39707b49e1375aa
|
refs/heads/master
| 2021-01-01T19:34:51.563829
| 2012-03-01T12:05:13
| 2012-03-01T12:05:13
| 2,885,054
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,575
|
#!/usr/bin/python
# -*-coding:utf-8 -*
'''
Created on 10 dec. 2011
@author: Davis Ducatel
'''
import socket
from re import match
import threading
import json
import Ordonnanceur
class Frontal(object):
'''
    Class that manages the compilation front-end server through:
    its IP address
    its listening port
    its socket
    its compilation-server scheduler
    its data-server scheduler
'''
def __init__(self,adresse,port):
'''
        Constructor of the front-end server
        @param adresse: IP address of the server
        @param port: listening port number
        @raise ValueError: raised if the port or the address is invalid
'''
regexAdresse="^[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}$"
regexPort="^[0-9]{1,5}$"
if match(regexAdresse,adresse) and match(regexPort,str(port)):
if isinstance(port, int):
self._adresse = adresse
self._port = port
else:
raise ValueError
else:
raise ValueError
self._ordonnanceurData=Ordonnanceur.Ordonnanceur("./../fichierServeur.xml","data")
self._ordonnanceurCompilation=Ordonnanceur.Ordonnanceur("./../fichierServeur.xml","compilation")
def lanceServeur(self):
"""
        Method that puts the socket into listening mode,
        then accepts each incoming connection and handles it in a new thread
"""
self._sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.bind((self._adresse, self._port))
self._sock.listen(5)
while 1:
client, addr=self._sock.accept()
            threading.Thread(target=self.getRequestOfHTTPServer,args=(client,addr)).start()
def getRequestOfHTTPServer(self,client,addr):
"""
        Method that retrieves the request sent by the web server
        and processes it
"""
taille=1
messageComplet=""
while taille>0:
message=client.recv(1024)
message=message.decode()
messageComplet+=message
taille=len(message)
client.close()
        self.routeRequest(messageComplet)
def routeRequest(self,requeteJSON):
'''
        Method that routes the request to the proper handler so it can be reformatted,
        then forwards it to the appropriate data server
        @param requeteJSON: the request to examine (in JSON format)
'''
requete=json.loads(requeteJSON)
if requete['label']=="create":
adresseIP,port,req=self.requestCreateNewUserDataSpace(requete)
elif requete['label']=="getProject":
adresseIP,port,req=self.requestGetProject(requete)
elif requete['label']=="compile":
adresseIP,port,req=self.requestCompile(requete)
elif requete['label']=="getFile":
adresseIP,port,req=self.requestGetFile(requete)
elif requete['label']=="deleteFile":
adresseIP,port,req=self.requestDeleteFile(requete)
elif requete['label']=="deleteProject":
adresseIP,port,req=self.requestDeleteProject(requete)
elif requete['label']=="sync":
adresseIP,port,req=self.requestSync(requete)
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((adresseIP,port))
s.send(json.dumps(req))
s.send(self._messageEnd)
s.close()
    def sendRequest(self):
        pass
def requestCreateNewUserDataSpace(self,requete):
"""
        Method that asks the data server to create a new
        storage space for the user
        @param requete: request to reformat and route (Python dict)
        @return: the IP address of the data server
        @return: the connection port on the data server
        @return: the request (dict)
"""
        # Get a data server from the scheduler
serveur=self._ordonnanceurData.getServeur()
return serveur.adresseIP,serveur.port,requete
def requestGetProject(self,requete):
"""
        Method that asks the data server to send back a project
        @param requete: request to reformat and route (Python dict)
"""
adresseIP=requete.pop('servDataIp')
port=requete.pop('servDataPort')
return adresseIP,port,requete
def requestCompile(self,requete):
"""
        Method that asks the data server to start a compilation
        via a compilation server
        @param requete: request to reformat and route (Python dict)
"""
adresseIP=requete.pop('servDataIp')
port=requete.pop('servDataPort')
serveur=self._ordonnanceurCompilation.getServeur()
requete['servCompileIP']=serveur.adresseIP
requete['servCompilePort']=serveur.port
return adresseIP,port,requete
def requestGetFile(self,requete):
"""
        Method that asks the data server to send back a file
        @param requete: request to reformat and route (Python dict)
"""
adresseIP=requete.pop('servDataIp')
port=requete.pop('servDataPort')
return adresseIP,port,requete
def requestDeleteFile(self,requete):
"""
        Method that asks the data server to delete a file
        @param requete: request to reformat and route (Python dict)
"""
adresseIP=requete.pop('servDataIp')
port=requete.pop('servDataPort')
return adresseIP,port,requete
def requestDeleteProject(self,requete):
"""
        Method that asks the data server to delete a project
        @param requete: request to reformat and route (Python dict)
"""
adresseIP=requete.pop('servDataIp')
port=requete.pop('servDataPort')
return adresseIP,port,requete
def requestSync(self,requete):
"""
        Method that asks the data server to perform a synchronization
        @param requete: request to reformat and route (Python dict)
"""
adresseIP=requete.pop('servDataIp')
port=requete.pop('servDataPort')
return adresseIP,port,requete
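# Example of the JSON request shape routeRequest dispatches on (illustrative
# values; only the key names come from the code above):
#   {"label": "compile", "servDataIp": "192.168.1.10", "servDataPort": 4242, ...}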
|
[
"hannibal@hannibal-laptop.(none)"
] |
hannibal@hannibal-laptop.(none)
|
|
7972415cb517ef3bfc66323af6cb5649b2c53fb7
|
c705252e5368efab6324f2c1716d50002ad22e80
|
/1-2 first_exam_2.py
|
7898110c7f46dc85665e8b2452fb488bb3fa3e84
|
[] |
no_license
|
younkyounghwan/python-exam
|
dd93124b91bc1d4f2690e9e3e9cb58ff8ef5623d
|
c0ed718d5dfcde65320da7c30f9a014b00e35151
|
refs/heads/master
| 2020-04-05T03:48:50.878125
| 2018-11-07T10:20:38
| 2018-11-07T10:20:38
| 156,528,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
"""
day 2
"""
print("day 2")
"""
"""
#
f = 3.4
print(f)
i = 1
print(i)
b = True
print(b)
s ="1"
print(i+f)
#print(s+i)
print(int(s)+i)
print(s+int(s))
i = 57
j = 28
|
[
"youn7117@naver.com"
] |
youn7117@naver.com
|
e0a494ce4ad4d72e812f2860cf7f862e5d9881f4
|
bfce201a7971b05b2fbe0af4819081d71c3850db
|
/fermi/Pass7Validation/compare_flight_mc_psf/v1/plot.py
|
8b5dbc36066c171de57395421b6155e1fcbbbf1a
|
[] |
no_license
|
joshualande/PhD-Work
|
2fe52f82f726ad6166937a3daed342c8cd9aee2f
|
1d834a19b5a9347ccad75bd5a76126d5fd840c64
|
refs/heads/master
| 2020-04-18T15:14:19.127171
| 2014-01-26T22:58:10
| 2014-01-26T22:58:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,911
|
py
|
from matplotlib import rc
rc('ps',usedistiller='xpdf')
rc('text', usetex=True)
rc('font', family='serif', serif="Computer Modern Roman")
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
from matplotlib.patheffects import withStroke
import matplotlib
import h5py
import pylab as P
import numpy as np
from scipy.stats import chi2
file=h5py.File('/nfs/slac/g/ki/ki03/lande/fermi/data/monte_carlo/compare_flight_mc_psf/v1/merged.hdf5')
flux=np.asarray(file['flux_mc'])
ts_ext_P7SOURCE_V4=np.asarray(file['ts_ext_P7SOURCE_V4'])
ts_ext_P7SOURCE_V6=np.asarray(file['ts_ext_P7SOURCE_V6'])
ts=np.asarray(file['ts_P7SOURCE_V6'])
ts_point = ts - ts_ext_P7SOURCE_V6
index=np.asarray(file['index_mc'])
fig=P.figure(figsize=(5,3))
fig.subplots_adjust(right=0.95, top=0.95, bottom=0.15)
from mpl_toolkits.axes_grid.axes_grid import Grid
grid = Grid(fig, 111, nrows_ncols = (1, 2), axes_pad=0.0)
min_cdf = 1e-4
format_float = lambda f: r'$%s$' % str(f).replace('e-0',r'\times 10^')
print format_float(1e-4)
for i,(name,irf,all_ts_ext) in enumerate([
['(a)','P7SOURCE_V6',ts_ext_P7SOURCE_V6],
['(b)','P7SOURCE_V4',ts_ext_P7SOURCE_V4]
]):
max_ts=max(all_ts_ext) + 1
axes = grid[i]
grid[i].add_artist(
AnchoredText(name, frameon=False, loc=2, prop=dict(size=14,
path_effects=[withStroke(linewidth=5,foreground='w')])))
index_mc=2
for flux_mc, color in zip(
reversed([ 1e-8, 3e-8, 1e-7, 3e-7, 1e-6, 3e-6 ]),
['red', 'blue', 'green', 'black', 'orange', 'gray']
):
kwargs=dict(color=color)
select = (flux==flux_mc) & (index==index_mc) & (ts_point>25)
print 'index=%s, flux=%s, num=%s' % (index_mc,flux_mc,np.sum(select))
print np.mean(ts_point[select])
if np.sum(select) < 100:
continue
print irf, flux_mc, select
ts_ext = all_ts_ext[select]
ts_ext[ts_ext<0] = 0
bins=np.linspace(0,max_ts,1e3)
bin_center=bins[:-1] + (bins[1]-bins[0])/2
binned=np.histogram(ts_ext,bins=bins)[0]
if any(ts_ext>max_ts):
print '> max: ',irf,ts_ext[np.where(ts_ext>max_ts)]
cdf=np.cumsum(binned[::-1])[::-1]
cdf=cdf.astype(float)/cdf[0] # normalize
cdf[cdf == 0] = min_cdf
axes.semilogy(bin_center,cdf,linewidth=1,label=format_float(flux_mc), **kwargs)
y = chi2.sf(bins,1)/2
axes.semilogy(bins, y, 'red', linewidth=1, label='$\chi^2_1/2$', zorder=0, dashes=(5,3))
axes.set_ylim(min_cdf,1)
axes.set_xlabel(r'$\mathrm{TS}_\mathrm{ext}$')
axes.set_ylabel('Cumulative Density')
from lande_plotting import fix_axesgrid
fix_axesgrid(grid)
prop = matplotlib.font_manager.FontProperties(size=10)
grid[0].legend(loc=1, prop=prop, columnspacing=1)
grid[1].set_xlim(0,100)
P.savefig('extension_test.eps')
P.savefig('extension_test.pdf')
|
[
"lande@37a9682d-6443-41a2-8582-b44379b6e86f"
] |
lande@37a9682d-6443-41a2-8582-b44379b6e86f
|
5ad042bd73c818fb6e254df6b4cf72c179ab9b10
|
87b904ebf11d416567a7e49b91b8e9934f67c6f3
|
/insert_row_simple.py
|
e642a918cd16c4942e211712d5181e4c9c09765b
|
[
"MIT"
] |
permissive
|
NathanKr/pandas-playground
|
a701f524aa48f22f6680e48c597206e10f8222e5
|
a5355c59cb61ca3a7dcce590ed42d56a6b943783
|
refs/heads/main
| 2023-06-05T11:07:52.061327
| 2021-07-02T02:35:15
| 2021-07-02T02:35:15
| 328,917,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
import pandas as pd
df = pd.DataFrame([])
# most simple insert row
df = df.append(pd.Series([1,2,3]) , ignore_index=True) # insert at the end
df = df.append(pd.Series([4,5,6]) , ignore_index=True) # insert at the end
print(df)
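# DataFrame.append is deprecated (and removed in pandas 2.x); an equivalent
# sketch of the same end-of-frame insert using pd.concat:
row = pd.DataFrame([[7, 8, 9]])
df = pd.concat([df, row], ignore_index=True)  # also inserts at the end
print(df)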
|
[
"natankrasney@gmail.com"
] |
natankrasney@gmail.com
|
c116c570c2a56abc674c9fb63d86381c90576e47
|
4d6975caece0acdc793a41e8bc6d700d8c2fec9a
|
/leetcode/1484.linked-list-in-binary-tree/1484.linked-list-in-binary-tree.py
|
9077aff3937204041003e1371d38f9ce9273451b
|
[] |
no_license
|
guiconti/workout
|
36a3923f2381d6e7023e127100409b3a2e7e4ccb
|
5162d14cd64b720351eb30161283e8727cfcf376
|
refs/heads/master
| 2021-08-03T10:32:02.108714
| 2021-07-26T04:38:14
| 2021-07-26T04:38:14
| 221,025,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def isSubPath(self, head: ListNode, root: TreeNode) -> bool:
        # Try to match the list starting at every tree node, walking downward
        # while the values agree.
        def match(node: ListNode, tree: TreeNode) -> bool:
            if node is None:
                return True    # the whole list has been matched
            if tree is None:
                return False   # the tree ended before the list did
            return tree.val == node.val and (match(node.next, tree.left) or match(node.next, tree.right))

        if root is None:
            return False
        return match(head, root) or self.isSubPath(head, root.left) or self.isSubPath(head, root.right)
|
[
"guibasconti@gmail.com"
] |
guibasconti@gmail.com
|
7e8bd342fc0bb96f6ba727e866bcc18731ac5afa
|
d5735851b605e8960ca321c0e332c7ed810f3e6d
|
/Python/files/eatenApples.py
|
213e017a02f984a27f4332e28fc80d8f3097ee15
|
[] |
no_license
|
pyj4104/LeetCode-Practice
|
6ed0cffd3605be6e187bedeb99e3b4b430604913
|
6a7d033bfd687ad2a0d79ac6a7f50ace1625f631
|
refs/heads/master
| 2023-03-12T00:23:50.913239
| 2021-02-25T03:38:11
| 2021-02-25T03:38:11
| 306,699,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
import heapq as h
class Solution:
def eatenApples(self, apples: [int], days: [int]) -> int:
pQueue = []
dateToday = 0
numEaten = 0
for i in range(len(days)):
h.heappush(pQueue, [days[dateToday]+dateToday, apples[dateToday]])
foundStoredApple = False
while pQueue and not foundStoredApple:
thingsToEat = h.heappop(pQueue)
if thingsToEat[0] > dateToday and thingsToEat[1] > 0:
foundStoredApple = True
if foundStoredApple:
numEaten += 1
thingsToEat[1] -= 1
if thingsToEat[0] > dateToday and thingsToEat[1] > 0:
h.heappush(pQueue, thingsToEat)
dateToday += 1
while pQueue:
thingsToEat = h.heappop(pQueue)
if thingsToEat[0] > dateToday and thingsToEat[1] > 0:
numEaten += 1
thingsToEat[1] -= 1
else:
continue
h.heappush(pQueue, thingsToEat)
dateToday += 1
return numEaten
|
[
"pyj4104@hotmail.com"
] |
pyj4104@hotmail.com
|
d16ae67b41b1528bb0116d6a8e0870f587fefd41
|
89e4c3dd91ceb3a4a5e74cfaedbb795152ebd1f9
|
/lc105_bt.py
|
dd379a2e68cceb2a57826262c2634dc06cff9c75
|
[] |
no_license
|
Mela2014/lc_punch
|
a230af2c9d40b1af4932c800e72698de5b77d61a
|
498308e6a065af444a1d5570341231e4c51dfa3f
|
refs/heads/main
| 2023-07-13T03:44:56.963033
| 2021-08-25T05:44:40
| 2021-08-25T05:44:40
| 313,742,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
if not preorder: return None
root_val = preorder[0]
root = TreeNode(root_val)
idx = inorder.index(root_val)
root.left = self.buildTree(preorder[1:idx+1], inorder[:idx])
root.right = self.buildTree(preorder[idx+1:], inorder[idx+1:])
return root
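# A common optimization (a sketch, not part of the original solution; the class
# name is illustrative and the TreeNode/List names are the ones assumed by the
# header comment): map each value to its inorder index once, then recurse on
# index ranges instead of slicing, avoiding the O(n) list.index scan per node.
class SolutionIndexed:
    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
        idx = {v: i for i, v in enumerate(inorder)}
        self.pre_i = 0

        def build(lo: int, hi: int) -> TreeNode:   # inorder range [lo, hi)
            if lo >= hi:
                return None
            val = preorder[self.pre_i]
            self.pre_i += 1
            node = TreeNode(val)
            mid = idx[val]
            node.left = build(lo, mid)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(inorder))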
|
[
"noreply@github.com"
] |
Mela2014.noreply@github.com
|
5cd707b6becf1983598806138c1b602763026b7a
|
1f5f8f95530003c6c66419519d78cb52d21f65c0
|
/projects/golem_api/tests/users/edit_user.py
|
8ede3ba2a8db79addec2b7d17f38ee7b733cf6f1
|
[] |
no_license
|
golemhq/golem-tests
|
c5d3ab04b1ea3755d8b812229feb60f513d039ac
|
dff8fd3a606c3d1ef8667aece6fddef8ac441230
|
refs/heads/master
| 2023-08-17T23:05:26.286718
| 2021-10-04T20:34:17
| 2021-10-04T20:34:17
| 105,579,436
| 4
| 1
| null | 2018-11-19T00:14:24
| 2017-10-02T20:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,058
|
py
|
from golem import actions
from projects.golem_api.pages import users
def test_edit_user(data):
username = actions.random_str()
users.create_new_user(username, '123456', 'test@test.com')
new_username = actions.random_str()
new_email = 'test2@test.com'
new_project_permissions = [{'project': "projectname", 'permission': "admin"}]
response = users.edit_user(username, new_username, new_email, False, new_project_permissions)
assert response.status_code == 200
response = users.get_user(new_username)
assert response.json()['username'] == new_username
assert response.json()['email'] == new_email
assert response.json()['is_superuser'] is False
assert response.json()['projects'] == {'projectname': 'admin'}
def test_edit_user_convert_to_superuser(data):
username = actions.random_str()
users.create_new_user(username, '123456', 'test@test.com')
response = users.get_user(username)
assert response.json()['is_superuser'] is False
users.edit_user(username, new_is_superuser=True)
response = users.get_user(username)
assert response.json()['is_superuser'] is True
def test_edit_user_invalid_email(data):
username = actions.random_str()
users.create_new_user(username, '123456', 'test@test.com')
invalid_email = 'test@test'
response = users.edit_user(username, new_email=invalid_email)
assert response.status_code == 200
assert response.json() == ['{} is not a valid email address'.format(invalid_email)]
def test_edit_user_existing_username(data):
username1 = actions.random_str()
username2 = actions.random_str()
users.create_new_user(username1, '123456')
users.create_new_user(username2, '123456')
response = users.edit_user(username1, new_username=username2)
assert response.status_code == 200
assert response.json() == ['Username {} already exists'.format(username2)]
def test_edit_user_blank_username(data):
username = actions.random_str()
users.create_new_user(username, '123456', 'test@test.com')
response = users.edit_user(username, new_username='')
assert response.status_code == 200
assert response.json() == ['Username cannot be blank']
def test_edit_user_doesnt_exist(data):
username = actions.random_str()
response = users.edit_user(username, new_username=actions.random_str())
assert response.status_code == 200
assert response.json() == ['Username {} does not exist'.format(username)]
def test(data):
username = actions.random_str()
users.create_new_user(username, '123456', 'test@test.com')
new_username = actions.random_str()
users.edit_user(username, new_username=new_username)
response = users.get_user(new_username)
assert response.json()['username'] == new_username
assert response.json()['email'] == 'test@test.com'
users.edit_user(new_username, new_email='test2@test.com')
response = users.get_user(new_username)
assert response.json()['username'] == new_username
assert response.json()['email'] == 'test2@test.com'
|
[
"luciano@lucianorenzi.com"
] |
luciano@lucianorenzi.com
|
6c942b2bf53f2c5dd3278b4989aed9c2f3790bae
|
c6ec292a52ea54499a35a7ec7bc042a9fd56b1aa
|
/Python/1396.py
|
42df2b39f50cb44cdc466e7f32d62dd8cd8ccc59
|
[] |
no_license
|
arnabs542/Leetcode-38
|
ad585353d569d863613e90edb82ea80097e9ca6c
|
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
|
refs/heads/master
| 2023-02-01T01:18:45.851097
| 2020-12-19T03:46:26
| 2020-12-19T03:46:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
import collections
from statistics import mean   # explicit imports so the snippet also runs outside the LeetCode judge


class UndergroundSystem:
def __init__(self):
self.history = collections.defaultdict(dict)
self.idToStation = dict()
self.stationTostation = collections.defaultdict(dict)
def checkIn(self, id: int, stationName: str, t: int) -> None:
self.history[stationName][id] = t
self.idToStation[id] = stationName
def checkOut(self, id: int, stationName: str, t: int) -> None:
startStation = self.idToStation[id]
startTime = self.history[startStation][id]
self.stationTostation[startStation][stationName] = self.stationTostation[startStation].get(stationName, [])
self.stationTostation[startStation][stationName].append(t - startTime)
def getAverageTime(self, startStation: str, endStation: str) -> float:
return mean(self.stationTostation[startStation][endStation])
# Your UndergroundSystem object will be instantiated and called as such:
# obj = UndergroundSystem()
# obj.checkIn(id,stationName,t)
# obj.checkOut(id,stationName,t)
# param_3 = obj.getAverageTime(startStation,endStation)
|
[
"lo_vegood@126.com"
] |
lo_vegood@126.com
|
af8b45eb284b3530ad3e6119002d939b6b2c6eed
|
d87243c4f3bdd058115846b267964a8b513457a5
|
/shortstories/migrations/0001_initial.py
|
cda48a52ee47a215154c75720299e5dfa2e06f0f
|
[
"MIT"
] |
permissive
|
evenset/ketabdan-project
|
33678b1afafe3cd0f969f624e4aabac10fae718b
|
ea56ad18f64b35714c6c3a0d85e59a3f8514057a
|
refs/heads/develop
| 2021-07-26T16:29:24.011024
| 2018-09-24T23:20:10
| 2018-09-24T23:20:10
| 125,778,476
| 4
| 0
|
MIT
| 2018-09-28T04:00:46
| 2018-03-18T23:49:39
|
Python
|
UTF-8
|
Python
| false
| false
| 946
|
py
|
# Generated by Django 2.0.3 on 2018-07-13 01:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ShortStory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=500)),
('status', models.CharField(choices=[('dr', 'Draft'), ('p', 'Published'), ('b', 'Banned'), ('de', 'Deleted')], max_length=1)),
('publication_date', models.DateField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"you@example.com"
] |
you@example.com
|
82a1d4a6d1260f47e4cd6b966110c9fd65ca757c
|
1fa6c2650c791e35feaf57b87e832613e98797dd
|
/LeetCode/DS - Heap/M K Closest Points to Origin.py
|
2659259e57c56c802f5c7355b47c50a2a063b30e
|
[] |
no_license
|
hz336/Algorithm
|
415a37313a068478225ca9dd1f6d85656630f09a
|
0d2d956d498742820ab39e1afe965425bfc8188f
|
refs/heads/master
| 2021-06-17T05:24:17.030402
| 2021-04-18T20:42:37
| 2021-04-18T20:42:37
| 194,006,383
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,591
|
py
|
"""
We have a list of points on the plane. Find the K closest points to the origin (0, 0).
(Here, the distance between two points on a plane is the Euclidean distance.)
You may return the answer in any order. The answer is guaranteed to be unique (except for the order that it is in.)
Example 1:
Input: points = [[1,3],[-2,2]], K = 1
Output: [[-2,2]]
Explanation:
The distance between (1, 3) and the origin is sqrt(10).
The distance between (-2, 2) and the origin is sqrt(8).
Since sqrt(8) < sqrt(10), (-2, 2) is closer to the origin.
We only want the closest K = 1 points from the origin, so the answer is just [[-2,2]].
Example 2:
Input: points = [[3,3],[5,-1],[-2,4]], K = 2
Output: [[3,3],[-2,4]]
(The answer [[-2,4],[3,3]] would also be accepted.)
"""
"""
Quick Select
Time Complexity: O(n)
Space Complexity: O(1)
"""
class Solution:
def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
vectors = [(p[0] ** 2 + p[1] ** 2, p[0], p[1]) for p in points]
self.quick_select_with_target(vectors, 0, len(vectors) - 1, K)
return [[x, y] for _, x, y in vectors[:K]]
def quick_select_with_target(self, vectors, start, end, target):
if start >= end:
return start
left, right = start, end
pivot = vectors[(start + end) // 2]
while left <= right:
while left <= right and vectors[left] < pivot:
left += 1
while left <= right and vectors[right] > pivot:
right -= 1
if left <= right:
vectors[left], vectors[right] = vectors[right], vectors[left]
left += 1
right -= 1
if target - 1 <= right:
return self.quick_select_with_target(vectors, start, right, target)
if target - 1 >= left:
return self.quick_select_with_target(vectors, left, end, target)
return target
"""
Priority Queue
Time Complexity: O(nlogk)
Space Complexity: O(k)
"""
import heapq
class Solution:
def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
heap = []
for point in points:
distance = self.dist(point, [0, 0])
heapq.heappush(heap, [-distance, point[0], point[1]])
if len(heap) > K:
heapq.heappop(heap)
heap.sort(key=lambda x: (-x[0], x[1], x[2]))
return [[x, y] for _, x, y in heap]
def dist(self, a, b):
if a is None or b is None:
return float('-inf')
return (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2
|
[
"hz336@cornell.edu"
] |
hz336@cornell.edu
|
63f331aa35178b96b3b7a5bff53d76affbd12d84
|
687a57837c2ce1ec366ce05d1a3a3a113552137e
|
/src/neurounits/unit_term_parsing/__init__.py
|
a794123e054901c0ba4e9d7778810a3f939fb2b4
|
[] |
no_license
|
mikehulluk/NeuroUnits
|
ba9974897b2a1807010fdcd141eac7503ba09766
|
ee59a8f7dcce382cb28a0f87b56952e0b7c59f17
|
refs/heads/master
| 2020-04-05T08:07:13.422241
| 2013-07-29T09:06:06
| 2013-07-29T09:06:06
| 2,848,923
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------
from .unitterm_parsing import parse_term
|
[
"mikehulluk@googlemail.com"
] |
mikehulluk@googlemail.com
|
43c114108be58675f3315ffc4f23538067730145
|
15581a76b36eab6062e71d4e5641cdfaf768b697
|
/LeetCode_30days_challenge/2021/August/Set Matrix Zeroes.py
|
51c881a0fe2ad8b202fa896fd86dffc76c4635b2
|
[] |
no_license
|
MarianDanaila/Competitive-Programming
|
dd61298cc02ca3556ebc3394e8d635b57f58b4d2
|
3c5a662e931a5aa1934fba74b249bce65a5d75e2
|
refs/heads/master
| 2023-05-25T20:03:18.468713
| 2023-05-16T21:45:08
| 2023-05-16T21:45:08
| 254,296,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
from typing import List
# Approach 1 with O(M + N) extra memory
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
rows = set()
cols = set()
n = len(matrix)
m = len(matrix[0])
for i in range(n):
for j in range(m):
if matrix[i][j] == 0:
rows.add(i)
cols.add(j)
for row in rows:
for col in range(m):
matrix[row][col] = 0
for col in cols:
for row in range(n):
matrix[row][col] = 0
# Approach 2 with O(1) extra memory
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
n = len(matrix)
m = len(matrix[0])
first_row = first_col = False
for i in range(n):
for j in range(m):
if matrix[i][j] == 0:
if i == 0:
first_row = True
if j == 0:
first_col = True
matrix[0][j] = matrix[i][0] = 2 ** 31
for row in range(1, n):
if matrix[row][0] == 2 ** 31:
for col in range(m):
matrix[row][col] = 0
for col in range(1, m):
if matrix[0][col] == 2 ** 31:
for row in range(n):
matrix[row][col] = 0
if matrix[0][0] == 2 ** 31:
if first_row:
for col in range(m):
matrix[0][col] = 0
if first_col:
for row in range(n):
matrix[row][0] = 0
|
[
"mariandanaila01@gmail.com"
] |
mariandanaila01@gmail.com
|
c6672d7dd3e2446b3f16cf09954a42762c8fceef
|
88b7c57a0d9a7a3b28ebd9d6c12ecbbebc50e8a5
|
/config/settings/dev.py
|
fb7a3dc769830e8f5120adf3b2bf7efddccd22d8
|
[] |
no_license
|
largerbigsuper/beep
|
71438a4c2feae1afd6ecd25899e95f441bf2165b
|
a5d84437d79f065cec168f68210c4344a60d08d1
|
refs/heads/master
| 2022-09-23T02:09:37.117676
| 2020-01-03T06:21:57
| 2020-01-03T06:21:57
| 209,052,138
| 0
| 0
| null | 2022-09-13T23:03:25
| 2019-09-17T12:47:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,456
|
py
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="1ReGaeINNTOIuHNczpQnKUf51jXoc7ZbELmcmgEJM5cun2L31vbVXfrQKPVimrLN",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': env('DB_NAME'),
'USER': env('DB_USER'),
'PASSWORD':env('DB_PASSWORD'),
'HOST': env('DB_HOST'),
'PORT': env('DB_PORT'),
'ATOMIC_REQUESTS': True,
'CONN_MAX_AGE': 10,
'OPTIONS': {
'init_command': 'SET CHARACTER SET utf8mb4',
'charset': 'utf8mb4',
}
}
}
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://redis:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
SESSION_COOKIE_AGE = 365 * 24 * 60 * 60
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
# INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# if env("USE_DOCKER") == "yes":
# import socket
# hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
# INTERNAL_IPS += [ip[:-1] + "1" for ip in ips]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = False
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
CSRF_TRUSTED_ORIGINS = ['servicewechat.com', 'lhxq.top', 'beep.lhxq.top', 'test.beepcrypto.com', '127.0.0.1', '127.0.0.1:8080', '127.0.0.1:7788', '192.168.0.102:7788']
# WeChat mini-program settings
MINI_PRAGRAM_APP_ID = 'wx300f2f1d32b30613'
MINI_PRAGRAM_APP_SECRET = '2d6b9fef49827381af8dd26b4b66f5e5'
MINI_PRAGRAM_LOGIN_URL = 'https://api.weixin.qq.com/sns/jscode2session?appid={}&secret={}&grant_type=authorization_code&js_code='.format(MINI_PRAGRAM_APP_ID, MINI_PRAGRAM_APP_SECRET)
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": ['redis://redis:6379/0'],
},
},
}
CELERY_BROKER_URL = 'redis://redis:6379/2'  # broker configuration, using Redis as the message middleware
CELERY_RESULT_BACKEND = 'redis://redis:6379/2'  # result backend configuration, Redis here as well
CELERY_RESULT_SERIALIZER = 'json'  # result serialization format
CELERY_TIMEZONE = "Asia/Shanghai"
CELERY_ENABLE_UTC = False
|
[
"zaihuazhao@163.com"
] |
zaihuazhao@163.com
|
62acac9f1ebfd24d7c48ad282623209de9fb6711
|
2626f6e6803c8c4341d01f57228a0fe117e3680b
|
/students/MikeShand/Lesson 04/json_save/test/test_savables.py
|
3121cade9de7f296765a3f5f547a6ab833befd8f
|
[] |
no_license
|
kmsnyde/SP_Online_Course2_2018
|
9e59362da253cdec558e1c2f39221c174d6216f3
|
7fe8635b47d4792a8575e589797260ad0a2b027e
|
refs/heads/master
| 2020-03-19T17:15:03.945523
| 2018-09-05T22:28:55
| 2018-09-05T22:28:55
| 136,750,231
| 0
| 0
| null | 2018-06-09T19:01:52
| 2018-06-09T19:01:51
| null |
UTF-8
|
Python
| false
| false
| 2,565
|
py
|
#!/usr/bin/env python
"""
tests for the savable objects
"""
import pytest
import json
from json_save.saveables import *
# The simple, almost json <-> python ones:
# Type, default, example
basics = [(String, "This is a string"),
(Int, 23),
(Float, 3.1458),
(Bool, True),
(Bool, False),
(List, [2, 3, 4]),
(Tuple, (1, 2, 3.4, "this")),
(List, [[1, 2, 3], [4, 5, 6]]),
(List, [{"3": 34}, {"4": 5}]), # list with dicts in it.
(Dict, {"this": {"3": 34}, "that": {"4": 5}}) # dict with dicts
]
@pytest.mark.parametrize(('Type', 'val'), basics)
def test_basics(Type, val):
js = json.dumps(Type.to_json_compat(val))
val2 = Type.to_python(json.loads(js))
assert val == val2
assert type(val) == type(val2)
nested = [(List, [(1, 2), (3, 4), (5, 6)]), # tuple in list
(Tuple, ((1, 2), (3, 4), (5, 6))), # tuple in tuple
]
# This maybe should be fixed in the future??
@pytest.mark.xfail(reason="nested not-standard types not supported")
@pytest.mark.parametrize(('Type', 'val'), nested)
def test_nested(Type, val):
print("original value:", val)
js = json.dumps(Type.to_json_compat(val))
print("js is:", js)
val2 = Type.to_python(json.loads(js))
print("new value is:", val2)
assert val == val2
assert type(val) == type(val2)
dicts = [{"this": 14, "that": 1.23},
{34: 15, 23: 5},
{3.4: "float_key", 1.2: "float_key"},
{(1, 2, 3): "tuple_key"},
{(3, 4, 5): "tuple_int", ("this", "that"): "tuple_str"},
{4: "int_key", 1.23: "float_key", (1, 2, 3): "tuple_key"},
]
@pytest.mark.parametrize('val', dicts)
def test_dicts(val):
js = json.dumps(Dict.to_json_compat(val))
val2 = Dict.to_python(json.loads(js))
assert val == val2
assert type(val) == type(val2)
    # check that the types of the keys are the same
for k1, k2 in zip(val.keys(), val2.keys()):
assert type(k1) is type(k2)
# These are dicts that can't be saved
# -- mixing string and non-string keys
bad_dicts = [{"this": "string_key", 4: "int_key"},
{3: "int_key", "this": "string_key"},
{None: "none_key", "this": "string_key"},
{"this": "string_key", None: "none_key"},
]
@pytest.mark.parametrize("val", bad_dicts)
def test_bad_dicts(val):
with pytest.raises(TypeError):
Dict.to_json_compat(val)
|
[
"kmsnyder2@verizon.net"
] |
kmsnyder2@verizon.net
|
2d670cf46ab518d12618a5c7cd214f15721b1946
|
1afec7d1d3099138b5afe5fd73dfd3d24ff4eb15
|
/test/functional/feature_minchainwork.py
|
bf9177d0a7c94fd1dfa263477971b818dbe15ed0
|
[
"MIT"
] |
permissive
|
republic-productions/finalcoin
|
5c7c6b0734178fe22db63f0946ec555f59e8d0eb
|
7c0f335ded1e5c662034c822ca2c474b8e62778f
|
refs/heads/main
| 2023-09-04T17:04:32.683667
| 2021-10-14T17:45:22
| 2021-10-14T17:45:22
| 417,209,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,109
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Finalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import FinalcoinTestFramework
from test_framework.util import assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(FinalcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
self.connect_nodes(i+1, i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info(f"Testing relay across node 1 (minChainWork = {self.node_min_work[1]})")
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info(f"Generating {num_blocks_to_generate} blocks on node0")
hashes = self.generatetoaddress(self.nodes[0], num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
self.log.info(f"Node0 current chain work: {self.nodes[0].getblockheader(hashes[-1])['chainwork']}")
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.generatetoaddress(self.nodes[0], 1, self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info(f"Blockcounts: {[n.getblockcount() for n in self.nodes]}")
if __name__ == '__main__':
MinimumChainWorkTest().main()
|
[
"republicproductions@protonmail.com"
] |
republicproductions@protonmail.com
|
0283e43ce1b3b31585f53812085b759b79811cc6
|
5182897b2f107f4fd919af59c6762d66c9be5f1d
|
/.history/src/Individuo_20200710164705.py
|
ffc91aee5f741018ab08a59df5c8052c8ae54c56
|
[
"MIT"
] |
permissive
|
eduardodut/Trabalho_final_estatistica_cd
|
422b7e702f96291f522bcc68d2e961d80d328c14
|
fbedbbea6bdd7a79e1d62030cde0fab4e93fc338
|
refs/heads/master
| 2022-11-23T03:14:05.493054
| 2020-07-16T23:49:26
| 2020-07-16T23:49:26
| 277,867,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,332
|
py
|
import random
class Individuo():
SADIO = 0
    INFECTADO_TIPO_1 = 1  # asymptomatic individuals and the initial infected one
    INFECTADO_TIPO_2 = 2  # symptomatic
CURADO = 3
MORTO = 4
def __init__(
self,
status,
atualizacoes_cura,
posicao):
self.status = status
self.atualizacoes_cura = atualizacoes_cura
self.posicao = posicao
def __repr__(self):
        return str(self.status)
def infectar(self, chance_infeccao, chance_infeccao_tipo2):
saida = Individuo.SADIO
if (self.status == Individuo.INFECTADO_TIPO_2 or self.status == Individuo.INFECTADO_TIPO_1):
            # random number for the chance of infecting the neighbour
rng_infeccao = random.random()
if rng_infeccao <= chance_infeccao:
                # random number for the chance of a type 1 or type 2 infection
rng_infeccao_tipo2 = random.random()
if rng_infeccao_tipo2 <= chance_infeccao_tipo2:
saida = Individuo.INFECTADO_TIPO_2
else:
saida = Individuo.INFECTADO_TIPO_1
return saida
def checagem_morte(self, chance_morte):
if self.status == Individuo.INFECTADO_TIPO_2:
rng_morte = random.random()
if rng_morte <= chance_morte:
self.status = Individuo.MORTO
return self.status
return self.checagem_cura()
def checagem_cura(self):
if self.status == Individuo.INFECTADO_TIPO_2 or self.status == Individuo.INFECTADO_TIPO_1:
self.atualizacoes_cura = self.atualizacoes_cura - 1
if self.atualizacoes_cura == 0:
self.status = Individuo.CURADO
return self.status
class Fabrica_individuo():
def __init__(
self,
        atualizacoes_cura):  # number of updates needed to cure a type 1 or type 2 individual
self.atualizacoes_cura = atualizacoes_cura
def criar_individuo(self, status_inicial, posicao):
return Individuo(
status_inicial,
self.atualizacoes_cura,
posicao)
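# A small usage sketch (illustrative only, not part of the original module):
# create a factory whose individuals need 5 updates to be cured, seed a type 1
# infected individual and let it try to infect a healthy neighbour. The chance
# values below are arbitrary example parameters.
if __name__ == "__main__":
    fabrica = Fabrica_individuo(atualizacoes_cura=5)
    paciente_zero = fabrica.criar_individuo(Individuo.INFECTADO_TIPO_1, (0, 0))
    print(paciente_zero.infectar(chance_infeccao=0.5, chance_infeccao_tipo2=0.2))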
|
[
"eduardo_dut@edu.unifor.br"
] |
eduardo_dut@edu.unifor.br
|
d29f521a654b15c312751d8f72d1ec6c1fa0ff3d
|
d0081f81996635e913b1f267a4586eb0bfd3dcd5
|
/tests/unit/dataactvalidator/test_fabsreq4.py
|
489d9acbfbffa7cd67a5f92be51975de4852decc
|
[
"CC0-1.0"
] |
permissive
|
fedspendingtransparency/data-act-broker-backend
|
71c10a6c7c284c8fa6556ccc0efce798870b059b
|
b12c73976fd7eb5728eda90e56e053759c733c35
|
refs/heads/master
| 2023-09-01T07:41:35.449877
| 2023-08-29T20:14:45
| 2023-08-29T20:14:45
| 57,313,310
| 55
| 36
|
CC0-1.0
| 2023-09-13T16:40:58
| 2016-04-28T15:39:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabsreq4'
def test_column_headers(database):
expected_subset = {'row_number', 'business_funds_indicator', 'correction_delete_indicatr',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test BusinessFundsIndicator is required for all submissions except delete records. """
fabs = FABSFactory(correction_delete_indicatr='C', business_funds_indicator='REC')
fabs_2 = FABSFactory(correction_delete_indicatr='', business_funds_indicator='NON')
# Test ignoring for D records
fabs_3 = FABSFactory(correction_delete_indicatr='d', business_funds_indicator=None)
fabs_4 = FABSFactory(correction_delete_indicatr='D', business_funds_indicator='')
fabs_5 = FABSFactory(correction_delete_indicatr='D', business_funds_indicator='RE')
errors = number_of_errors(_FILE, database, models=[fabs, fabs_2, fabs_3, fabs_4, fabs_5])
assert errors == 0
def test_failure(database):
""" Test fail BusinessFundsIndicator is required for all submissions except delete records. """
fabs = FABSFactory(correction_delete_indicatr='c', business_funds_indicator=None)
fabs_2 = FABSFactory(correction_delete_indicatr=None, business_funds_indicator='')
errors = number_of_errors(_FILE, database, models=[fabs, fabs_2])
assert errors == 2
|
[
"Burdeyny_Alisa@bah.com"
] |
Burdeyny_Alisa@bah.com
|
c3b8845978fac8cfa735af881c0a55ce00ccf926
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_326/ch6_2020_03_09_19_29_35_692924.py
|
62c78804fbf0b43e0866b1d8788774568a3b42ba
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
def celsius_para_fahrenheit(celsius):
temperatura_equivalente_em_F = (celsius * 9/5) + 32
return temperatura_equivalente_em_F
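# Quick usage check (illustrative, not part of the original exercise):
# 0 °C maps to 32.0 °F and 100 °C to 212.0 °F.
if __name__ == "__main__":
    print(celsius_para_fahrenheit(0))    # 32.0
    print(celsius_para_fahrenheit(100))  # 212.0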
|
[
"you@example.com"
] |
you@example.com
|
167189898c959abc7ed28e564880ee1069d227f1
|
2d4380518d9c591b6b6c09ea51e28a34381fc80c
|
/CIM16/CDPSM/Balanced/IEC61970/LoadModel/__init__.py
|
9ee7be64a7b50687c12f23c691687acf992d4b74
|
[
"MIT"
] |
permissive
|
fran-jo/PyCIM
|
355e36ae14d1b64b01e752c5acd5395bf88cd949
|
de942633d966bdf2bd76d680ecb20517fc873281
|
refs/heads/master
| 2021-01-20T03:00:41.186556
| 2017-09-19T14:15:33
| 2017-09-19T14:15:33
| 89,480,767
| 0
| 1
| null | 2017-04-26T12:57:44
| 2017-04-26T12:57:44
| null |
UTF-8
|
Python
| false
| false
| 1,650
|
py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""This package is responsible for modeling the energy consumers and the system load as curves and associated curve data. Special circumstances that may affect the load, such as seasons and daytypes, are also included here. This information is used by Load Forecasting and Load Management.
"""
from CIM16.CDPSM.Balanced.IEC61970.LoadModel.LoadResponseCharacteristic import LoadResponseCharacteristic
nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15?profile=http://iec.ch/TC57/2011/iec61968-13/CDPSM/Balanced#LoadModel"
nsPrefix = "cimLoadModel"
|
[
"fran_jo@hotmail.com"
] |
fran_jo@hotmail.com
|
8461e2f548998a35f94100eb6fdd0f429b1d5ab8
|
c68268657c1a94c09271a124b200b0aeb85bb05e
|
/angulardjangorest/angular/views.py
|
2fc517fe4100318c5e1a5d13e2a29905c476cc33
|
[] |
no_license
|
photonkhan/angulardjangorest
|
146960801c8fdab924c4012271075a04c1379d91
|
3357066ab094ae152b138a506f3e2d41588ecf68
|
refs/heads/master
| 2022-12-12T02:09:56.248353
| 2018-07-25T13:12:39
| 2018-07-25T13:12:39
| 142,123,874
| 0
| 0
| null | 2022-11-17T05:58:28
| 2018-07-24T07:49:54
|
HTML
|
UTF-8
|
Python
| false
| false
| 278
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
def index(request):
context = {
        'header' : 'Angular with Django Rest API'
}
return render(request, 'angular/index.html', context)
|
[
"you@example.com"
] |
you@example.com
|
ff6750998ace4ef5d00078ea55ba213c8bdec0e3
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-bcs/huaweicloudsdkbcs/v2/model/dimension.py
|
cd981259ff98407818c27a6f0bc3680ff3fc3da4
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,440
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Dimension:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'value': 'str'
}
attribute_map = {
'name': 'name',
'value': 'value'
}
def __init__(self, name=None, value=None):
"""Dimension - a model defined in huaweicloud sdk"""
self._name = None
self._value = None
self.discriminator = None
if name is not None:
self.name = name
if value is not None:
self.value = value
@property
def name(self):
"""Gets the name of this Dimension.
        Dimension name.
:return: The name of this Dimension.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Dimension.
        Dimension name.
:param name: The name of this Dimension.
:type: str
"""
self._name = name
@property
def value(self):
"""Gets the value of this Dimension.
        Dimension value.
:return: The value of this Dimension.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this Dimension.
        Dimension value.
:param value: The value of this Dimension.
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Dimension):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
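# A brief usage sketch (illustrative only, not part of the generated SDK file):
# build a Dimension the way an SDK caller would and inspect its dict form; the
# name/value strings below are made-up example data.
if __name__ == "__main__":
    dim = Dimension(name="instance_id", value="bcs-instance-001")
    print(dim.to_dict())  # {'name': 'instance_id', 'value': 'bcs-instance-001'}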
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
c728ae87bedf61ae87a3b9715df359479018fde4
|
a08cbd5e9b4e4a037deaaae1749ed4dc55c79661
|
/test/IECoreMaya/ObjectDataTest.py
|
b61ac51f2f90642c99a252ee1471e27d8838e253
|
[] |
no_license
|
victorvfx/cortex
|
46385788b12dae375c1a5ade26d8f403d2dbccff
|
deb23599c8c69eac5671e59fe1a8ca0d5e943a36
|
refs/heads/master
| 2021-01-16T23:11:39.139147
| 2017-06-23T12:39:41
| 2017-06-23T12:39:41
| 95,709,763
| 1
| 0
| null | 2017-06-28T20:40:12
| 2017-06-28T20:40:12
| null |
UTF-8
|
Python
| false
| false
| 3,927
|
py
|
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import maya.cmds
import maya.OpenMaya
import IECore
import IECoreMaya
class ObjectDataTest( IECoreMaya.TestCase ) :
def setUp( self ) :
IECoreMaya.TestCase.setUp( self )
if not maya.cmds.pluginInfo( "ObjectDataTestNode.py", query=True, loaded=True ) :
maya.cmds.loadPlugin( "ObjectDataTestNode.py" )
def testReadWrite( self ) :
node = maya.cmds.createNode( "ieObjectDataTestNode" )
compoundData = IECore.CompoundData( {
"val1" : IECore.FloatData( 1 ),
"val2" : IECore.StringData( "val2Data" ),
"val3" : {
"val3.val1" : IECore.IntData( 100 ),
},
} )
IECoreMaya.ToMayaPlugConverter.create( compoundData ).convert( node + ".objectData" )
plugValue = IECoreMaya.FromMayaPlugConverter.create( node + ".objectData" ).convert()
self.assertEqual( plugValue, compoundData )
# try saving and loading an ascii file
maya.cmds.file( rename = os.getcwd() + "/test/IECoreMaya/objectDataTest.ma" )
sceneFileName = maya.cmds.file( force = True, type = "mayaAscii", save = True )
maya.cmds.file( new=True, force=True )
maya.cmds.file( sceneFileName, force=True, open=True )
loadedCompoundData = IECoreMaya.FromMayaPlugConverter.create( node + ".objectData" ).convert()
self.assertEqual( loadedCompoundData, compoundData )
# try saving and loading a binary file
maya.cmds.file( rename = os.getcwd() + "/test/IECoreMaya/objectDataTest.mb" )
sceneFileName = maya.cmds.file( force = True, type = "mayaBinary", save = True )
maya.cmds.file( new=True, force=True )
maya.cmds.file( sceneFileName, force=True, open=True )
loadedCompoundData = IECoreMaya.FromMayaPlugConverter.create( node + ".objectData" ).convert()
self.assertEqual( loadedCompoundData, compoundData )
def tearDown( self ) :
maya.cmds.file( new = True, force = True )
maya.cmds.flushUndo()
maya.cmds.unloadPlugin( "ObjectDataTestNode.py" )
for f in [
"./test/IECoreMaya/objectDataTest.ma",
"./test/IECoreMaya/objectDataTest.mb",
] :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
|
[
"john@image-engine.com"
] |
john@image-engine.com
|
2175f0c6c75b4f6669609f7a09d8ddb6bacc229e
|
93bf4bbafe0524335ea1216f7f2941348c2cd1bd
|
/tensorflow/python/kernel_tests/pad_op_test.py
|
1597a8c947eb82da5cb47f2e9aac7d6a2967bbbf
|
[
"Apache-2.0"
] |
permissive
|
sachinpro/sachinpro.github.io
|
c4951734b09588cad58711a76fe657f110163c11
|
c3bbd8d89818f5d8bb7296c851ed5e52c19728e3
|
refs/heads/master
| 2022-12-23T10:00:13.902459
| 2016-06-27T13:18:27
| 2016-06-27T13:25:58
| 25,289,839
| 1
| 1
|
Apache-2.0
| 2022-12-15T00:45:03
| 2014-10-16T06:44:30
|
C++
|
UTF-8
|
Python
| false
| false
| 6,742
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class PadOpTest(tf.test.TestCase):
def _npPad(self, inp, paddings, mode):
return np.pad(inp, paddings, mode=mode.lower())
def testNpPad(self):
self.assertAllEqual(
np.array([[0, 0, 0, 0, 0, 0],
[0, 3, 3, 0, 0, 0],
[0, 4, 4, 0, 0, 0],
[0, 5, 5, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant"))
self.assertAllEqual(
np.array([[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0],
[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="reflect"))
self.assertAllEqual(
np.array([[0, 0, 1, 2, 2, 1],
[0, 0, 1, 2, 2, 1],
[3, 3, 4, 9, 9, 4],
[3, 3, 4, 9, 9, 4]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="symmetric"))
def _testPad(self, np_inputs, paddings, mode, use_gpu=False):
np_val = self._npPad(np_inputs, paddings, mode=mode)
with self.test_session(use_gpu=use_gpu):
tf_val = tf.pad(np_inputs, paddings, mode=mode)
out = tf_val.eval()
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def _testGradient(self, x, a, mode):
with self.test_session():
inx = tf.convert_to_tensor(x)
xs = list(x.shape)
ina = tf.convert_to_tensor(a)
y = tf.pad(inx, ina, mode=mode)
# Expected y's shape to be:
ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
xs,
y,
ys,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_inputs, paddings):
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC"):
self._testPad(np_inputs, paddings, mode=mode, use_gpu=False)
self._testPad(np_inputs, paddings, mode=mode, use_gpu=True)
if np_inputs.dtype == np.float32:
self._testGradient(np_inputs, paddings, mode=mode)
def testInputDims(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2, 1, 1, 1, 1]),
tf.reshape([1, 2], shape=[1, 2]))
def testPaddingsDim(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[2]))
def testPaddingsDim2(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[2, 1]))
def testPaddingsDim3(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[1, 2]))
def testPaddingsDim4(self):
with self.test_session():
with self.assertRaises(ValueError):
tf.pad(
tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2, 3, 4, 5, 6], shape=[3, 2]))
def testPaddingsNonNegative(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
tf.pad(
tf.constant([1], shape=[1]),
tf.constant([-1, 0], shape=[1, 2]))
def testPaddingsNonNegative2(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
tf.pad(
tf.constant([1], shape=[1]),
tf.constant([-1, 0], shape=[1, 2]))
def testPaddingsMaximum(self):
with self.test_session():
with self.assertRaises(Exception):
tf.pad(
tf.constant([1], shape=[2]),
tf.constant([2, 0], shape=[1, 2]),
mode="REFLECT").eval()
with self.assertRaises(Exception):
tf.pad(
tf.constant([1], shape=[2]),
tf.constant([0, 3], shape=[1, 2]),
mode="SYMMETRIC").eval()
def testIntTypes(self):
# TODO(touts): Figure out why the padding tests do not work on GPU
# for int types and rank > 2.
for t in [np.int32, np.int64]:
self._testAll((np.random.rand(4, 4, 3) * 100).astype(t),
[[1, 0], [2, 3], [0, 2]])
def testFloatTypes(self):
for t in [np.float32, np.float64, np.complex64]:
self._testAll(np.random.rand(2, 5).astype(t),
[[1, 0], [2, 0]])
def testShapeFunctionEdgeCases(self):
# Unknown paddings shape.
inp = tf.constant(0.0, shape=[4, 4, 4, 4])
padded = tf.pad(inp, tf.placeholder(tf.int32))
self.assertEqual([None, None, None, None], padded.get_shape().as_list())
# Unknown input shape.
inp = tf.placeholder(tf.float32)
padded = tf.pad(inp, [[2, 2], [2, 2]])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input and paddings shape.
inp = tf.placeholder(tf.float32)
padded = tf.pad(inp, tf.placeholder(tf.int32))
self.assertAllEqual(None, padded.get_shape().ndims)
def testScalars(self):
paddings = np.zeros((0, 2), dtype=np.int32)
inp = np.asarray(7)
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
tf_val = tf.pad(inp, paddings)
out = tf_val.eval()
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
if __name__ == "__main__":
tf.test.main()
|
[
"x0234443@ti.com"
] |
x0234443@ti.com
|
a5f111402661dc059cae8cb061839575063d1371
|
e84feabf99ff6e15df9eeee7b7c2595853fe746d
|
/app/utils/rabbit.py
|
34a303f35e2f07bacfe9fb45fc79e5897dccb18e
|
[] |
no_license
|
Ravillatypov/freeswitch-intergration
|
f0536b3abf3982cb2051291dbe101d07e59809b8
|
cac8cc829ca0d5734a5294847bf7587758b45eb1
|
refs/heads/main
| 2023-01-23T02:06:53.070823
| 2020-10-01T17:19:33
| 2020-10-01T17:19:33
| 300,362,493
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
from uuid import uuid4
from aio_pika import Message
from app.settings import MQ_CONVERTER_QUEUE_NAME, MQ_UPLOADS_QUEUE_NAME, ENVIRONMENT
from aio_pika import RobustConnection
async def send_message(rabbit_mq: RobustConnection, routing_key: str, body: bytes):
if ENVIRONMENT == 'test':
return
async with rabbit_mq:
channel = await rabbit_mq.channel()
await channel.default_exchange.publish(
Message(body=body),
routing_key=routing_key,
)
async def need_convert(rabbit_mq: RobustConnection, call_id: uuid4, path: str):
await send_message(
rabbit_mq,
body=f'{call_id}\n{path}'.encode(),
routing_key=MQ_CONVERTER_QUEUE_NAME,
)
async def need_upload(rabbit_mq: RobustConnection, call_id: uuid4, path: str):
await send_message(
rabbit_mq,
body=f'{call_id}\n{path}'.encode(),
routing_key=MQ_UPLOADS_QUEUE_NAME,
)
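# A minimal usage sketch (assumptions: a local RabbitMQ broker at the default
# URL and a finished recording at the given path; neither is part of the
# original module). It reuses this module's uuid4 import and need_convert helper.
#
#   import asyncio
#   from aio_pika import connect_robust
#
#   async def main():
#       connection = await connect_robust("amqp://guest:guest@localhost/")
#       await need_convert(connection, uuid4(), "/records/example.wav")
#
#   asyncio.run(main())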
|
[
"ravillatypov12@gmail.com"
] |
ravillatypov12@gmail.com
|
dc4e68ac5b189ea2e0119cedefbd33e0a5c254e5
|
0cce9a9d9b9da4a820e9ed5fc674d06f0be9810a
|
/ch10_first_exercises.py
|
61b5a7953658f4cd352dcef9cd381f374e582ead
|
[] |
no_license
|
wbroach/python_work
|
3f4a85e998805f50b2400e64c5b7cbc31780b245
|
7e1842b317539d61bab0f04d72e71db893c865ff
|
refs/heads/master
| 2020-04-14T22:06:43.164595
| 2019-10-02T01:25:00
| 2019-10-02T01:25:00
| 164,151,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
#~ file_name = 'python_can.txt'
#~ with open(file_name) as file_object:
#~ lines = file_object.readlines()
#~ learning_summary = ''
#~ for line in lines:
#~ learning_summary += line.strip() + " \n"
#~ print(learning_summary)
file_name = 'python_can.txt'
with open(file_name) as file_object:
lines = file_object.readlines()
for i in range(len(lines)):
lines[i] = lines[i].replace('Python', 'C').strip()
for line in lines:
print(line)
|
[
"someone@someplace.com"
] |
someone@someplace.com
|