blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f800f8822a575845596e69e4820327b694eb29c9 | 98dffa1d08cd2f7242650bb9eeacae42f6368300 | /scripting/layering.py | 4b4a6c016cf873d9c2462f7e3f1f67ad3ae0cb1f | [] | no_license | ucll-scripting/testing-framework | 01c93c666070776e75f63f647f125ecdeb49dc91 | 68452b00d25484d48af3087486b295d9f595a000 | refs/heads/master | 2023-02-26T01:22:06.326330 | 2021-02-02T14:47:47 | 2021-02-02T14:47:47 | 335,320,727 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | from contextlib import contextmanager
from scripting.dynamic import create_dynamic_variable, dynamic_bind
from scripting.testing import observers, skip_if
@contextmanager
def _layered_observers(counter, on_pass=None, on_fail=None, on_skip=None):
    """Register test observers that fire only while the current layer is active.

    Each callback is wrapped with a guard so it is invoked only when the
    dynamic layer counter still holds the value it had at registration time;
    callbacks registered on other layers stay silent.
    """
    layer_at_registration = counter.value

    def only_on_this_layer(callback):
        def guarded(*args):
            if counter.value == layer_at_registration:
                callback(*args)
        return guarded

    wrapped = {
        name: (only_on_this_layer(cb) if cb else None)
        for name, cb in (("on_pass", on_pass), ("on_fail", on_fail), ("on_skip", on_skip))
    }
    with observers(**wrapped):
        yield
@contextmanager
def _layered_skip_if(counter, skip_if):
    """Install a skip predicate that only applies on the current layer.

    BUG FIX: the ``skip_if`` parameter shadowed the ``skip_if`` context
    manager imported from ``scripting.testing`` at module level, so the line
    ``with skip_if(wrap(skip_if)):`` called the *predicate* with its own
    wrapper and used the result as a context manager. Re-import the real
    context manager under an alias so the wrapped predicate is actually
    registered. The public signature is unchanged.
    """
    from scripting.testing import skip_if as _skip_if

    observer_layer = counter.value
    def wrap(f):
        def wrapped(*args):
            # Outside the registering layer the predicate never skips.
            if counter.value == observer_layer:
                return f(*args)
            return False
        return wrapped
    with _skip_if(wrap(skip_if)):
        yield
@contextmanager
def _add_layer(counter):
    """Run the body of the ``with`` block one layer deeper.

    Dynamically rebinds *counter* to its current value plus one for the
    duration of the block.
    """
    deeper = counter.value + 1
    with dynamic_bind(counter, deeper):
        yield
class _Layering:
    """Facade bundling a private dynamic layer counter with the layer-aware
    helpers defined above (``_add_layer``, ``_layered_observers``,
    ``_layered_skip_if``)."""
    def __init__(self):
        # Dynamic variable holding the current layer depth, starting at 0.
        self.__counter = create_dynamic_variable().bind(0)
    def add(self):
        """Context manager that enters a new (deeper) layer."""
        return _add_layer(self.__counter)
    def observers(self, on_pass=None, on_fail=None, on_skip=None):
        """Context manager registering observers scoped to the current layer."""
        return _layered_observers(self.__counter, on_pass=on_pass, on_fail=on_fail, on_skip=on_skip)
    def skip_if(self, predicate):
        """Context manager registering a skip predicate scoped to the current layer."""
        return _layered_skip_if(self.__counter, predicate)
def create_layering():
    """Create a fresh, independent layering (its own layer counter)."""
    return _Layering()
| [
"frederic.vogels@ucll.be"
] | frederic.vogels@ucll.be |
9096a6363c0b799ebf47452ce695152375b0105e | 28ddc330bbfcebf3ce7d75643d06919ebed77f5f | /pymtl3/passes/WrapGreenletPass.py | c9b5bb8eee1e267730648dec2db763dff65ae88e | [
"BSD-3-Clause"
] | permissive | hsqforfun/pymtl3 | 848d642abcf539688750f4b26e93133191a88bae | 05e06601cf262a663a95d1235cb99056ece84580 | refs/heads/master | 2020-09-01T15:15:27.891486 | 2019-10-31T23:42:59 | 2019-10-31T23:42:59 | 218,990,327 | 1 | 0 | BSD-3-Clause | 2019-11-01T13:27:04 | 2019-11-01T13:27:03 | null | UTF-8 | Python | false | false | 1,816 | py | """
========================================================================
WrapGreenletPass.py
========================================================================
Wrap all update blocks that call methods with blocking decorator with
greenlet.
Author : Shunning Jiang
Date : May 20, 2019
"""
from graphviz import Digraph
from greenlet import greenlet
from pymtl3.dsl.errors import UpblkCyclicError
from .BasePass import BasePass
from .errors import PassOrderError
class WrapGreenletPass( BasePass ):
  """Wrap blocking update blocks in greenlets.

  Every update block listed in ``top._dag.greenlet_upblks`` is replaced by a
  ticker callable that switches into a dedicated greenlet; the block body and
  the scheduling constraints are rewritten to reference the wrapped callables.
  """
  def __call__( self, top ):
    # This pass must run after DAG generation.
    if not hasattr( top, "_dag" ):
      raise PassOrderError( "_dag" )
    self.wrap_greenlet( top )
  def wrap_greenlet( self, top ):
    all_upblks = top._dag.final_upblks
    all_constraints = top._dag.all_constraints
    greenlet_upblks = top._dag.greenlet_upblks
    # Nothing to wrap: leave the DAG untouched.
    if not greenlet_upblks:
      return
    def wrap_greenlet( blk ):
      # The greenlet runs the block once per switch, then yields control
      # back to its parent (the caller of the ticker).
      def greenlet_wrapper():
        while True:
          blk()
          greenlet.getcurrent().parent.switch()
      gl = greenlet( greenlet_wrapper )
      # The ticker is what the scheduler actually calls; each call resumes
      # the greenlet for one execution of the block.
      def greenlet_ticker():
        gl.switch()
      # greenlet_ticker.greenlet = gl
      # Preserve the original block name for scheduling/debug output.
      greenlet_ticker.__name__ = blk.__name__
      return greenlet_ticker
    # Rebuild the set of update blocks, swapping in wrapped tickers.
    new_upblks = set()
    wrapped_blk_mapping = {}
    for blk in all_upblks:
      if blk in greenlet_upblks:
        wrapped = wrap_greenlet( blk )
        wrapped_blk_mapping[ blk ] = wrapped
        new_upblks.add( wrapped )
      else:
        new_upblks.add( blk )
    # Rewrite constraints so edges point at the wrapped callables.
    new_constraints = set()
    for (x, y) in all_constraints:
      if x in greenlet_upblks:
        x = wrapped_blk_mapping[ x ]
      if y in greenlet_upblks:
        y = wrapped_blk_mapping[ y ]
      new_constraints.add( (x, y) )
    top._dag.final_upblks = new_upblks
    top._dag.all_constraints = new_constraints
| [
"sj634@cornell.edu"
] | sj634@cornell.edu |
3959ec371c5fcdf95fc0d3374d91d8402a7200ed | 3e50ed55208122b2f8b34e7f26f33c9ef70efce5 | /python/distributed_spider/distributed_spider/spiders/add_task.py | c98dc6dadbbf75c65779f451818ce7992048148c | [] | no_license | brady-wang/mac_home | b8343da428a4e6696b89d0e6a53ff0dfc87ffd21 | c56a739c31d3c0f62d26d8512fe1a90c036a1f96 | refs/heads/master | 2023-01-14T11:42:02.544322 | 2019-10-02T11:47:27 | 2019-10-02T11:47:27 | 193,177,718 | 0 | 0 | null | 2023-01-04T13:55:31 | 2019-06-22T01:27:10 | PHP | UTF-8 | Python | false | false | 321 | py | # -*- coding: utf-8 -*-
import scrapy
import redis
# Seed URLs for the distributed (scrapy-redis) spider's start-URL queue.
url = "https://www.pexels.com/photo/aerial-photo-of-high-rise-building-754587/"
url1 = "https://www.pexels.com/photo/waterfalls-688559/"
# NOTE(review): port is passed as a string literal; redis-py normally takes
# an int port — confirm this connects as intended.
rds = redis.StrictRedis(host='192.168.33.10',port='6379')
# res = rds.rpush('yeves:urls',url)
# res = rds.rpush('yeves:urls',url1)
| [
"brady.wang@qq.com"
] | brady.wang@qq.com |
3db0a0d16fc6a8444278c81119ea8a2c2070ac2c | 466660115eafd99b72f81339d86c5bcbf4c7efb0 | /codes/12/opencv_warpPerspective_demo.py | 8cec5184ed3d80d675d2deace8ff2ccd3a81d8d2 | [] | no_license | CoryVegan/scipybook2 | c2bb68c169c632ab389600034beb33ac921b0ba1 | a8fd295c2f2d7ee18f351e5622ca7eeb4649ee50 | refs/heads/master | 2020-03-23T14:50:28.056482 | 2017-08-25T06:00:00 | 2018-06-02T14:18:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | # -*- coding: utf-8 -*-
"""
透视变换
"""
import pyopencv as cv
import numpy as np
from enthought.traits.api import HasTraits, Array
from enthought.traits.ui.api import View, Item
class PerspectiveDemo(HasTraits):
    """Interactive perspective-warp demo.

    Editing the destination quadrilateral in the Traits UI panel triggers
    ``redraw``, which recomputes the homography and shows the warped image
    in an OpenCV window.
    """
    # Source / destination quadrilaterals: four (x, y) corner points each.
    src = Array(shape=(4,2), dtype=np.float32)
    dst = Array(shape=(4,2), dtype=np.float32)
    # Traits UI layout. Note this rebinds the imported `View` name at class
    # scope — harmless here because it is evaluated exactly once.
    View = View(
        Item("dst", label="变换后坐标"),
        title = "Perspective Demo控制面板"
    )
    def __init__(self, **traits):
        super(PerspectiveDemo, self).__init__(**traits)
        self.img = cv.imread("lena.jpg")
        w = self.img.size().width
        h = self.img.size().height
        # Both quads start as the full image rectangle (identity transform).
        self.src = np.array([[0,0],[w,0],[0,h],[w,h]],dtype=np.float32)
        self.dst = np.array([[0,0],[w,0],[0,h],[w,h]],dtype=np.float32)
        # Redraw whenever either quad changes, and once at startup.
        self.on_trait_change(self.redraw, "src,dst")
        self.redraw()
    def redraw(self):
        src = cv.asvector_Point2f(self.src)
        dst = cv.asvector_Point2f(self.dst)
        # 3x3 homography mapping src corners onto dst corners.
        m = cv.getPerspectiveTransform(src, dst)
        print(m)  # debug output of the transform matrix
        img2 = cv.Mat()
        cv.warpPerspective(self.img, img2, m, self.img.size())
        cv.imshow("Perspective Demo", img2)
# Create the output window first, then launch the Traits control panel;
# configure_traits() blocks running the UI event loop.
cv.namedWindow("Perspective Demo")
demo = PerspectiveDemo()
demo.configure_traits()
"qytang326@gmail.com"
] | qytang326@gmail.com |
9e80f8345769e92d4e8fb3b81070349c27728a06 | 6fadc260ab5c0109adf026cb8dae8eefcf0ba271 | /第五章:requests模块高级/3.代理操作.py | 0c1773ae5889a4b22a652ac9caca78f2e2f06950 | [] | no_license | Echo-yay/Crawler_base | 03d439f5fb76d98ef881dada4fec90e24689c424 | 808b40198c887739f2721bec47c61f255d76706a | refs/heads/master | 2023-04-02T17:57:16.318607 | 2021-04-13T02:34:04 | 2021-04-13T02:34:04 | 346,982,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | # 中国矿业大学(北京)/ 机电硕-6 / ZQT2000405103 / 李天鸽
# Edited: 2021/3/30 11:40
# Purpose: demonstrate sending a request through an HTTP proxy.
import requests
url = 'https://www.baidu.com/s?wd=IP'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
}
# Fetch the page via the given proxy and save the HTML locally.
# NOTE(review): the hard-coded proxy address is likely stale — verify.
page_text = requests.get(url=url,headers=headers,proxies={'https':'121.20.48.98:37859'}).text
with open('ip.html','w',encoding='utf-8') as fp:
    fp.write(page_text)
# Anti-crawling mechanism: IP bans.
# Counter-measure: send requests through a proxy.
| [
"1740636835@qq.com"
] | 1740636835@qq.com |
3051b5bf40691b588759dbb19716f1297f6e2f24 | 7cd6a7bc72f0026056a7238c0feea081bfff13a7 | /bioprocs/scripts/vcf/pVcfSplit.py | e3c812d9b357910cb9cc4fd6481bd4e244304649 | [
"MIT"
] | permissive | shijianasdf/biopipen | 8d963ccca38e2a9d7a46582a5eec45c38924655c | d53b78aa192fd56a5da457463b099b2aa833b284 | refs/heads/master | 2023-08-18T18:28:03.306877 | 2019-12-31T16:17:35 | 2019-12-31T16:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,318 | py | from os import path
from sys import stderr
from diot import Diot
from bioprocs.utils import parallel, shell2 as shell
from bioprocs.utils.reference import vcfIndex
# The {{...}} placeholders are filled in by the bioprocs templating engine
# before this script runs; each renders to a Python literal.
infile = {{i.infile | quote}}
prefix = {{i.infile | fn2 | quote}}
outdir = {{o.outdir | quote}}
samples = {{i.samples | quote}}
tool = {{args.tool | quote}}
bcftools = {{args.bcftools | quote}}
gatk = {{args.gatk | quote}}
tabix = {{args.tabix | quote}}
ref = {{args.ref | quote}}
params = {{args.params | repr}}
nthread = {{args.nthread | repr}}
# Register executable paths, ensure the VCF is tabix-indexed, then list the
# sample names present in the VCF (one per line from `bcftools query -l`).
shell.load_config(bcftools = bcftools, gatk = gatk)
vcfIndex(infile, tabix = tabix)
allsamples = shell.bcftools.query(l = infile).splitlines()
allsamples = [s.strip() for s in allsamples if s.strip()]
if samples:
    # Restrict to the samples listed in the given file, one name per line.
    # BUG FIX: the original used f.readlines(), which keeps the trailing
    # newline on every entry, so the intersection with the *stripped* names
    # in `allsamples` was (almost) always empty. Strip each line (and drop
    # blank lines) before intersecting.
    with open(samples) as f:
        wanted = [line.strip() for line in f if line.strip()]
    samples = list(set(allsamples) & set(wanted))
else:
    # No sample file given: split every sample found in the VCF.
    samples = allsamples
def run_bcftools_one(sample):
    # Extract a single sample into <prefix>-<sample>.vcf via `bcftools view -s`.
    shell.fg.bcftools.view(_ = infile, s = sample, o = path.join(outdir, '{}-{}.vcf'.format(prefix, sample)), **params)
def run_bcftools():
    # Split all selected samples in parallel (nthread workers).
    parallel.Parallel(nthread).run(run_bcftools_one, [(sample,) for sample in samples])
def run_awk_one(sample, index, awkfile):
    # Extract one sample with awk. The sample's genotype column is
    # index + 10: the script below prints the 9 fixed VCF columns ($1-$9)
    # followed by column $index.
    shell.awk(
        v = ["sample={!r}".format(sample), "index={}".format(index + 10)],
        _stderr = stderr,
        f = awkfile,
        _ = infile,
        _out = path.join(outdir, '{}-{}.vcf'.format(prefix, sample)))
def run_awk():
    # write the awk script
    awkfile = path.join(outdir, 'vcfsample.awk')
    awkfh = open(awkfile, 'w')
    awkfh.write("""
BEGIN {
	OFS="\\t"
}
$0 ~ "^##" {
	print
}
$0 ~ "^#CHROM" {
	print "#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t"sample
}
$0 !~ "^#" {
	print $1,$2,$3,$4,$5,$6,$7,$8,$9,$index
}
""")
    awkfh.close()
    # One awk job per sample, run in parallel; enumerate() supplies the
    # 0-based column offset.
    parallel.Parallel(nthread).run(run_awk_one, [(sample, i, awkfile) for i, sample in enumerate(samples)])
def run_gatk_one(sample):
    # Extract one sample with GATK SelectVariants, dropping sites that are
    # non-variant for that sample.
    shell.fg.gatk(
        R = ref,
        V = infile,
        o = path.join(outdir, '{}-{}.vcf'.format(prefix, sample)),
        sample_name = sample,
        T = 'SelectVariants',
        excludeNonVariants = True,
        **params
    )
def run_gatk():
    # Split all selected samples in parallel (nthread workers).
    parallel.Parallel(nthread).run(run_gatk_one, [(sample, ) for sample in samples])
# Map of supported back-ends to their driver functions.
tools = dict(bcftools = run_bcftools, awk = run_awk, gatk = run_gatk)
# BUG FIX: the original wrapped the *call* in try/except KeyError, which
# also misreported any KeyError raised inside the selected driver as
# "tool not supported". Validate the tool name up front instead.
if tool not in tools:
    raise ValueError('Tool {!r} not supported yet.'.format(tool))
tools[tool]()
| [
"pwwang@pwwang.com"
] | pwwang@pwwang.com |
67f1aeaeeb28bc19427a45aebf9aaacfbf59f93d | 308f5596f1c7d382520cfce13ceaa5dff6f4f783 | /hphp/runtime/ext/core/typecheck_systemlib.py | e9f409239b23debb5a63ec1f6f2cae32f105a0be | [
"PHP-3.01",
"Zend-2.0",
"MIT"
] | permissive | facebook/hhvm | 7e200a309a1cad5304621b0516f781c689d07a13 | d8203129dc7e7bf8639a2b99db596baad3d56b46 | refs/heads/master | 2023-09-04T04:44:12.892628 | 2023-09-04T00:43:05 | 2023-09-04T00:43:05 | 455,600 | 10,335 | 2,326 | NOASSERTION | 2023-09-14T21:24:04 | 2010-01-02T01:17:06 | C++ | UTF-8 | Python | false | false | 2,205 | py | #!/usr/bin/env python3
# Gather all of the relevant files from buck file groups and execute
# `hh_single_type_check` with the correct flags
import argparse
import os
import subprocess as p
import sys
from typing import List
# Hack/FIXME error codes that systemlib files are allowed to suppress.
FIXME_CODES: List[int] = [
    # "Missing symbol:" used to break dependency cycles between files that might
    # be mutually recursive or referential in some form (e.g.: any class with
    # a `__Sealed` attribute).
    2049,
    # "Memoizing object parameters requires the capability AccessGlobals:" for
    # now, we're allowing this in some places like `create_opaque_value`
    4447,
    # There are some functions that don't have *quite* correct coeffects; if
    # we're going to change these it should be done separate from an initial
    # pass making systemlib "clean."
    4390,
]
# Command-line flags handed to `hh_single_type_check` (see main()).
FLAGS: List[str] = [
    "--no-builtins",
    "--is-systemlib",
    # "--everything-sdt",
    "--config",
    "enable_no_auto_dynamic=true",
    "--enable-sound-dynamic-type",
    # TODO(T118594542)
    "--allowed-fixme-codes-strict",
    ",".join(map(str, FIXME_CODES)),
    "--allowed-decl-fixme-codes",
    ",".join(map(str, FIXME_CODES)),
]
def get_files_in(path: str) -> List[str]:
    """Recursively collect every file path under *path* (os.walk order)."""
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(path)
        for name in names
    ]
def main() -> None:
    """CLI entry point: gather files from the given paths and type-check them."""
    parser = argparse.ArgumentParser(
        description="Gather PHP files in given directories and run `hh_single_type_check`"
    )
    parser.add_argument("paths", type=str, help="paths to traverse", nargs="+")
    parser.add_argument("--hhstc-path", type=str, help="`hh_single_type_check` to run")
    parser.add_argument(
        "--report-number-of-files",
        action="store_true",
        help="instead of running the typechecker, just print the number of files we'd typecheck",
    )
    args = parser.parse_args()

    # Flatten the file listings of every requested path into one list.
    files = [f for path in args.paths for f in get_files_in(path)]

    if args.report_number_of_files:
        # Count (but don't check) the PHP files we would hand to the checker.
        print(sum(1 for f in files if f.endswith("php")))
        return

    command = [args.hhstc_path, *FLAGS, *files]
    sys.exit(p.run(command).returncode)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
a0198bbf0a9488b28990a3671c5ad8a112155fad | 47eb0c1ee39b673dc027f6e076b5065a76f3e002 | /setup.py | f4d718bc37e836453f711edc808cbcc7d40edcc5 | [
"BSD-3-Clause"
] | permissive | vodkabuaa/tushare | 6b814efd829519df596072b644f7c78c63c59289 | e55394e0fb6da0bd7652e11f806ad7e92b63c11c | refs/heads/master | 2021-01-15T11:20:49.527125 | 2015-01-16T16:06:55 | 2015-01-16T16:06:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | from distutils.core import setup
import codecs
import os
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    FIX: the original returned ``codecs.open(...).read()`` without ever
    closing the handle (resource leak); a context manager closes it promptly.
    """
    with codecs.open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Hard-coded reStructuredText long description shown on PyPI.
long_desc = """
tushare
===============
.. image:: https://api.travis-ci.org/waditu/tushare.png?branch=master
    :target: https://travis-ci.org/waditu/tushare
.. image:: https://badge.fury.io/py/tushare.png
    :target: http://badge.fury.io/py/tushare
* easy to use as most of the data returned are pandas DataFrame objects
* can be easily saved as csv, excel or json files
* can be inserted into MySQL or Mongodb
Target Users
--------------
* financial market analyst of China
* learners of financial data analysis with pandas/NumPy
* people who are interested in China financial data
Installation
--------------
pip install tushare
"""
# Packaging metadata; executed when this setup.py runs. The `read` helper
# above is available for loading README content, but the inline long_desc
# string is used instead.
setup(
    name='tushare',
    version='0.1.4',
    description='TuShare is a utility for crawling historical and Realtime Quotes data of China stocks',
    # long_description=read("READM.rst"),
    long_description = long_desc,
    author='Jimmy Liu',
    author_email='jimmysoa@sina.cn',
    license='BSD',
    url='https://github.com/waditu/tushare',
    keywords='china stock data',
    classifiers=['Development Status :: 4 - Beta',
    'Programming Language :: Python :: 2.7',
    'License :: OSI Approved :: BSD License'],
    packages=['tushare','tushare.stock'],
)
"jimmysoa@sina.cn"
] | jimmysoa@sina.cn |
41177f6514241ed36d01522abc646d69ddfca634 | 6b7857c209b9c30ec6b1bb0c7437f8f9918044d7 | /2908.py | b053aee91226d487002bed4d9beab74ba1300acc | [] | no_license | rheehot/week01-algorithm | 1a18fe8eb76bed0b7d8f26fc10736c7e0c82ec12 | 0eab27bfcad265ca2dafaf64b2ae067a1ba17639 | refs/heads/main | 2023-02-05T06:46:38.006682 | 2020-12-16T14:47:26 | 2020-12-16T14:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # 상수 -
def bigger_reversed(a: str, b: str) -> str:
    """Return the larger of two equal-length digit strings read backwards.

    BOJ 2908 ("Sangsu"): both inputs are three-digit numbers containing no
    zero digit (problem guarantee), so the reversed string has no leading
    zero and printing it equals printing the reversed integer.
    """
    return max(a[::-1], b[::-1], key=int)


if __name__ == "__main__":
    # Fixes over the original digit-by-digit loop: it printed *nothing*
    # when the two reversed numbers were equal, and a dead triple-quoted
    # copy of the same loop was left in the file. max() always answers.
    a, b = input().split()
    print(bigger_reversed(a, b))
| [
"jeongseo21@gmail.com"
] | jeongseo21@gmail.com |
3ceda185f764f5098f155d032e070beabe840183 | 6a7ca83203b1757c57fde550dc38babcad60b4e1 | /web/opsgrid/core/migrations/0003_auto_20200606_1547.py | fdbc7d9f8267f2739bc58def3eacaf991f64f01e | [
"MIT"
] | permissive | simon-weber/opsgrid | 78b6e1b01079f3447ddb97d6e5bd93f6a39fc16b | 9719b9438a4a17eb75b638613a20b534ef82edc7 | refs/heads/master | 2023-03-30T13:45:09.195901 | 2021-04-01T19:58:38 | 2021-04-01T19:58:38 | 353,765,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,784 | py | # Generated by Django 2.2.9 on 2020-06-06 15:47
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import opsgrid.core.models
class Migration(migrations.Migration):
    """Auto-generated schema migration: introduces Alert, Host and AlertStatus.

    NOTE(review): generated by `makemigrations`; behavioral changes belong in
    the model definitions, not here.
    """
    dependencies = [
        ("core", "0002_auto_20200530_2351"),
    ]
    operations = [
        migrations.CreateModel(
            name="Alert",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("jsonlogic_json", models.TextField()),
                ("last_updated_at", models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name="Host",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=256)),
                (
                    "state",
                    models.CharField(
                        choices=[(opsgrid.core.models.HostState("ACT"), "ACT")],
                        default="ACT",
                        max_length=3,
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("last_data_at", models.DateTimeField(null=True)),
                ("last_metric_row_json", models.TextField(blank=True)),
                ("header_types_json", models.TextField(blank=True)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="AlertStatus",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "state",
                    models.CharField(
                        # NOTE(review): choice *value* "ALARM" is 5 chars but
                        # max_length=3 — check the model; the (value, label)
                        # pair may be swapped.
                        choices=[
                            ("ALARM", "ALM"),
                            ("OK", "OK"),
                        ],
                        default="OK",
                        max_length=3,
                    ),
                ),
                (
                    "last_change_at",
                    models.DateTimeField(default=django.utils.timezone.now),
                ),
                ("change_metric_row_json", models.TextField(blank=True)),
                (
                    "alert",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE, to="core.Alert"
                    ),
                ),
            ],
        ),
        migrations.AddField(
            model_name="alert",
            name="host",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="core.Host"
            ),
        ),
        migrations.AddConstraint(
            model_name="host",
            constraint=models.UniqueConstraint(
                fields=("name", "user"), name="unique_host_per_user"
            ),
        ),
    ]
"simon@simonmweber.com"
] | simon@simonmweber.com |
10afc6582717758e6a1c4680f3330b5869a0a8ab | 8019f0df9a782b825132a328f1425fbe3028e657 | /odoo/addons/splashsync/models/__init__.py | 06a199a4ac86223a4739e58a11eaf9d1050c4bff | [] | no_license | p403n1x/odoo | 01b5e28eb1351c04d9045a1fb16e30de45c7929d | ce2cd03b3a9a8b5cfa5a81cf2b70ecafe5fb1ce2 | refs/heads/master | 2023-01-03T17:29:55.322847 | 2020-10-28T15:21:44 | 2020-10-28T15:21:44 | 280,176,919 | 0 | 1 | null | 2020-10-28T15:21:46 | 2020-07-16T14:33:56 | Python | UTF-8 | Python | false | false | 611 | py | # -*- coding: utf-8 -*-
#
# This file is part of SplashSync Project.
#
# Copyright (C) 2015-2019 Splash Sync <www.splashsync.com>
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
from . import authentification
from . import configuration
from . import partner
from . import product
from . import product_template
from . import order
| [
"eshop.bpaquier@gmail.com"
] | eshop.bpaquier@gmail.com |
018c098be0eb7d6c35d84417bd949c4b65953d0a | 290fa984448c3350fa4059fa8852f8a1321109ab | /services/users/src/tests/test_auth.py | 7dd4fe5f2cebc3ad872c2a74d435400353e40d41 | [] | no_license | testdrivenio/flask-react-aws | 673a612ae3368e7a9dcd7ddb50c0ea03e3221928 | 365f0771d5234b0b4dfe05d59bab29a03845af4f | refs/heads/master | 2023-07-19T19:45:14.042103 | 2022-05-04T16:08:52 | 2022-05-04T16:08:52 | 198,724,692 | 29 | 21 | null | 2023-07-19T14:39:18 | 2019-07-24T23:54:07 | Python | UTF-8 | Python | false | false | 6,254 | py | import json
import pytest
from flask import current_app
def test_user_registration(test_app, test_database):
    """A valid registration returns 201 with the user (and no password)."""
    client = test_app.test_client()
    resp = client.post(
        "/auth/register",
        data=json.dumps(
            {
                "username": "justatest",
                "email": "test@test.com",
                "password": "123456",
            }
        ),
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 201
    assert resp.content_type == "application/json"
    assert "justatest" in data["username"]
    assert "test@test.com" in data["email"]
    assert "password" not in data
def test_user_registration_duplicate_email(test_app, test_database, add_user):
    """Registering an already-used email is rejected with 400."""
    add_user("test", "test@test.com", "test")
    client = test_app.test_client()
    resp = client.post(
        "/auth/register",
        data=json.dumps(
            {"username": "michael", "email": "test@test.com", "password": "test"}
        ),
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 400
    assert resp.content_type == "application/json"
    assert "Sorry. That email already exists." in data["message"]
@pytest.mark.parametrize(
    "payload",
    [
        # Empty payload, then each combination with one required key missing.
        {},
        {"email": "me@testdriven.io", "password": "greaterthanten"},
        {"username": "michael", "password": "greaterthanten"},
        {"email": "me@testdriven.io", "username": "michael"},
    ],
)
def test_user_registration_invalid_json(test_app, test_database, payload):
    """Incomplete registration payloads fail schema validation (400)."""
    client = test_app.test_client()
    resp = client.post(
        "/auth/register",
        data=json.dumps(payload),
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 400
    assert resp.content_type == "application/json"
    assert "Input payload validation failed" in data["message"]
def test_registered_user_login(test_app, test_database, add_user):
    """A registered user can log in and receives both tokens."""
    add_user("test3", "test3@test.com", "test")
    client = test_app.test_client()
    resp = client.post(
        "/auth/login",
        data=json.dumps({"email": "test3@test.com", "password": "test"}),
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 200
    assert resp.content_type == "application/json"
    assert data["access_token"]
    assert data["refresh_token"]
def test_not_registered_user_login(test_app, test_database):
    """Logging in with an unknown email returns 404."""
    client = test_app.test_client()
    resp = client.post(
        "/auth/login",
        data=json.dumps({"email": "testnotreal@test.com", "password": "test"}),
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 404
    assert resp.content_type == "application/json"
    assert "User does not exist." in data["message"]
def test_valid_refresh(test_app, test_database, add_user):
    """A freshly issued refresh token can be exchanged for new tokens.

    FIX: removed a duplicated ``assert resp.content_type`` at the end of
    the original test (the identical assertion already appears above).
    """
    add_user("test4", "test4@test.com", "test")
    client = test_app.test_client()
    # user login
    resp_login = client.post(
        "/auth/login",
        data=json.dumps({"email": "test4@test.com", "password": "test"}),
        content_type="application/json",
    )
    # valid refresh
    refresh_token = json.loads(resp_login.data.decode())["refresh_token"]
    resp = client.post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": refresh_token}),
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 200
    assert resp.content_type == "application/json"
    assert data["access_token"]
    assert data["refresh_token"]
def test_invalid_refresh_expired_token(test_app, test_database, add_user):
    """Refreshing with an already-expired refresh token is rejected (401).

    FIX: the original mutated ``current_app.config`` without restoring it,
    leaking the -1 expiration into every later test; save and restore the
    value in a try/finally.
    """
    add_user("test5", "test5@test.com", "test")
    original_expiration = current_app.config["REFRESH_TOKEN_EXPIRATION"]
    current_app.config["REFRESH_TOKEN_EXPIRATION"] = -1
    try:
        client = test_app.test_client()
        # user login
        resp_login = client.post(
            "/auth/login",
            data=json.dumps({"email": "test5@test.com", "password": "test"}),
            content_type="application/json",
        )
        # invalid token refresh
        refresh_token = json.loads(resp_login.data.decode())["refresh_token"]
        resp = client.post(
            "/auth/refresh",
            data=json.dumps({"refresh_token": refresh_token}),
            content_type="application/json",
        )
        data = json.loads(resp.data.decode())
        assert resp.status_code == 401
        assert resp.content_type == "application/json"
        assert "Signature expired. Please log in again." in data["message"]
    finally:
        current_app.config["REFRESH_TOKEN_EXPIRATION"] = original_expiration
def test_invalid_refresh(test_app, test_database):
    """A malformed refresh token is rejected with 401."""
    client = test_app.test_client()
    resp = client.post(
        "/auth/refresh",
        data=json.dumps({"refresh_token": "Invalid"}),
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 401
    assert resp.content_type == "application/json"
    assert "Invalid token. Please log in again." in data["message"]
def test_user_status(test_app, test_database, add_user):
    """/auth/status returns the logged-in user's profile (no password)."""
    add_user("test6", "test6@test.com", "test")
    client = test_app.test_client()
    resp_login = client.post(
        "/auth/login",
        data=json.dumps({"email": "test6@test.com", "password": "test"}),
        content_type="application/json",
    )
    token = json.loads(resp_login.data.decode())["access_token"]
    resp = client.get(
        "/auth/status",
        headers={"Authorization": f"Bearer {token}"},
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 200
    assert resp.content_type == "application/json"
    assert "test6" in data["username"]
    assert "test6@test.com" in data["email"]
    assert "password" not in data
def test_invalid_status(test_app, test_database):
    """/auth/status with a bad bearer token returns 401."""
    client = test_app.test_client()
    resp = client.get(
        "/auth/status",
        headers={"Authorization": "Bearer invalid"},
        content_type="application/json",
    )
    data = json.loads(resp.data.decode())
    assert resp.status_code == 401
    assert resp.content_type == "application/json"
    assert "Invalid token. Please log in again." in data["message"]
"hermanmu@gmail.com"
] | hermanmu@gmail.com |
259eaa9eb150b7462d4c925d4708645aab5782b6 | f4e9721bd529541f2402472f201bb6fde66fea53 | /Lonely monk.py | d4a8ed8aa1175027efeb694272814602d7db99a5 | [] | no_license | wimpywarlord/hacker_earth_and_hacker_rank_solutions | 23d973778bceca5a395dd98b0b7252db49d02366 | 1277ba97e2744a7dab62f1e1319aac77f8ec6a28 | refs/heads/master | 2021-07-01T19:48:07.501021 | 2019-12-28T05:55:39 | 2019-12-28T05:55:39 | 172,307,339 | 10 | 3 | null | 2020-10-01T06:43:03 | 2019-02-24T07:08:29 | Python | UTF-8 | Python | false | false | 478 | py | n=int(input())
x=input()
a=x.split()
# Convert the n space-separated tokens to ints (n was read just above).
for i in range(0,n):
    a[i]=int(a[i])
print(a)
d=[]
# Brute-force every contiguous subarray: for each start i, sum a[i:counter]
# while counter shrinks from n down to i+1, collecting every subarray sum
# in d. O(n^3), with debug prints of each subarray and its sum left in.
for i in range(0,n):
    counter=n
    for j in range(i,n):
        summ=0
        for k in range(i,counter):
            print(a[k],end=' ')
            summ=summ+a[k]
        counter-=1
        print()
        print(summ)
        d.append(summ)
print()
print()
print(d)
# Count how many of the collected subarray sums are even.
gg=0
for i in range(0,len(d)):
    if d[i]%2==0:
        gg+=1
print(gg)
| [
"wimpywarlord@gmail.com"
] | wimpywarlord@gmail.com |
e550a7da4602d02bd9ec286325f5bf90fb73f176 | 5c3d487c1f48e33c507a8aca1c2fc178cf95e17f | /interview/leet/004_2lists_median.py | 90e07fac5e0013b8cd475c180434cbb20983d40b | [
"MIT"
] | permissive | eroicaleo/LearningPython | 52f9bcda796ea8fcc40a2971f30102d2847c93a4 | ebebd1104b1947324fbaae304b44465f80803c8b | refs/heads/master | 2023-07-20T04:34:51.465451 | 2023-07-13T06:43:31 | 2023-07-13T06:43:31 | 14,948,231 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,043 | py | #!/usr/bin/env python
def process_lists(l1, l2, k):
# k is a index in range(0, k)
l1_lo, l1_hi, l2_lo, l2_hi = 0, len(l1)-1, 0, len(l2)-1
while True:
# break condition 2, lo > hi
if l1_hi < l1_lo:
print('crossing l1_lo: %d, l1_hi: %d.' % (l1_lo, l1_hi))
return l2[l2_lo+k]
if l2_hi < l2_lo:
print('crossing l2_lo: %d, l2_hi: %d.' % (l2_lo, l2_hi))
return l1[l1_lo+k]
print('#' * 80)
print("Initial: l1_lo: %d, l1_hi: %d, l2_lo: %d, l2_hi: %d" % (l1_lo, l1_hi, l2_lo, l2_hi))
print('looking for No. %d element in %s and %s' % (k, l1[l1_lo:l1_hi+1], l2[l2_lo:l2_hi+1]))
# After these two steps, l1 and l2 are at most length k-1
# Index k more close to 0 or len(l1) + len(l2) ?
j = (l1_hi-l1_lo+1) + (l2_hi-l2_lo+1) - 1 - k
if k <= j:
l1_hi = min(l1_hi, l1_lo+k)
l2_hi = min(l2_hi, l2_lo+k)
else:
l1_lo = max(l1_lo, l1_hi-j)
l2_lo = max(l2_lo, l2_hi-j)
print("Reduce length: l1_lo: %d, l1_hi: %d, l2_lo: %d, l2_hi: %d" % (l1_lo, l1_hi, l2_lo, l2_hi))
print('looking for the element in %s and %s' % (l1[l1_lo:l1_hi+1], l2[l2_lo:l2_hi+1]))
# temination condition:
if k == 0:
print('k = %d' % k)
return min(l1[l1_lo], l2[l2_lo])
if j == 0:
print('j = %d' % j)
return max(l1[l1_hi], l2[l2_hi])
if k <= j:
# Remove k/2 element
move = int((k-1)/2)
l1_mi, l2_mi = [l + move for l in [l1_lo, l2_lo]]
# they cann't be bigger than l?_hi
l1_mi, l2_mi = min(l1_mi, l1_hi), min(l2_mi, l2_hi)
print("l1[l1_mi] : %d, l2[l2_mi] : %d" % (l1[l1_mi], l2[l2_mi]))
if l1[l1_mi] <= l2[l2_mi]:
real_move = min(l1_hi+1, l1_lo+move+1) - l1_lo
l1_lo += real_move
else:
real_move = min(l2_hi+1, l2_lo+move+1) - l2_lo
l2_lo += real_move
k -= real_move
else:
# Remove j/2 element
move = int((j-1)/2)
l1_mi, l2_mi = [l - move for l in [l1_hi, l2_hi]]
# they cann't be smaller than l?_lo
l1_mi, l2_mi = max(l1_mi, l1_lo), max(l2_mi, l2_lo)
print("l1[l1_mi] : %d, l2[l2_mi] : %d" % (l1[l1_mi], l2[l2_mi]))
if l1[l1_mi] >= l2[l2_mi]:
real_move = l1_hi - max(l1_lo-1, l1_hi-move-1)
l1_hi -= real_move
else:
real_move = l2_hi - max(l2_lo-1, l2_hi-move-1)
l2_hi -= real_move
k = (l1_hi-l1_lo+1) + (l2_hi-l2_lo+1) - 1 - (j-real_move)
print("Remove k/2 elements: l1_lo: %d, l1_hi: %d, l2_lo: %d, l2_hi: %d" % (l1_lo, l1_hi, l2_lo, l2_hi))
print('looking for No. %d element in %s and %s' % (k, l1[l1_lo:l1_hi+1], l2[l2_lo:l2_hi+1]))
# break condition 2, lo > hi
if l1_hi < l1_lo:
print('crossing l1_lo: %d, l1_hi: %d.' % (l1_lo, l1_hi))
return l2[l2_lo+k]
if l2_hi < l2_lo:
print('crossing l2_lo: %d, l2_hi: %d.' % (l2_lo, l2_hi))
return l1[l1_lo+k]
if __name__ == '__main__':
# l1 = [1, 2, 3]
# l2 = [4, 5, 6]
# for i in range(0, len(l1+l2)):
# print('## Iteration i = %d' % i)
# print(process_lists(l1, l2, i))
# l1 = list(range(1, 9, 2))
# l2 = list(range(2, 10, 2))
# for i in range(len(l1+l2)):
# print('## Iteration i = %d' % i)
# print(process_lists(l1, l2, i))
# l1 = [7]
# l2 = list(range(0, 7)) + list(range(8, 16))
# for i in range(len(l1+l2)):
# print('## Iteration i = %d' % i)
# print(process_lists(l1, l2, i))
l1 = [1]
l2 = [2, 3, 4, 5, 6, 7]
l = (len(l1) + len(l2)) // 2
print(process_lists(l1, l2, l))
l1 = [2,3,5,6,8,9]
l2 = [1,4,7]
l = (len(l1) + len(l2)) // 2
print(process_lists(l1, l2, l))
| [
"eroicaleo@gmail.com"
] | eroicaleo@gmail.com |
469482f8e69ff66760208ab38b9533fd5b051aad | 945c6df0d8c129b5ffc3c2bcbadbfe1d6e5608d5 | /Bela/bela.py | e99040a8892ec52d6350f752428ab6a589a365ba | [] | no_license | jb1361/kattis | a93236db5c8c2d5660bf9acc6385db61854f9ff6 | c5245124a18a465b4ea50b11228033fae1c65775 | refs/heads/master | 2018-11-29T23:40:22.853840 | 2018-09-05T15:39:33 | 2018-09-05T15:39:33 | 94,928,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | dominant = [['A',11],['K',4],['Q',3],['J',20],['T',10],['9',14],['8',0],['7',0]]
# Point table for cards NOT in the dominant (trump) suit; the dominant
# table above differs only for J (20 vs 2) and 9 (14 vs 0).
notdominant = [['A',11],['K',4],['Q',3],['J',2],['T',10],['9',0],['8',0],['7',0]]
# First input line: number of deals N and the dominant suit letter.
inp = input()
inp = inp.split()
# Each deal consists of 4 cards, one per following line (e.g. "KS").
hands = int(inp[0]) * 4
domHand = inp[1]
card_data = []
i = 0
while i < hands:
    temp = input()
    card_data.append(temp)
    i += 1
points = 0
def calc_points(card, suit):
    """Return the point value of *card* ('A','K','Q','J','T','9','8','7').

    Cards of the trump suit (``domHand``) are scored from the ``dominant``
    table, all other suits from ``notdominant``.  Returns ``None`` for an
    unknown rank, matching the original fall-through behaviour.
    """
    # Pick the scoring table once instead of duplicating the lookup loop
    # in both branches; also drops the unused `tempp` local.
    table = dominant if suit == domHand else notdominant
    for rank, value in table:
        if rank == card:
            return value
# Sum the value of every dealt card; i[0] is the rank, i[1] the suit.
for i in card_data:
    points += calc_points(i[0],i[1])
print(points) | [
"justinbutler4@hotmail.com"
] | justinbutler4@hotmail.com |
638b967383833a626bf654f60c58a3690a7a5a47 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02911/s816534461.py | 13c30ef108655a3fe436359c3a523f096f0a221c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | n, k, q = map(int, input().split())
a = [int(input()) for i in range(q)]
# Every one of the n entries starts at k and implicitly loses one point per
# event it is not named in: start from k - q and refund one point for each
# appearance in a (entries of a are 1-based indices).
l = [k-q] * n
for i in a:
    l[i-1] += 1
# Only a strictly positive final score prints "Yes".
for f in l:
    if f <= 0:
        print("No")
    else:
print("Yes") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
078aac1497450963b1b2d31e8579a5c45914908d | 56b63ee537f872af0fc028016d1508b4c1dd5c60 | /school/migrations/0069_cloan_deposit.py | fe3ae552699145c8ebafd5e53de70ecfa7fb258e | [] | no_license | jacknjillsolutionsrevanth/EMS1 | 01fc571120f765b0fbfe3aa654b15ff578d6e9b9 | db14d8e6c15669b5938aa9276c5e22006218814a | refs/heads/main | 2023-08-03T19:40:50.073133 | 2021-10-01T07:02:37 | 2021-10-01T07:02:37 | 410,202,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | # Generated by Django 3.1.2 on 2020-11-02 05:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration adding the Cloan and Deposit models."""
    # Must be applied on top of the previous migration of the `school` app.
    dependencies = [
        ('school', '0068_center_ifsc'),
    ]
    operations = [
        # Loan record: terms, rate and repayment schedule of a single loan.
        migrations.CreateModel(
            name='Cloan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('loan_type', models.CharField(blank=True, max_length=255, null=True)),
                ('date', models.DateField(blank=True, null=True)),
                ('transaction_type', models.CharField(blank=True, max_length=255, null=True)),
                ('center', models.CharField(blank=True, max_length=255, null=True)),
                ('loan_no', models.CharField(blank=True, max_length=255, null=True)),
                ('loan_date', models.DateField(blank=True, null=True)),
                ('principal_amt', models.IntegerField(blank=True, null=True)),
                ('interest_rate', models.FloatField(default=0.0)),
                ('flat_deminished', models.CharField(blank=True, max_length=255, null=True)),
                ('loan_duration', models.IntegerField(blank=True, null=True)),
                ('interest_amt', models.FloatField(default=0.0)),
                ('noofinstallments', models.IntegerField(blank=True, null=True)),
                ('installment_amt', models.IntegerField(blank=True, null=True)),
            ],
        ),
        # Deposit record: a returned/closed amount with free-text details.
        migrations.CreateModel(
            name='Deposit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(blank=True, null=True)),
                ('issuedto', models.CharField(blank=True, max_length=255, null=True)),
                ('center', models.CharField(blank=True, max_length=255, null=True)),
                ('transporter', models.CharField(blank=True, max_length=255, null=True)),
                ('modeofreturn', models.CharField(blank=True, max_length=255, null=True)),
                ('amount', models.FloatField(default=0.0)),
                ('remarks', models.CharField(blank=True, max_length=255, null=True)),
                ('closingdate', models.CharField(blank=True, max_length=255, null=True)),
            ],
        ),
    ]
| [
"jacknjillsolutions.revanth@gmail.com"
] | jacknjillsolutions.revanth@gmail.com |
84d15772355558872edd5fbf0f1bde46367a3a78 | e35fd52fe4367320024a26f2ee357755b5d5f4bd | /leetcode/p0953 - Verifying an Alien Dictionary.py | ef9f41b49f256cc3e3d21bf400d9105f0bd05618 | [] | no_license | liseyko/CtCI | a451967b0a0ce108c491d30b81e88d20ad84d2cd | c27f19fac14b4acef8c631ad5569e1a5c29e9e1f | refs/heads/master | 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
order = {c: i for i, c in enumerate(order)}
for i in range(1, len(words)):
w1, w2 = words[i-1], words[i]
for i in range(max(len(w1), len(w2))):
if i == len(w1):
return True
if i == len(w2) or order[w1[i]] > order[w2[i]]:
return False
elif order[w1[i]] < order[w2[i]]:
break
return True
def isAlienSorted(self, words, order):
order = {c: i for i, c in enumerate(order)}
nwords = [[order[c] for c in w] for w in words]
return all(w1 <= w2 for w1, w2 in zip(nwords, nwords[1:]))
| [
"liseyko@gmail.com"
] | liseyko@gmail.com |
82ab39e1ff512816b925a6c5a98b421d60a96b99 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/314/106483/submittedfiles/jogoDaVelha.py | 90530203e54e5e7d61182079f64593ac3dba01f6 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | from jogoDaVelha_BIB import *
tabuleiro = [ [' ',' ',' '], [' ',' ',' '], [' ',' ',' '] ] # Empty board
grupo = 'H [Breendon, Gustavo, Roberto, Rafael]' # Group members' names
jogador = ['',''] # Human player's name and symbol
computador = ['Computador',''] # Computer's name and symbol
bemVindo(grupo)
# Outer loop: one full game per iteration, until the player declines a rematch.
while True:
    # Reset the board for a new game.
    tabuleiro = [ [' ',' ',' '], [' ',' ',' '], [' ',' ',' '] ]
    jogador[0] = input('Qual o seu nome (ou apelido)? ')
    jogador[1], computador[1] = solicitaSimboloDoHumano()
    # Random draw decides who plays first.
    turno = sorteioPrimeiraJogada(jogador[0])
    movimentos = 0
    print('Vencedor do sorteio para início do jogo: {}' .format(turno))
    # Inner loop: alternate turns until someone wins or the board is full.
    while True :
        if turno == computador[0] :
            tabuleiro = jogadaComputador(tabuleiro, computador[1])
            movimentos += 1
            acabou = verificaVencedor(tabuleiro, computador, movimentos)
            if acabou :
                break
            turno = jogador[0]
        else :
            # Keep asking until the human enters a valid move.
            while True :
                jogada = input('Qual a sua jogada, {}? ' .format(jogador[0]))
                jogadaValida = validaJogada(tabuleiro, jogada)
                if not jogadaValida :
                    print('OPS!!! Essa jogada não está disponível. Tente novamente!')
                else :
                    tabuleiro = jogadaHumana(tabuleiro, jogador[1], jogada)
                    movimentos += 1
                    acabou = verificaVencedor(tabuleiro, jogador, movimentos)
                    turno = computador[0]
                    break
        if acabou :
            break
    # Play again only when the answer starts the loop over with 'S' (sim/yes).
    continua = input('Deseja jogar novamente? ').upper()
    if not continua=='S':
        break
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5701ca5a24ad422a24d1e85ccc647845822be9d9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03167/s908459155.py | 5f856077bed000e7aa3d84fcde080fe3df6829a2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | H, W = map(int, input().split())
# Count the paths from the top-left to the bottom-right of an H x W grid,
# moving only right or down and stepping only on '.' cells.
MOD = 10 ** 9 + 7  # integer modulus: the original float literal 1e9+7
                   # silently promoted every dp entry to float
a = [input() for _ in range(H)]
# dp[i][j] = number of ways to reach cell (i, j) from (0, 0), mod MOD.
dp = [[0] * W for _ in range(H)]
dp[0][0] = 1
for i in range(H):
    for j in range(W):
        if a[i][j] == '#':
            continue
        # Push this cell's count forward to its reachable neighbours.
        if i + 1 < H and a[i + 1][j] == '.':
            dp[i + 1][j] = (dp[i + 1][j] + dp[i][j]) % MOD
        if j + 1 < W and a[i][j + 1] == '.':
            dp[i][j + 1] = (dp[i][j + 1] + dp[i][j]) % MOD
print(int(dp[-1][-1])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5ddecfc8cb11d9c0029f0d99237baea75bb36a83 | 4f878cc67a5d447c0a7f76539db82719998d885e | /dash/tut/callback/7.py | 5ebbc8b38b8bc5c986155ca9b69e6de2b8bc7e19 | [] | no_license | masknugget/webapp_lowcode | 91110340c5cfd4d4e11dbea77826e42a3998a84c | fffe73ad87cf02e703529d20f034fb13c2add5ff | refs/heads/main | 2023-06-24T14:46:28.841698 | 2021-07-25T15:32:18 | 2021-07-25T15:32:18 | 389,378,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | # -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_design_kit as ddk  # NOTE(review): imported but never used here
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Two text inputs plus a submit button; the div below shows the result.
app.layout = html.Div([
    dcc.Input(id='input-1-state', type='text', value='Montréal'),
    dcc.Input(id='input-2-state', type='text', value='Canada'),
    html.Button(id='submit-button-state', n_clicks=0, children='Submit'),
    html.Div(id='output-state')
])
# State inputs do not trigger the callback; only the button click does,
# with the current text values passed along.
@app.callback(Output('output-state', 'children'),
              Input('submit-button-state', 'n_clicks'),
              State('input-1-state', 'value'),
              State('input-2-state', 'value'))
def update_output(n_clicks, input1, input2):
    return u'''
        The Button has been pressed {} times,
        Input 1 is "{}",
        and Input 2 is "{}"
    '''.format(n_clicks, input1, input2)
if __name__ == '__main__':
app.run_server(debug=True) | [
"946883098@qq.com"
] | 946883098@qq.com |
20afee652131381186c1524e777baef2bc3b5d6f | 058f6cf55de8b72a7cdd6e592d40243a91431bde | /tests/llvm/dynamic/test_warning_dont_abort/test.py | 09517e76e3243740f057aabb1099d60f3b7704e8 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/FPChecker | 85e8ebf1d321b3208acee7ddfda2d8878a238535 | e665ef0f050316f6bc4dfc64c1f17355403e771b | refs/heads/master | 2023-08-30T23:24:43.749418 | 2022-04-14T19:57:44 | 2022-04-14T19:57:44 | 177,033,795 | 24 | 6 | Apache-2.0 | 2022-09-19T00:09:50 | 2019-03-21T22:34:14 | Python | UTF-8 | Python | false | false | 2,790 | py | #!/usr/bin/env python
import subprocess
# returns: tuple (error, op, file, line)
#
#+-------------------------- FPChecker Warning Report --------------------------+
# Error : Underflow
# Operation : ADD
# File : dot_product.cu
# Line : 9
#+------------------------------------------------------------------------------+
#
def getFPCReport(lines):
    """Parse the first FPChecker warning report found in *lines*.

    Returns a tuple (error, operation, file, line) taken from the four
    lines that follow the report banner, or four empty strings when no
    banner is present.
    """
    for idx, text in enumerate(lines):
        # The banner looks like "+---- FPChecker Warning Report ----+".
        if "FPChecker" in text and "Report" in text and "+" in text:
            # Each detail line has the form "Key : Value"; the value is
            # the third whitespace-separated token.
            fields = [lines[idx + k].split()[2] for k in range(1, 5)]
            return tuple(fields)
    return ("", "", "", "")
def compileAndRun(op_level):
    # Build the test program with "make -f Makefile.<op_level>" (op_level is
    # "0".."3", presumably selecting the compiler optimization level -- see
    # main()), then run ./main and return its combined stdout/stderr split
    # into lines.  Any failure prints the tool output and exits the script.
    # NOTE: Python 2 syntax (print statement).
    # --- compile code ---
    cmd = ["make -f Makefile." + op_level]
    try:
        cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        print e.output
        exit()
    # --- run code ---
    cmd = ["./main"]
    try:
        cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        print e.output
        exit()
    ret = cmdOutput.split("\n")
    #print ret
    return ret
# Check we get exactly 64 errors in the report (8 elems x 8 threads)
def checkForErrorReports(out):
    """Return True iff *out* contains both expected FPChecker error lines,
    each reporting exactly 64 errors (8 elements x 8 threads): one for
    dot_product.cu:8 and one for dot_product.cu:18.
    """
    # Removed the unused `ret` local from the original.
    firstLine = False
    secondLine = False
    for l in out:
        if "#FPCHECKER: Errors at dot_product.cu:8" in l and "#64" in l:
            firstLine = True
        if "#FPCHECKER: Errors at dot_product.cu:18" in l and "#64" in l:
            secondLine = True
    return firstLine and secondLine
def main():
    # Build and run the instrumented program at the four Makefile variants
    # ("0".."3"); at every variant the run must produce NO abort report and
    # the expected per-line error counters must be present.
    op0_res = compileAndRun("0")
    rep0 = getFPCReport(op0_res)
    op1_res = compileAndRun("1")
    rep1 = getFPCReport(op1_res)
    op2_res = compileAndRun("2")
    rep2 = getFPCReport(op2_res)
    op3_res = compileAndRun("3")
    rep3 = getFPCReport(op3_res)
    # An all-empty report tuple means no FPChecker abort banner was printed.
    no_aborts_are_seen = False
    if rep0 == ("", "", "", "") and rep1 == ("", "", "", "") and rep2 == ("", "", "", "") and rep3 == ("", "", "", ""):
        no_aborts_are_seen = True
    error_report_is_correct = False
    if checkForErrorReports(op0_res) == True and checkForErrorReports(op1_res) and checkForErrorReports(op2_res) and checkForErrorReports(op3_res):
        error_report_is_correct = True
    # The test passes only when both conditions hold for every variant.
    if no_aborts_are_seen==True and error_report_is_correct==True:
        print "PASSED"
    else:
        print "failed"
main()
| [
"ilaguna@llnl.gov"
] | ilaguna@llnl.gov |
2c557f523e47a0593ccd02f8d8c509ceaf504d3a | 71460476c5f5ebdca719def124f1a0650861fdab | /mint_work/custom/client_plan_upgrade/models/__init__.py | 0280776e04b90ee44aff3b2999f96ed1c2b472d6 | [] | no_license | merdhah/dubai_work | fc3a70dc0b1db6df19c825a3bf1eef2a373d79c0 | e24eb12b276a4cd5b47a4bd5470d915179872a4f | refs/heads/master | 2022-01-07T11:22:07.628435 | 2018-10-17T13:37:24 | 2018-10-17T13:37:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | # -*- coding: utf-8 -*-
# Copyright 2015 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from . import res_user
from . import ir_module_module
# from . import models_new
| [
"asghar0517@gmail.com"
] | asghar0517@gmail.com |
87b18f2181d9c52a66be0587a9b7b20999510dae | df191de3e8c14e10c2a78318c987371a59f1465c | /sturgisbank/settings.py | 3ddf4b654f66cb06f5a09d5e445759ea81d4e601 | [] | no_license | daniel-kanchev/sturgisbank | 500c1cb6351b839bd5d30f6d914f1a1c2fada783 | eb0e1122374c675d00937fe9b4a3f2931b665497 | refs/heads/main | 2023-04-05T04:38:37.418610 | 2021-04-02T07:43:47 | 2021-04-02T07:43:47 | 353,945,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | BOT_NAME = 'sturgisbank'
SPIDER_MODULES = ['sturgisbank.spiders']
NEWSPIDER_MODULE = 'sturgisbank.spiders'

# Scrapy expects USER_AGENT to be a single string; the original assignment
# ended with a trailing comma, which silently made it a 1-tuple.
USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0'

# Pipeline order value (0-1000): lower numbers run first.
ITEM_PIPELINES = {
    'sturgisbank.pipelines.DatabasePipeline': 300,
}

FEED_EXPORT_ENCODING = 'utf-8'
ROBOTSTXT_OBEY = True
LOG_LEVEL = 'WARNING'
# LOG_LEVEL = 'DEBUG'
| [
"daniel.kanchev@adata.pro"
] | daniel.kanchev@adata.pro |
35eb78d6f9cd10f98b78a299d6e86811bd229b75 | b0cdec1a01255ca65da18433e9d2620c7fad181c | /manabot | d1182b224cb929bf6746b9839887d17a75dfaa00 | [
"MIT"
] | permissive | DronMDF/manabot | c7f05b90c1f0c125a3f30961c39dea642c64f8f1 | b412e8cb9b5247f05487bed4cbf4967f7b58327f | refs/heads/master | 2021-09-05T03:03:50.885632 | 2018-01-23T20:48:39 | 2018-01-23T20:48:39 | 112,320,984 | 1 | 0 | MIT | 2018-01-23T20:48:40 | 2017-11-28T10:27:35 | Python | UTF-8 | Python | false | false | 342 | #!/usr/bin/env python3
import sys
from tb import Application, ConfigFromArgs, ConfigFromFile, ConfigFile, ConfigDefault
# Build the bot with layered configuration: command-line arguments override
# values from the config file, and the config file's own path is resolved
# from the arguments with /etc/manabot.conf as the default.
app = Application(
ConfigFromArgs(
sys.argv,
ConfigFromFile(
ConfigFile(
ConfigFromArgs(
sys.argv,
ConfigDefault({'config': '/etc/manabot.conf'})
)
),
ConfigDefault({})
)
)
)
app.run()
| [
"dron.valyaev@gmail.com"
] | dron.valyaev@gmail.com | |
63cea6c8d102f66e4c4a8db82a3cb4d7ea41b7e5 | b5e4c4e3abb7f87bfd70ecd912810e2562cecdc5 | /section6/venv/Lib/site-packages/aniso8601/resolution.py | e118112f415bc40aef09eecf2ffc5985b075cfd2 | [] | no_license | chandshilpa/flaskapi | a89822707dc02f9c588af04f1f33f82a55b627b3 | 5f229d59d155e68e026566919d292c831ea00ed4 | refs/heads/master | 2022-12-09T10:59:14.563256 | 2019-01-08T17:33:46 | 2019-01-08T17:33:46 | 164,698,842 | 0 | 1 | null | 2022-12-07T16:24:53 | 2019-01-08T17:21:32 | Python | UTF-8 | Python | false | false | 424 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from aniso8601 import compat
class DateResolution(object):
    """Constants describing how precise a parsed ISO 8601 date is."""
    # Consecutive integers 0..5; compat.range is presumably the library's
    # Python 2/3 range shim -- confirm in aniso8601.compat.
    Year, Month, Week, Weekday, Day, Ordinal = list(compat.range(6))
class TimeResolution(object):
    """Constants describing how precise a parsed ISO 8601 time is."""
    # Consecutive integers 0..2, ordered from finest to coarsest resolution.
    Seconds, Minutes, Hours = list(compat.range(3))
| [
"chandsandeep700@gmail.com"
] | chandsandeep700@gmail.com |
3d6124c3ce877101be4b17672474a2c7edb48eb4 | 8dcd3ee098b4f5b80879c37a62292f42f6b2ae17 | /venv/Lib/site-packages/pandas/tests/series/test_period.py | bf3096fd0a0d6d69bae147ecab6fa870afcc4eb3 | [] | no_license | GregVargas1999/InfinityAreaInfo | 53fdfefc11c4af8f5d2b8f511f7461d11a3f7533 | 2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a | refs/heads/master | 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,977 | py | import numpy as np
import pandas as pd
import pandas._testing as tm
import pytest
from pandas import DataFrame, Period, Series, period_range
from pandas.core.arrays import PeriodArray
class TestSeriesPeriod:
    """Tests for Series holding period-dtype (PeriodDtype) data:
    construction, indexing, NaT handling, alignment and conversions."""
    def setup_method(self, method):
        # Fresh 10-day daily-frequency period Series before every test.
        self.series = Series(period_range("2000-01-01", periods=10, freq="D"))
    def test_auto_conversion(self):
        # Lists of Period objects should infer Period dtype automatically.
        series = Series(list(period_range("2000-01-01", periods=10, freq="D")))
        assert series.dtype == "Period[D]"
        series = pd.Series(
            [pd.Period("2011-01-01", freq="D"), pd.Period("2011-02-01", freq="D")]
        )
        assert series.dtype == "Period[D]"
    def test_getitem(self):
        assert self.series[1] == pd.Period("2000-01-02", freq="D")
        # Fancy indexing must preserve the Period dtype.
        result = self.series[[2, 4]]
        exp = pd.Series(
            [pd.Period("2000-01-03", freq="D"), pd.Period("2000-01-05", freq="D")],
            index=[2, 4],
            dtype="Period[D]",
        )
        tm.assert_series_equal(result, exp)
        assert result.dtype == "Period[D]"
    def test_isna(self):
        # GH 13737
        s = Series([pd.Period("2011-01", freq="M"), pd.Period("NaT", freq="M")])
        tm.assert_series_equal(s.isna(), Series([False, True]))
        tm.assert_series_equal(s.notna(), Series([True, False]))
    def test_fillna(self):
        # GH 13737
        s = Series([pd.Period("2011-01", freq="M"), pd.Period("NaT", freq="M")])
        res = s.fillna(pd.Period("2012-01", freq="M"))
        exp = Series([pd.Period("2011-01", freq="M"), pd.Period("2012-01", freq="M")])
        tm.assert_series_equal(res, exp)
        assert res.dtype == "Period[M]"
    def test_dropna(self):
        # GH 13737
        s = Series([pd.Period("2011-01", freq="M"), pd.Period("NaT", freq="M")])
        tm.assert_series_equal(s.dropna(), Series([pd.Period("2011-01", freq="M")]))
    def test_between(self):
        left, right = self.series[[2, 7]]
        result = self.series.between(left, right)
        expected = (self.series >= left) & (self.series <= right)
        tm.assert_series_equal(result, expected)
    # ---------------------------------------------------------------------
    # NaT support
    @pytest.mark.xfail(reason="PeriodDtype Series not supported yet")
    def test_NaT_scalar(self):
        series = Series([0, 1000, 2000, pd._libs.iNaT], dtype="period[D]")
        val = series[3]
        assert pd.isna(val)
        series[2] = val
        assert pd.isna(series[2])
    def test_NaT_cast(self):
        # NaN should become NaT when cast to a period dtype.
        result = Series([np.nan]).astype("period[D]")
        expected = Series([pd.NaT], dtype="period[D]")
        tm.assert_series_equal(result, expected)
    def test_set_none(self):
        # Assigning None (scalar and slice) coerces to NaT.
        self.series[3] = None
        assert self.series[3] is pd.NaT
        self.series[3:5] = None
        assert self.series[4] is pd.NaT
    def test_set_nan(self):
        # Do we want to allow this?
        self.series[5] = np.nan
        assert self.series[5] is pd.NaT
        self.series[5:7] = np.nan
        assert self.series[6] is pd.NaT
    def test_intercept_astype_object(self):
        expected = self.series.astype("object")
        df = DataFrame({"a": self.series, "b": np.random.randn(len(self.series))})
        result = df.values.squeeze()
        assert (result[:, 0] == expected.values).all()
        df = DataFrame({"a": self.series, "b": ["foo"] * len(self.series)})
        result = df.values.squeeze()
        assert (result[:, 0] == expected.values).all()
    def test_align_series(self, join_type):
        # join_type is a pytest fixture supplying each join method.
        rng = period_range("1/1/2000", "1/1/2010", freq="A")
        ts = Series(np.random.randn(len(rng)), index=rng)
        ts.align(ts[::2], join=join_type)
    def test_truncate(self):
        # GH 17717
        idx1 = pd.PeriodIndex(
            [pd.Period("2017-09-02"), pd.Period("2017-09-02"), pd.Period("2017-09-03")]
        )
        series1 = pd.Series([1, 2, 3], index=idx1)
        result1 = series1.truncate(after="2017-09-02")
        expected_idx1 = pd.PeriodIndex(
            [pd.Period("2017-09-02"), pd.Period("2017-09-02")]
        )
        tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1))
        idx2 = pd.PeriodIndex(
            [pd.Period("2017-09-03"), pd.Period("2017-09-02"), pd.Period("2017-09-03")]
        )
        series2 = pd.Series([1, 2, 3], index=idx2)
        result2 = series2.sort_index().truncate(after="2017-09-02")
        expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")])
        tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2))
    @pytest.mark.parametrize(
        "input_vals",
        [
            [Period("2016-01", freq="M"), Period("2016-02", freq="M")],
            [Period("2016-01-01", freq="D"), Period("2016-01-02", freq="D")],
            [
                Period("2016-01-01 00:00:00", freq="H"),
                Period("2016-01-01 01:00:00", freq="H"),
            ],
            [
                Period("2016-01-01 00:00:00", freq="M"),
                Period("2016-01-01 00:01:00", freq="M"),
            ],
            [
                Period("2016-01-01 00:00:00", freq="S"),
                Period("2016-01-01 00:00:01", freq="S"),
            ],
        ],
    )
    def test_end_time_timevalues(self, input_vals):
        # GH 17157
        # Check that the time part of the Period is adjusted by end_time
        # when using the dt accessor on a Series
        input_vals = PeriodArray._from_sequence(np.asarray(input_vals))
        s = Series(input_vals)
        result = s.dt.end_time
        expected = s.apply(lambda x: x.end_time)
        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize("input_vals", [("2001"), ("NaT")])
    def test_to_period(self, input_vals):
        # GH 21205
        expected = Series([input_vals], dtype="Period[D]")
        result = Series([input_vals], dtype="datetime64[ns]").dt.to_period("D")
        tm.assert_series_equal(result, expected)
| [
"44142880+GregVargas1999@users.noreply.github.com"
] | 44142880+GregVargas1999@users.noreply.github.com |
9058c8094f9714649dceace28ed2e34cb079ee65 | 86857aa31757eb76afbbb6e1f803ebfb09375dd9 | /leetcode/leetcode208.py | 3af676c22ac75436e566917418a9800d87fc0c44 | [] | no_license | jingxiufenghua/algorithm_homework | 075efb3122e20411141d64c8e25d97411a2c7a1c | 5cd8a6c99c463ce01f512379bcb265b7f0b99885 | refs/heads/master | 2023-05-14T15:59:20.272453 | 2021-06-04T02:43:53 | 2021-06-04T02:43:53 | 337,891,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | import collections
class Node(object):
    # One trie node: children maps a character to the child node, with
    # missing children created on demand by the defaultdict.
    def __init__(self):
        self.children = collections.defaultdict(Node)
        # True when some inserted word ends exactly at this node.
        self.isword = False
class Trie(object):
    """Prefix tree supporting insert, exact-word search and prefix search."""
    def __init__(self):
        self.root = Node()
    def _find(self, s):
        # Walk the trie along s; return the final node, or None when the
        # path does not exist.  Shared by search() and startsWith(), which
        # previously duplicated this traversal.
        current = self.root
        for ch in s:
            current = current.children.get(ch)
            # Use `is None` (identity) instead of the original `== None`.
            if current is None:
                return None
        return current
    def insert(self, word):
        """Add *word* to the trie, creating nodes as needed."""
        current = self.root
        for ch in word:
            current = current.children[ch]
        current.isword = True
    def search(self, word):
        """Return True iff *word* was inserted as a complete word."""
        node = self._find(word)
        return node is not None and node.isword
    def startsWith(self, prefix):
        """Return True iff some inserted word starts with *prefix*."""
        return self._find(prefix) is not None
| [
"2450392436@qq.com"
] | 2450392436@qq.com |
48c1a46f49c2ca8c340460b91ea52a5cd294d71c | 839fb68043bd3a827f6ed6d123844922419284e9 | /Chapter09/named_font_demo.py | 487217ec0f1a2a61dfcf1c7648af3d62d5a52c56 | [
"MIT"
] | permissive | LihengGong/Python-GUI-Programming-with-Tkinter-2E | 15220f2487686a04c82451fd212b6fc6095a888a | 9e9c7468982992d87358be09c11c2cfaaaecd615 | refs/heads/main | 2023-08-25T12:18:10.976232 | 2021-10-20T19:49:01 | 2021-10-20T19:49:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | import tkinter as tk
from tkinter import font
root = tk.Tk()
# Show every named (stock) Tk font, each label rendered in that font.
for name in font.names():
    font_obj = font.nametofont(name)
    tk.Label(root, text=name, font=font_obj).pack()
# Controls: pick a named font plus a family and size to apply to it.
namedfont = tk.StringVar()
family = tk.StringVar()
size = tk.IntVar()
tk.OptionMenu(root, namedfont, *font.names()).pack()
tk.OptionMenu(root, family, *font.families()).pack()
tk.Spinbox(root, textvariable=size, from_=6, to=128).pack()
def setFont():
    # Reconfigure the selected named font in place with the chosen
    # family and size.
    font_obj = font.nametofont(namedfont.get())
    font_obj.configure(family=family.get(), size=size.get())
tk.Button(root, text='Change', command=setFont).pack()
root.mainloop()
| [
"me@alandmoore.com"
] | me@alandmoore.com |
a0d98bf52dab4cccef405a7ad9db5e5e13ff8a44 | 1a54763c0774679bffa193db3f41781ca68b0e96 | /concurrence/day04/thread_server.py | 977cf317fd26cc11412820949f9c7163ac61e832 | [] | no_license | RRCHcc/python_net | 137e6e50b5cd1c71a9decdd1ba18509177ba2f4e | 795d4e56e49101c3c0a81230a1d928454ddd2544 | refs/heads/master | 2020-05-30T10:54:47.744245 | 2019-06-01T03:17:35 | 2019-06-01T03:17:35 | 189,685,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | """
基于threading的多线程网络并发
重点代码
1. 创建监听套接字
2. 循环接收客户端连接请求
3. 当有新的客户端连接创建线程处理客户端请求
4. 主线程继续等待其他客户端连接
5. 当客户端退出,则对应分支线程退出
"""
from socket import *
from threading import Thread
import os, sys
def handle(c):
    """Serve one connected client: print each received message and answer
    b"OK" until the peer disconnects, then close the socket."""
    print("客户端:", c.getpeername())
    while True:
        data = c.recv(1024)
        if not data:
            # Empty recv: the client closed the connection.
            break
        print(data.decode())
        c.send(b"OK")
    c.close()
# Create the listening socket.
HOST = "0.0.0.0"
PORT = 44447
ADDR = (HOST, PORT)
s = socket()
# Allow quick restarts: reuse the address even while in TIME_WAIT.
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(ADDR)
s.listen(3)
print("Listen the port 44447...")
# Loop forever accepting client connections.
while True:
    try:
        c, addr = s.accept()
    except KeyboardInterrupt:
        sys.exit("退出服务器") # exit the process
    except Exception as e:
        print(e)
        continue
    # Spawn a new thread to handle this client's requests.
    t = Thread(target=handle, args=(c,))
    t.setDaemon(True) # daemon: worker threads exit with the main thread
    t.start()
| [
"2570629639@qq.com"
] | 2570629639@qq.com |
eb7d099aa8ea32a713245571c139a2c0b88358e4 | d05c946e345baa67e7894ee33ca21e24b8d26028 | /machine-learning/blur-faces/blur_faces.py | dfe4f59dbc9e72ed7e87a6080f10cc661b823695 | [
"MIT"
] | permissive | x4nth055/pythoncode-tutorials | 327255550812f84149841d56f2d13eaa84efd42e | d6ba5d672f7060ba88384db5910efab1768c7230 | refs/heads/master | 2023-09-01T02:36:58.442748 | 2023-08-19T14:04:34 | 2023-08-19T14:04:34 | 199,449,624 | 1,858 | 2,055 | MIT | 2023-08-25T20:41:56 | 2019-07-29T12:35:40 | Jupyter Notebook | UTF-8 | Python | false | false | 1,882 | py | import cv2
import numpy as np
import sys
# OpenCV DNN face-detector sample files:
# https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
prototxt_path = "weights/deploy.prototxt.txt"
# https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel
model_path = "weights/res10_300x300_ssd_iter_140000_fp16.caffemodel"
# load Caffe model
model = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
# get the image file name from the command line
image_file = sys.argv[1]
# read the desired image
image = cv2.imread(image_file)
# get width and height of the image
h, w = image.shape[:2]
# gaussian blur kernel sizes must be odd; "| 1" forces the low bit on
kernel_width = (w // 7) | 1
kernel_height = (h // 7) | 1
# preprocess the image: resize and perform mean subtraction
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
# set the image into the input of the neural network
model.setInput(blob)
# perform inference and get the result
output = np.squeeze(model.forward())
for i in range(0, output.shape[0]):
    confidence = output[i, 2]
    # if confidence is above 40%, then blur the bounding box (face)
    if confidence > 0.4:
        # get the surrounding box coordinates and upscale them to the original image
        box = output[i, 3:7] * np.array([w, h, w, h])
        # convert to integers with the builtin int: the np.int alias was
        # deprecated in NumPy 1.20 and removed in 1.24, so the original
        # box.astype(np.int) crashes on current NumPy
        start_x, start_y, end_x, end_y = box.astype(int)
        # get the face image
        face = image[start_y: end_y, start_x: end_x]
        # apply gaussian blur to this face
        face = cv2.GaussianBlur(face, (kernel_width, kernel_height), 0)
        # put the blurred face into the original image
        image[start_y: end_y, start_x: end_x] = face
cv2.imshow("image", image)
cv2.waitKey(0)
cv2.imwrite("image_blurred.jpg", image) | [
"fullclip@protonmail.com"
] | fullclip@protonmail.com |
fd7413e2f751d25f991c8131197624b90234bd14 | 5fbdbbd4d1f5b0f7c729f355d3ab930d7b55a726 | /dataDriver_text2.py | b0e45f24e59e20f51e59722b2bc0a20a59bf8bcc | [] | no_license | Shuimoningxiang/untitled | 28fb6b4b87116899ba907cca830e0e2119671546 | b3a7ca3de754a0173ed52e47012c279a91a64763 | refs/heads/master | 2021-09-04T18:52:19.162715 | 2018-01-21T09:48:53 | 2018-01-21T09:48:53 | 118,321,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | import csv
import os
# path = os.path.dirname(__file__)
# final_path=path+"\date\huiyuan.csv"
# print(final_path)
def readData():
    """Read date/huiyuan.csv from the directory containing this script.

    Returns the file contents as a list of rows, each row a list of
    string fields.  The resolved path is printed for debugging.
    """
    base_dir = os.path.dirname(__file__)
    # Build the path portably instead of concatenating a hard-coded
    # Windows-style "\date\huiyuan.csv" suffix.
    final_path = os.path.join(base_dir, "date", "huiyuan.csv")
    print(final_path)
    result = []
    # newline="" is the csv-module recommended way to open CSV files.
    with open(final_path, "r", newline="") as file:
        for row in csv.reader(file):
            result.append(row)
    return result
# Load the CSV and print every field on its own line.
abcd=readData()
for i in abcd:
    for i2 in i:
        print(i2)
| [
"51Testing"
] | 51Testing |
f15daef86f2215d4497e0a7d238f99f873f1f3aa | 1a5a9bfa6ee62c328fc6ab828ad743c555b0f23a | /catagory/JianzhiOffer/stage-02/0365-count-1-in-binary.py | 02c5d9bde729f707f70f94ba181e303c223f577a | [] | no_license | zzy1120716/my-nine-chapter | 04b3e4d43a0d8086e5c958b81a3dc4356622d65f | c7bf3eed366b91d6bdebb79d0f11680cf7c18344 | refs/heads/master | 2020-03-30T03:07:14.748145 | 2019-05-15T13:07:44 | 2019-05-15T13:07:44 | 150,670,072 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | """
365. 二进制中有多少个1
中文English
计算在一个 32 位的整数的二进制表示中有多少个 1。
样例
样例 1:
输入:32
输出:1
解释:
32(100000),返回 1。
样例 2:
输入:5
输出:2
解释:
5(101),返回 2。
挑战
如果整数有 n 位,并且有 m 位个 1。你能在 O(m) 的时间内解决它吗?
"""
class Solution:
    """
    @param: num: An integer
    @return: An integer
    """
    def countOnes(self, num):
        """Count the 1-bits in the 32-bit two's-complement form of num."""
        # Masking with 0xFFFFFFFF keeps exactly the low 32 bits and maps a
        # negative Python int onto its 32-bit two's-complement pattern --
        # the same bits the original 32-iteration shift loop inspected.
        # bin() then renders them and count("1") tallies the set bits.
        return bin(num & 0xFFFFFFFF).count("1")
if __name__ == '__main__':
    # expected: 32 (every bit set in the 32-bit two's complement of -1)
    print(Solution().countOnes(-1))
    # expected: 1 (256 == 2**8)
    print(Solution().countOnes(256))
| [
"zzy1120716@126.com"
] | zzy1120716@126.com |
f66168c7f02a410dbb138535e1f3375b0ccbae9d | 87bd02d63966ed1539d107497b8fdbf931b02121 | /2018/07/aoc2018_07_part1.py | 1d03f024bb78b9ff72538dc5542137e8fd5f4757 | [] | no_license | kajott/adventofcode | c4764d97d4ad4045a7f055862a11077c7e155ea3 | 60f51bce5de5e94eb3763970f0524d281bc1978b | refs/heads/master | 2023-01-06T16:27:23.716873 | 2022-12-29T13:56:47 | 2022-12-29T13:56:47 | 161,079,423 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | import re,collections as C
# Golfed Python 2 solver; by its path this is Advent of Code 2018 day 7
# part 1: emit the steps of a dependency graph in topological order,
# breaking ties alphabetically.
D=C.defaultdict
# l: pending-prerequisite count per step; g: step -> dependent steps;
# r: the output order being built.
l,g,r=D(int),D(set),""
# Each input line contributes two single-letter step names a, b meaning
# "a must be finished before b"; l[a]+=0 just registers a with count 0.
for a,b in(re.findall(r'\b\w\b',x)for x in open("input.txt")):g[a]|={b};l[a]+=0;l[b]+=1
while l:
 x=min((c,x)for x,c in l.items())[1]
 for y in g[x]:l[y]-=1
 r+=x;del l[x]
print r
| [
"keyj@emphy.de"
] | keyj@emphy.de |
ffc10c964ddda8442bd5f414c795f7f8c76c2c05 | 9c3765dba0b249eb0a8da92076d2ae01291fc0e7 | /not_done/py_not_started/euler_306.py | 391ec906144e93d97cc62d77050ca4dc0c03555d | [] | no_license | saetar/pyEuler | 3a021f95a1856775bef87b38c753049b04282b80 | f0af7092e16c2109028b4b1aa5bed7a0057d3fe9 | refs/heads/master | 2020-03-21T12:05:15.430454 | 2018-06-15T03:50:50 | 2018-06-15T03:50:50 | 138,535,115 | 0 | 0 | null | 2018-06-25T02:40:43 | 2018-06-25T02:40:42 | null | UTF-8 | Python | false | false | 1,279 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~ Jesse Rubin ~ project Euler ~
"""
Paper-strip Game
http://projecteuler.net/problem=306
The following game is a classic example of Combinatorial Game Theory:
Two players start with a strip of n white squares and they take alternate turns.
On each turn, a player picks two contiguous white squares and paints them black.
The first player who cannot make a move loses.
If n = 1, there are no valid moves, so the first player loses automatically.
If n = 2, there is only one valid move, after which the second player loses.
If n = 3, there are two valid moves, but both leave a situation where the second player loses.
If n = 4, there are three valid moves for the first player; she can win the game by painting the two middle squares.
If n = 5, there are four valid moves for the first player (shown below in red); but no matter what she does, the second player (blue) wins.
So, for 1 ≤ n ≤ 5, there are 3 values of n for which the first player can force a win.
Similarly, for 1 ≤ n ≤ 50, there are 40 values of n for which the first player can force a win.
For 1 ≤ n ≤ 1 000 000, how many values of n are there for which the first player can force a win?
"""
def p306():
    # TODO: not implemented -- should count how many n in [1, 1_000_000]
    # let the first player force a win in the paper-strip game described
    # in the module docstring.
    pass
if __name__ == '__main__':
    p306()
"jessekrubin@gmail.com"
] | jessekrubin@gmail.com |
c0f66b8b7bbe8922ed6c8bcc3fa84e62fd9fccf7 | 90df3cbdea7146a62e55cd74366aac8601d5da27 | /test_geweke.py | e89b12df62af6dd4defe1db8231750c4075a3e1f | [] | no_license | mattjj/gslrandom | fd7d13fea77640078b6a40b510136507399097f8 | 98eee548bdd14680393ac53cfc2536dd67e0efb3 | refs/heads/master | 2021-01-21T20:16:24.504708 | 2015-06-10T21:26:30 | 2015-06-10T21:47:29 | 36,503,828 | 0 | 0 | null | 2015-05-29T12:45:05 | 2015-05-29T12:45:05 | null | UTF-8 | Python | false | false | 2,136 | py | """
A simple Geweke test. I'm afraid there might be a slight bias toward
sampling the first entry in the multinomial.
"""
import os
import numpy as np
np.random.seed(1234)
from scipy.stats import probplot, beta
import matplotlib.pyplot as plt
from pybasicbayes.distributions import Multinomial
from pybasicbayes.util.text import progprint_xrange
from gslrandom import multinomial_par, multinomial,PyRNG, get_omp_num_threads
if "OMP_NUM_THREADS" in os.environ:
num_threads = int(os.environ["OMP_NUM_THREADS"])
else:
num_threads = get_omp_num_threads()
assert num_threads > 0
import ipdb; ipdb.set_trace()
# Choose random seeds
seeds = np.random.randint(2**16, size=num_threads)
pyrngs = [PyRNG(seed) for seed in seeds]
alpha = 1.
K = 3
N = 100
Ns = np.random.poisson(10, size=N).astype(np.uint32)
# Ns = np.ones(N).astype(np.uint32)
# Sample model
dirichlet = Multinomial(alphav_0=alpha*np.ones(K), K=K)
X = np.zeros((N, K), dtype=np.uint32)
multinomial_par(pyrngs, Ns, dirichlet.weights * np.ones((N,K)), X)
N_iter = 50000
samplers = ["numpy", "multinomial", "multinomial_par"]
fig = plt.figure()
for i,sampler in enumerate(samplers):
print "Testing ", sampler
ps = []
for itr in progprint_xrange(N_iter, perline=50):
# Resample the dirichlet
dirichlet.resample(X)
# Resample X
if sampler == "numpy":
for n,x in zip(Ns,X):
x[:] = np.random.multinomial(n, dirichlet.weights)
elif sampler == "multinomial":
multinomial(pyrngs[0], Ns, dirichlet.weights * np.ones((N,K)), out=X)
elif sampler == "multinomial_par":
multinomial_par(pyrngs, Ns, dirichlet.weights * np.ones((N,K)), X)
else:
raise Exception("invalid sampler")
# Get sample
ps.append(dirichlet.weights.copy())
ps = np.array(ps)
print np.mean(ps, axis=0)
print np.std(ps, axis=0)
for k in xrange(K):
ax = fig.add_subplot(K,len(samplers),i*K+k+1)
marg_p = beta(alpha, (K-1)*alpha)
probplot(ps[:,k], dist=marg_p, plot=ax)
ax.set_title(sampler + "_%d" % k)
plt.show() | [
"scott.linderman@gmail.com"
] | scott.linderman@gmail.com |
6c78ccfccb734995304f041d3de5d2726e9d1b63 | 1a220abd21c56728aa3368534506bfc9ced8ad46 | /95.COS/2급/모의고사 3회/03.py | 2a1980856327f4a9abae0ae30e2455e9771fbf18 | [] | no_license | JeonJe/Algorithm | 0ff0cbf47900e7877be077e1ffeee0c1cd50639a | 6f8da6dbeef350f71b7c297502a37f87eb7d0823 | refs/heads/main | 2023-08-23T11:08:17.781953 | 2023-08-23T08:31:41 | 2023-08-23T08:31:41 | 197,085,186 | 0 | 0 | null | 2023-02-21T03:26:41 | 2019-07-15T23:22:55 | Python | UTF-8 | Python | false | false | 1,515 | py | # 체조 경기를 진행하고 있습니다. 지금 연기한 선수의 연기 완성도를 채점하는 E점수를 결정하려고
# 합니다. E심판은 모두 6명이며, 각 심판들은 100점 만점에서 시작하여 실수에 따라 점수를 감점합
# 니다. E심판의 점수 중 최고점과 최저점을 제외하고 나머지 심판들의 점수 평균을 최종 E점수로
# 정합니다. 단, 이때 소수점 이하는 버립니다.
# 예를 들어 6명의 E심판이 채점한 점수가 [90, 80, 70, 85, 100, 90]라면, 가장 높은 점수인 100점
# 과 가장 낮은 점수인 70점을 제외하고 나머지 점수의 평균을 구하게 되면 85점입니다. 소수점 이
# 하를 버리게 되면 85점이 최종 점수가 됩니다.
# E심판이 채점한 점수가 담긴 리스트 scores가 매개변수로 주어질 때 최종 E점수를 return하도록
# solution 함수를 작성해 주세요.
#다음과 같이 import를 사용할 수 있습니다.
#import math
def solution(scores):
    """Return the final E-score from the six judges' scores.

    Rule (from the problem statement above): drop the single highest and
    the single lowest score, average the remaining ones, and floor the
    result.

    Fixes two bugs in the original: operator precedence made the division
    `/ size - 2` instead of `/ (size - 2)`, and the function always
    returned 0 instead of the computed average.

    :param scores: list of judge scores (non-negative integers).
    :return: floored mean of the scores with min and max removed.
    """
    ordered = sorted(scores)
    trimmed = ordered[1:-1]  # drop one lowest and one highest score
    # Floor division is exact flooring for non-negative integer scores.
    return sum(trimmed) // len(trimmed)
# The code below exercises the sample test case from the statement.
scores = [90, 80, 70, 85, 100, 90]
ret = solution(scores)
# Press the [Run] button to see the printed return value.
print("solution 함수의 반환 값은", ret, "입니다.")
| [
"43032391+JeonJe@users.noreply.github.com"
] | 43032391+JeonJe@users.noreply.github.com |
d6fa91da5175175095b83b4070163dce36509ec6 | cc08e9349a14620409dee0bdcf1420976352cf0d | /04_algorithm/06day/걍 품/시간개념.py | 88a829bdbc1897644a4272fafeda435eef97a762 | [] | no_license | Nyapy/TIL | b3f611177d3c54d224c9983b5bedc62abddeeaae | c3c52ad33963628674de4c1dcf8aed53f67af177 | refs/heads/master | 2023-01-11T19:37:02.693660 | 2020-07-04T04:31:41 | 2020-07-04T04:31:41 | 195,938,004 | 2 | 2 | null | 2023-01-07T11:25:27 | 2019-07-09T05:22:47 | Python | UTF-8 | Python | false | false | 879 | py | import sys
sys.stdin = open('시간개념.txt')
T = int(input())
for tc in range(T):
time1 = list(map(int, input().split(':')))
time2 = list(map(int, input().split(':')))
abs_sec1 = 3600*time1[0] + 60*time1[1] + time1[2]
abs_sec2 = 3600 * time2[0] + 60 * time2[1] + time2[2]
if abs_sec1 < abs_sec2:
time_lag = abs_sec2 - abs_sec1
hour = time_lag//3600
minute = (time_lag%3600)//60
sec = (time_lag % 3600) % 60
time = []
time += [hour]
time += [minute]
time += [sec]
else:
time_lag = (3600 * 24) - abs_sec1 + abs_sec2
hour = time_lag // 3600
minute = (time_lag % 3600) // 60
sec = (time_lag % 3600) % 60
time = []
time += [hour]
time += [minute]
time += [sec]
print("{:02}:{:02}:{:02}" .format(hour, minute, sec))
| [
"nyapy@naver.com"
] | nyapy@naver.com |
df7b984513c24df772aa36fd5577fb62ddef4f6b | 317d199d36556ecf5da06c660cb5cb655a86ea09 | /Challenges/rock_paper_scissors/rps.py | 2c085d2ad3e8be9dc50b3dedb02c4919fd2764ed | [] | no_license | baubrun/Challenges-PY | e109126a64a20128202e03c2ed359c179f523dcd | e2ca45cbca264f5790ce303807e25810a5d8d977 | refs/heads/master | 2022-12-17T03:24:43.308680 | 2020-09-14T12:37:24 | 2020-09-14T12:37:24 | 262,485,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | """
Rock, Paper, Scissors
Create a function which takes two strings
(p1 and p2 — which represent player 1 and )
as arguments and returns a string stating the winner
in a game of Rock, Paper, Scissors.
Each argument will contain a single string:
"Rock", "Paper", or "Scissors".
Return the winner according to the following rules:
Rock beats Scissors
Scissors beats Paper
Paper beats Rock
If p1 wins, return the string
"The winner is p1". If p2 wins,
return the string "The winner is p2" and if p1 and p2
are the same, return "It's a draw".
Examples
rps("Rock", "Paper") ➞ "The winner is p2"
rps("Scissors", "Paper") ➞ "The winner is p1"
rps("Paper", "Paper") ➞ "It's a draw"
Notes
All inputs will be valid strings.
"""
def rps(p1, p2):
    """Decide a round of Rock, Paper, Scissors.

    Returns "The winner is p1" or "The winner is p2" according to the
    usual beats-rules, or "It's a draw" when both hands are equal.
    """
    if p1 == p2:
        return "It's a draw"
    # In this cyclic order every hand beats its predecessor:
    # Paper beats Rock, Scissors beats Paper, Rock beats Scissors.
    order = ("Rock", "Paper", "Scissors")
    if order.index(p1) == (order.index(p2) + 1) % 3:
        return "The winner is p1"
    return "The winner is p2"
| [
"baubelf@gmail.com"
] | baubelf@gmail.com |
dd1a5f7306470c09c8a4e5d4fe2278049dc1ce9d | 22bf910b64283b3c15cc4d80542e83fa89e9f09d | /monero_glue/messages/WipeDevice.py | 57631812883ce5dcd80f84abc1ac8053d0c0c93c | [
"MIT"
] | permissive | ph4r05/monero-agent | 24ed1aa17d6616b2ae6bcdb7b9997f982f8b7b5d | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | refs/heads/master | 2022-10-18T06:30:43.550133 | 2021-07-01T16:27:56 | 2021-07-01T16:27:56 | 126,215,119 | 24 | 5 | MIT | 2022-09-23T22:53:44 | 2018-03-21T17:18:21 | Python | UTF-8 | Python | false | false | 309 | py | # Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
except ImportError:
pass
class WipeDevice(p.MessageType):
    """Protobuf message type "WipeDevice" generated by pb2py (see header);
    carries no fields, only its wire type identifier."""
    MESSAGE_WIRE_TYPE = 5  # wire identifier used when (de)serializing
| [
"dusan.klinec@gmail.com"
] | dusan.klinec@gmail.com |
0622bc92b933929fb78f1de42eaa6d2f4aabd814 | 5529b621f65eb855d381932d313c3ca3ed7090f6 | /process.py | 3fefdbc5e4572e2687c390407c0f36aa6bf92646 | [
"MIT"
] | permissive | isabella232/tennis-ages | c39f2d84414890f2ff27537ef0fa1c34ac4476e3 | 8334101e2b1a7484c540be3650d36e7e04a1e40b | refs/heads/master | 2021-06-01T06:03:52.412498 | 2016-04-12T21:43:34 | 2016-04-12T21:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | #!/usr/bin/env python
from dateutil.parser import parse
from datetime import date
def process():
today = date.today()
with open('USA_-_womens_national_rankings.txt') as f:
content = f.readlines()
for line in content:
line = line.strip()
try:
if line != '16 March 2016':
born = parse(line)
if born.year > 1940 and born.year < 2005:
age = today.year - born.year - ((today.month, today.day) < (born.month, born.day))
print '%s,%s' % (born, age)
except ValueError:
pass
if __name__ == '__main__':
process()
| [
"davideads@gmail.com"
] | davideads@gmail.com |
60176f9b42f14ac88d509ed03458e028dfa605e5 | 2caa47f0bdb2f03469a847c3ba39496de315d992 | /Contest/Tenka1-2018-Beginner/c/main.py | ce4eaf625bb1b68df7eb761a8a5387da31561e15 | [
"CC0-1.0"
] | permissive | mpses/AtCoder | 9023e44885dc67c4131762281193c24b69d3b6da | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | refs/heads/master | 2023-03-23T17:00:11.646508 | 2021-03-20T12:21:19 | 2021-03-20T12:21:19 | 287,489,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | #!/usr/bin/env python3
# Read N followed by N integers (one per line) from stdin; open(0) reads fd 0.
N, *a = map(int, open(0))
n = N//2 - 1
a.sort()
# Closed-form answer built from prefix/suffix sums of the sorted values; the
# min(...) correction applies only when N is odd.
# NOTE(review): formula is specific to AtCoder Tenka1-2018-Beginner problem C
# -- verify against the task statement before reusing elsewhere.
print(2*sum(a[n+2:]) - 2*sum(a[:n]) + a[n+1] - a[n] - (min(a[n]+a[n+2], 2*a[n+1]) if N%2 else 0))
| [
"nsorangepv@gmail.com"
] | nsorangepv@gmail.com |
4efca2e0d72033c9a358908f0fefa68c97baabe1 | 6f97d4e47b4e8bceb6a43ffe417656c06c077d3e | /remoting/curl.py | 8cd4f1dda3a1ea9c22c0862d599245cf83853dda | [] | no_license | getwingm/remoting | b35a47317e7d8c0b728a4462628663242eac6381 | e5a8dc483ffc0935fde49a75ccf13160f2d7f9a8 | refs/heads/master | 2020-04-03T08:08:07.362362 | 2014-05-22T19:29:20 | 2014-05-22T19:29:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | import pycurl
import StringIO
def curl(url, headers=None, socks_host=None, socks_port=None):
    """Fetch *url* with pycurl and return the response body as a string.

    headers    -- optional dict of extra HTTP headers.
    socks_host -- optional SOCKS5 proxy host; defaults to 'localhost'
                  when only a port is given.
    socks_port -- optional SOCKS5 proxy port (int); defaults to 5090
                  when only a host is given.
    """
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.URL, url)
        if socks_host or socks_port:
            c.setopt(pycurl.PROXY, socks_host or 'localhost')
            c.setopt(pycurl.PROXYPORT, socks_port or 5090)
            c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
        if headers:
            header_list = ['%s: %s' % (key, val) for key, val in headers.items()]
            c.setopt(pycurl.HTTPHEADER, header_list)
        output = StringIO.StringIO()
        c.setopt(pycurl.WRITEFUNCTION, output.write)
        c.perform()
        return output.getvalue()
    finally:
        # Always release the libcurl handle (it was leaked before).
        c.close()
| [
"io@henrian.com"
] | io@henrian.com |
2b7ec4e81348d282b660f2e7f30fc016dede1ddd | 4aa7a4d0525095725eb99843c83827ba4806ceb1 | /my_mini_project/Deep_Learning/Dense/cpi_Dense.py | 762335816729d70a0ee09cf85ececcf612a71eb4 | [] | no_license | seonukim/Study | 65a70f5bdfad68f643abc3086d5c7484bb2439d4 | a5f2538f9ae8b5fc93b5149dd51704e8881f0a80 | refs/heads/master | 2022-12-04T17:04:31.489771 | 2020-08-21T00:35:15 | 2020-08-21T00:35:15 | 260,144,755 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,010 | py | import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout
from keras.layers import LeakyReLU
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
from sklearn.model_selection import train_test_split
# Pick exactly one feature scaler; MinMaxScaler is the active choice.
# scaler = StandardScaler()
# scaler = RobustScaler()
scaler = MinMaxScaler()
# scaler = MaxAbsScaler()
leaky = LeakyReLU(alpha = 0.3)
# Stop training once the loss hasn't improved for 10 epochs.
es = EarlyStopping(monitor = 'loss',
                   mode = 'min',
                   patience = 10)

## Load the CPI data (train: 1975.01-2002.09, test: 2002.10-2020.05).
# NOTE(review): absolute Windows paths -- only runs on the author's machine.
train = pd.read_csv('C:/Users/bitcamp/Downloads/'
                    '/cpi_train(1975.01 - 2002.09).csv',
                    index_col = 0, header = 0,
                    encoding = 'cp949')
test = pd.read_csv('C:/Users/bitcamp/Downloads/'
                   '/cpi_test(2002.10 - 2020.05).csv',
                   index_col = 0, header = 0,
                   encoding = 'cp949')
print(train.shape)          # (213, 13)
print(test.shape)           # (213, 13)

## Convert the DataFrames to plain NumPy arrays.
train = train.values
test = test.values
print(type(train))          # <class 'numpy.ndarray'>
print(type(test))           # <class 'numpy.ndarray'>
## Helper: slide a window over the series to build (x, y) pairs.
def split_xy(data, time, y_column):
    """Slice a 2-D series into supervised-learning samples.

    Each sample is `time` consecutive rows (all columns); its target is
    the next `y_column` values of column 0.  Returns (x, y) as ndarrays.
    """
    windows, targets = [], []
    last_start = len(data) - time - y_column
    for start in range(last_start + 1):
        stop = start + time
        windows.append(data[start:stop, :])
        targets.append(data[stop:stop + y_column, 0])
    return np.array(windows), np.array(targets)
x, y = split_xy(train, 5, 1)  # 5-month windows predicting the next month
print(x.shape) # (208, 5, 13)
print(y.shape) # (208, 1)

## Split chronologically (shuffle=False keeps the time order intact).
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size = 0.2,
    shuffle = False)
print(x_train.shape) # (166, 5, 13)
print(x_test.shape) # (42, 5, 13)
print(y_train.shape) # (166, 1)
print(y_test.shape) # (42, 1)

## Flatten each (5, 13) window to a 65-vector for the Dense model.
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1] * x_train.shape[2])
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1] * x_test.shape[2])
print(x_train.shape) # (166, 65)
print(x_test.shape) # (42, 65)

## Scale features; fit on the training split only to avoid leakage.
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
print(x_train[0])
'''
## 모델링
model = Sequential()
model.add(Dense(100, input_shape = (65, ),
activation = 'relu'))
model.add(Dropout(rate = 0.1))
model.add(Dense(1, activation = 'relu'))
model.summary()
model.save('./my_mini_project/Dense/Dense_model.h5')
## 컴파일 및 훈련
model.compile(loss = 'mse',
optimizer = 'rmsprop',
metrics = ['mse'])
model.fit(x_train, y_train,
epochs = 1000, batch_size = 1,
callbacks = [es], verbose = 1)
## 모델 평가 및 예측
res = model.evaluate(x_test, y_test)
print("loss : ", res[0]) # loss : 1.1412532769498371
print("mse : ", res[1]) # mse : 1.1412532329559326
pred = model.predict(x_test)
for i in range(5):
print('실제값 : ', y_test[i], '예측값 : ', pred[i])
'''
'''
실제값 : [65.164] 예측값 : [66.05616]
실제값 : [65.055] 예측값 : [66.54069]
실제값 : [64.672] 예측값 : [66.896965]
실제값 : [64.452] 예측값 : [66.94108]
실제값 : [65.111] 예측값 : [66.851395]
'''
## test 데이터 분리
a, b = split_xy(test, 5, 1)
print(a.shape) # (208, 5, 13)
print(b.shape) # (208, 1)
## 데이터 전처리
a_train, a_test, b_train, b_test = train_test_split(
a, b, test_size = 0.2, shuffle = False)
print(a_train.shape) # (166, 5, 13)
print(a_test.shape) # (42, 5, 13)
print(b_train.shape) # (166, 1)
print(b_test.shape) # (42, 1)
## Dense모델에 넣기 위해 데이터 reshape
a_train = a_train.reshape(a_train.shape[0], a_train.shape[1] * a_train.shape[2])
a_test = a_test.reshape(a_test.shape[0], a_test.shape[1] * a_test.shape[2])
print(a_train.shape) # (166, 65)
print(a_test.shape) # (42, 65)
## Scaling
scaler.fit(a_train)
a_train = scaler.transform(a_train)
a_test = scaler.transform(a_test)
print(a_train[0])
## 모델 불러오기
model = load_model('./my_mini_project/Dense/Dense_model.h5')
model.summary()
## 컴파일 및 훈련
model.compile(loss = 'mse',
optimizer = 'adam',
metrics = ['mse'])
model.fit(a_train, b_train,
epochs = 1000, batch_size = 2,
callbacks = [es], verbose = 1)
## 모델 평가
res = model.evaluate(a_test, b_test)
print("loss : ", res[0]) # loss : 26.6597063654945
print("mse : ", res[1]) # mse : 26.659706115722656
pred_2 = model.predict(a_test)
for i in range(42):
print('실제값 : ', b_test[i], '예측값 : ', pred_2[i])
'''
실제값 : [102.64] 예측값 : [105.10877]
실제값 : [102.92] 예측값 : [105.93016]
실제값 : [102.85] 예측값 : [106.56531]
실제값 : [102.72] 예측값 : [106.543686]
실제값 : [102.83] 예측값 : [107.598434]
실제값 : [102.61] 예측값 : [108.56856]
실제값 : [102.78] 예측값 : [108.38743]
실제값 : [103.37] 예측값 : [109.004524]
실제값 : [103.49] 예측값 : [110.387726]
실제값 : [103.39] 예측값 : [110.835754]
실제값 : [102.62] 예측값 : [111.340324]
실제값 : [102.99] 예측값 : [111.924095]
실제값 : [103.42] 예측값 : [111.57676]
실제값 : [104.21] 예측값 : [110.89941]
실제값 : [104.1] 예측값 : [110.65243]
실제값 : [104.29] 예측값 : [110.05317]
실제값 : [104.34] 예측값 : [110.82879]
실제값 : [104.13] 예측값 : [111.53039]
실제값 : [103.93] 예측값 : [112.151]
실제값 : [104.85] 예측값 : [111.760124]
실제값 : [105.65] 예측값 : [112.12572]
실제값 : [105.46] 예측값 : [111.91189]
실제값 : [104.71] 예측값 : [111.7032]
실제값 : [104.35] 예측값 : [111.20279]
실제값 : [104.24] 예측값 : [110.55778]
실제값 : [104.69] 예측값 : [109.36505]
실제값 : [104.49] 예측값 : [107.22308]
실제값 : [104.87] 예측값 : [104.85908]
실제값 : [105.05] 예측값 : [103.84239]
실제값 : [104.88] 예측값 : [103.84634]
실제값 : [104.56] 예측값 : [103.85918]
실제값 : [104.81] 예측값 : [103.73224]
실제값 : [105.2] 예측값 : [104.46961]
실제값 : [105.46] 예측값 : [104.5829]
실제값 : [104.87] 예측값 : [104.55163]
실제값 : [105.12] 예측값 : [103.92594]
실제값 : [105.79] 예측값 : [103.70546]
실제값 : [105.8] 예측값 : [103.69801]
실제값 : [105.54] 예측값 : [103.21263]
실제값 : [104.95] 예측값 : [102.21549]
실제값 : [104.71] 예측값 : [101.95478]
실제값 : [104.71] 예측값 : [101.35535] <- 2020.07월의 CPI 총 지수
'''
| [
"92.seoonooo@gmail.com"
] | 92.seoonooo@gmail.com |
7d6ed1ced3438d158b7dcef576862fc4d9ad0ad7 | 17f527d6936397270183a35d7097e0a99de16cb5 | /rasen_book/basic_2/rooted_tree.py | 547f143bb94346f4c29666f256d70fd860307d0a | [] | no_license | ryosuke071111/algorithms | e942f043d08c7c7e2c926ed332ee2b8c44bdf0c5 | 867764450cc0f2a709fa2f743d9a0d95001e9296 | refs/heads/master | 2020-05-14T17:14:39.314064 | 2019-04-17T12:58:12 | 2019-04-17T12:58:12 | 181,888,623 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | NIL = -1
class Node: #ノード作成
def __init__(self):
self.parent = NIL
self.left = NIL
self.right = NIL
def getDepth(u):
    """Return the depth of node u by walking parent links up to the root."""
    depth = 0
    node = u
    while T[node].parent != NIL:
        node = T[node].parent
        depth += 1
    return depth
def getChildren(u):
    """Collect u's children by following the left-child / right-sibling links."""
    children = []
    child = T[u].left          # leftmost child, if any
    while child != NIL:
        children.append(child)
        child = T[child].right  # next sibling to the right
    return children
n = int(input())
T = [0]*n
for i in range(n):
    T[i] = Node()  # allocate the node list
for i in range(n):
    tmp = list(map(int, input().split()))  # [id, child count, child ids...]
    id = tmp.pop(0)  # NOTE(review): shadows the builtin id()
    k = tmp.pop(0)
    c = tmp  # whatever remains is the list of child ids
    if k != 0:
        for j in range(len(c)):
            T[c[j]].parent = id
        T[id].left = c[0]  # first child becomes this node's leftmost child
        for j in range(len(c)-1):
            T[c[j]].right = c[j+1]  # chain each child to its right sibling
for i in range(n):
    d = getDepth(i)
    c = getChildren(i)
    # Classify: depth 0 is the root; no children means leaf.
    if d == 0:
        t = 'root'
    elif c == []:
        t = 'leaf'
    else:
        t = 'internal node'
    print('node ',i,': ','parent = ',T[i].parent,', depth = ',d,', ',t,', ',c,sep = '')
| [
"ryosuke0711993@gmail.com"
] | ryosuke0711993@gmail.com |
11bd35fefb742454ba0670f53928bb4eff176cef | c8a04384030c3af88a8e16de4cedc4ef8aebfae5 | /stubs/pandas/core/groupby/base.pyi | 40914d2704f18b8f4cd75c1dd593da5818d0e209 | [
"MIT"
] | permissive | Accern/accern-xyme | f61fce4b426262b4f67c722e563bb4297cfc4235 | 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | refs/heads/master | 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 | MIT | 2023-07-19T02:13:18 | 2019-12-09T20:21:59 | Python | UTF-8 | Python | false | false | 775 | pyi | # Stubs for pandas.core.groupby.base (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level,line-too-long,arguments-differ
# pylint: disable=no-member,too-few-public-methods,keyword-arg-before-vararg
# pylint: disable=super-init-not-called,abstract-method,redefined-builtin
# pylint: disable=unused-import,useless-import-alias,signature-differs
# pylint: disable=blacklisted-name,c-extension-no-member
from typing import Any
class GroupByMixin:
    """Type stub for pandas' GroupByMixin; body unspecified (stubgen output)."""
    ...


# Module-level groupby dispatch tables, typed loosely as Any by stubgen.
# NOTE(review): presumably collections of method names -- the stub gives
# no detail; consult pandas.core.groupby.base for the real values.
plotting_methods: Any
common_apply_whitelist: Any
series_apply_whitelist: Any
dataframe_apply_whitelist: Any
cython_transforms: Any
cython_cast_blacklist: Any
| [
"josua.krause@gmail.com"
] | josua.krause@gmail.com |
e3baf2fee82699caed0d28245f9d8a0a4b2a00e3 | e1b2b4215a08c1ef8df03d68a933f538bcab1176 | /projects/migrations/0001_initial.py | d8d64fe140786f7801ed16527fb13b74f355dde3 | [
"MIT"
] | permissive | wanguinjoka/Awwards | 4ed1d232dbe49167e3b15c3854c2d21455966673 | 23218076075601cb899a8ed28c11c2bd561e8f1c | refs/heads/master | 2020-04-01T20:37:55.332604 | 2018-10-19T14:45:54 | 2018-10-19T14:45:54 | 153,613,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-18 16:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the `Site` table.

    Applied migrations should not be edited by hand; create a follow-up
    migration for schema changes instead.
    """

    initial = True

    dependencies = [
        # The `developer` FK below targets the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Site',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('site_image', models.ImageField(upload_to='projects/')),
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
                ('site_url', models.CharField(blank=True, max_length=100)),
                ('developer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"wangui.njoka@gmail.com"
] | wangui.njoka@gmail.com |
c57020eb0fb14e7f7a51cb39ffeac16321b07756 | 01fdd206c8c825b30870bdd3f6e75f0aa113b849 | /test/record/parser/test_response_whois_co_ug_property_status_unconfirmed.py | 44fbcad55857c7be4ea983289a09e1f7bed4becc | [
"MIT"
] | permissive | huyphan/pyyawhois | 0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5 | 77fb2f73a9c67989f1d41d98f37037406a69d136 | refs/heads/master | 2021-01-23T22:42:55.989651 | 2015-09-19T16:40:06 | 2015-09-19T16:40:06 | 23,335,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.co.ug/property_status_unconfirmed
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisCoUgPropertyStatusUnconfirmed(object):
    """Autogenerated parser test (see file header): an "unconfirmed" status
    in a whois.co.ug response must still be reported as registered."""

    def setUp(self):
        # Build a Record from the canned whois.co.ug server response fixture.
        fixture_path = "spec/fixtures/responses/whois.co.ug/property_status_unconfirmed.txt"
        host = "whois.co.ug"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'registered')

    def test_available(self):
        eq_(self.record.available, False)

    def test_registered(self):
        eq_(self.record.registered, True)
| [
"dachuy@gmail.com"
] | dachuy@gmail.com |
8fbeb9f6b161fe0dfa306a2b9f8b05576bf7ffa5 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/astropy/nddata/tests/test_nddata_base.py | 68b51c55334f9432d9bc581bd6d936d30d2a03ae | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,463 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Tests of NDDataBase
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..nddata_base import NDDataBase
from ...tests.helper import pytest
class MinimalSubclass(NDDataBase):
    """Smallest concrete NDDataBase subclass used by the tests below.

    Every abstract property is implemented: `data` returns None and the
    rest simply defer to the NDDataBase base-class property.
    """

    def __init__(self):
        super(MinimalSubclass, self).__init__()

    @property
    def data(self):
        return None

    @property
    def mask(self):
        return super(MinimalSubclass, self).mask

    @property
    def unit(self):
        return super(MinimalSubclass, self).unit

    @property
    def wcs(self):
        return super(MinimalSubclass, self).wcs

    @property
    def meta(self):
        return super(MinimalSubclass, self).meta
class MinimalUncertainty(object):
    """Define the minimum attributes acceptable as an uncertainty object."""

    # Constant type label reported by this fake uncertainty.
    _FAKE_TYPE = "totally and completely fake"

    def __init__(self, value):
        self._uncertainty = value

    @property
    def uncertainty_type(self):
        return self._FAKE_TYPE
def test_nddata_base_subclass():
    # A minimal subclass exposes every NDDataBase attribute with a None default.
    a = MinimalSubclass()
    assert a.meta is None
    assert a.data is None
    assert a.mask is None
    assert a.unit is None
    assert a.wcs is None
    # Any object exposing `uncertainty_type` is accepted as an uncertainty...
    good_uncertainty = MinimalUncertainty(5)
    a.uncertainty = good_uncertainty
    assert a.uncertainty is good_uncertainty
    # ...while objects without one (e.g. a plain int) raise TypeError.
    bad_uncertainty = 5
    with pytest.raises(TypeError):
        a.uncertainty = bad_uncertainty
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
58d5dde0bff26df07a3835833b1fc51873e23df9 | f3baf8b850c896231b4c254a22567fd5d7a5035c | /Aula 16/web.py | 414a87d388f65424a1221b6bf05f757182fcd896 | [
"MIT"
] | permissive | Katakhan/TrabalhosPython2 | e1c23119ef582038ceea0004c872c00778fd326e | ab47af0ff3c00922857578e58a1a149d9e65e229 | refs/heads/master | 2020-09-21T02:15:04.505791 | 2020-03-19T13:23:41 | 2020-03-19T13:23:41 | 224,650,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from flask import Flask, render_template
from Faixa import ler
app = Flask(__name__)


@app.route('/lista')
def listar_faixas():
    """Render the track-list page using the `ler` object from Faixa."""
    # NOTE(review): `ler` is passed as-is; if it is a function it is not
    # called here -- confirm the template expects the object itself.
    return render_template("lista.html", nome = 'Lista de Faixas', lista = ler)


# Start Flask's development server (blocking call).
app.run()
"antoniorafaelgastaldi@hotmail.com"
] | antoniorafaelgastaldi@hotmail.com |
46ab140e843abff2a8e2d248dbce509b67ef8b61 | 76fa4bc242502bcd9dfe1053c964318b94acc6d8 | /numpy/append.py | bcd3aa63ba9ec11c6e64df626596ef2678090f2e | [] | no_license | phani-1995/Week3-python_libraries | 720156098ccab5301a58e39a4dd7af5a19a08008 | 1347b8dfd4980b37471a54ce991c967fdcb32e2b | refs/heads/master | 2021-04-01T17:42:54.855954 | 2020-03-23T06:50:18 | 2020-03-23T06:50:18 | 248,204,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | import numpy as np
# Demonstrate numpy.append: it copies a list into a new ndarray with
# extra values tacked on the end (the original list is untouched).
a = [10, 20, 30]
print("The original array is: ", a)
extra = [40, 50, 60, 70, 80, 90]
x = np.append(a, extra)
print("The new array is : ", x)
| [
"phanindrajallavaram@gmail.com"
] | phanindrajallavaram@gmail.com |
c580d300800cfbb77da8334c268c8ffd851841c6 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/msData/datatypes/Facets/NCName/NCName_enumeration004.py | 63150c8aeb21fe8810124ca42d4b34c1600604f2 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 267 | py | from output.models.ms_data.datatypes.facets.ncname.ncname_enumeration004_xsd.ncname_enumeration004 import FooTypeFoo
from output.models.ms_data.datatypes.facets.ncname.ncname_enumeration004_xsd.ncname_enumeration004 import Test
# Instance consumed by the xsdata W3C test-suite: a Test element whose
# foo member is the enumerated NCName value FOO (generated model classes).
obj = Test(
    foo=FooTypeFoo.FOO
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
d87578492b072196777372c73ee2c551e194668f | 6bcf8b136d45b53e75c0a6a75d8545188acb8190 | /sourmash_lib/fig.py | f07bef297252769c2b0b82042e37022426f2bab7 | [
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | swamidass/sourmash | 2dab03a86842f868242c18f4b1b6307891eeb389 | fb7a6c1ac1a86ef4adc8b7385c664e947ed1b365 | refs/heads/master | 2021-01-21T06:30:18.211742 | 2017-03-01T05:01:57 | 2017-03-01T05:01:57 | 83,241,616 | 0 | 0 | NOASSERTION | 2022-08-20T14:57:30 | 2017-02-26T21:21:56 | Standard ML | UTF-8 | Python | false | false | 1,900 | py | #! /usr/bin/env python
"""
Make plots using the distance matrix+labels output by ``sourmash compare``.
"""
try:
import numpy
import scipy
import pylab
import scipy.cluster.hierarchy as sch
except (RuntimeError, ImportError):
pass
def load_matrix_and_labels(basefile):
    """Load the comparison matrix and associated labels.

    Reads the binary numpy matrix stored at *basefile* and the matching
    labels at *basefile*.labels.txt (one label per line, whitespace
    stripped).

    Returns a square numpy matrix & list of labels.
    """
    # Use context managers so neither file handle is leaked (both were
    # previously opened inline and never closed).
    with open(basefile, 'rb') as f:
        D = numpy.load(f)
    with open(basefile + '.labels.txt') as f:
        labeltext = [x.strip() for x in f]
    return (D, labeltext)
def plot_composite_matrix(D, labeltext, show_labels=True, show_indices=True,
                          vmax=1.0, vmin=0.0):
    """Build a composite plot showing dendrogram + distance matrix/heatmap.

    D          -- square comparison matrix (as from load_matrix_and_labels)
    labeltext  -- one label per row/column of D
    show_labels / show_indices -- control dendrogram leaf labelling
    vmax, vmin -- color scale bounds for the heatmap

    Returns a matplotlib figure."""
    # Showing labels implies showing indices on the dendrogram leaves.
    if show_labels:
        show_indices = True

    fig = pylab.figure(figsize=(11, 8))
    ax1 = fig.add_axes([0.09, 0.1, 0.2, 0.6])

    # plot dendrogram (single-linkage clustering of D)
    Y = sch.linkage(D, method='single')  # centroid

    dendrolabels = labeltext
    if not show_labels:
        # Fall back to numeric indices when textual labels are suppressed.
        dendrolabels = [str(i) for i in range(len(labeltext))]

    Z1 = sch.dendrogram(Y, orientation='left', labels=dendrolabels,
                        no_labels=not show_indices)
    ax1.set_xticks([])

    # Layout: matrix panel position/width depends on label visibility.
    xstart = 0.45
    width = 0.45
    if not show_labels:
        xstart = 0.315
    scale_xstart = xstart + width + 0.01

    # plot matrix
    axmatrix = fig.add_axes([xstart, 0.1, width, 0.6])

    # (this reorders D by the clustering in Z1 so rows match the dendrogram)
    idx1 = Z1['leaves']
    D = D[idx1, :]
    D = D[:, idx1]

    # show matrix as a heatmap
    im = axmatrix.matshow(D, aspect='auto', origin='lower',
                          cmap=pylab.cm.YlGnBu, vmin=vmin, vmax=vmax)
    axmatrix.set_xticks([])
    axmatrix.set_yticks([])

    # Plot colorbar.
    axcolor = fig.add_axes([scale_xstart, 0.1, 0.02, 0.6])
    pylab.colorbar(im, cax=axcolor)

    return fig
| [
"titus@idyll.org"
] | titus@idyll.org |
4b9b537852edb55ab74e630264c5c984a186c436 | d0a84d97aaa8dcc2dff4a6b33ce98dee6d474496 | /com.CheckProofing/2020/Test_w_42_TV_Deals/test_w_42_TV_Deals_EPP_QLEDTV8K_url_segment.py | 8cf211361608f5a284cf6ec475af2d53c5196aa7 | [] | no_license | ahmed-test001/python | 21a27248c4571a13c0ed4dccab256aede1beea3a | eab59b9a54fae1a51fbc18c391599eb3b0e28b3d | refs/heads/master | 2023-03-10T21:00:54.634028 | 2021-02-27T05:31:58 | 2021-02-27T05:31:58 | 342,778,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,124 | py | import json
from urllib.parse import urlparse, parse_qs
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
import unittest
import sys
import os
import logging
sys.path.append(os.path.join(os.path.dirname(__file__),"."))
from Utility_Files import ReadConfig
from Utility_Files.HTMLTestRunner import stdout_redirector
logger = logging.getLogger(__name__)
out_hdlr = logging.StreamHandler(stdout_redirector)
out_hdlr.setFormatter(logging.Formatter('%(asctime)s%(levelname)s%(message)s'))
out_hdlr.setLevel(logging.INFO)
logger.addHandler(out_hdlr)
logger.setLevel(logging.INFO)
class URLSegment_W_42_TV_Deals_EPP_QLEDTV8K_Test(unittest.TestCase):
    def test_UrlSegmentvalidation(self):
        """For each QLED-8K deep-link URL, scrape trade-in device/banner text
        with Selenium and dump the parsed query-string segments to JSON."""
        logger.info(': ' + self.test_UrlSegmentvalidation.__name__ + "\n ##### Starting TEST ##### ")
        final_json = {}
        final_array = []
        bpid_array = []
        final_bpid = {}
        # Clear previous JSON outputs so this run starts fresh.
        dir_name = "../OutputT/"
        test = os.listdir(dir_name)
        for item in test:
            if item.endswith(".json"):
                os.remove(os.path.join(dir_name, item))
        with open('../TextFolder/TestIn_UniqueURL_List.txt')as f:
            # urls = f.readline().split()
            urls = f.read().splitlines()
        for url in urls:
            # Only process URLs for the configured QLED-8K landing page.
            if ReadConfig.read_w42_TVDeals_configData('WEBLink', 'qled8k') in url:
                try:
                    option = webdriver.ChromeOptions()
                    option.add_experimental_option('excludeSwitches', ['enable-logging'])
                    self.driver = webdriver.Chrome(executable_path='../Drivers/chromedriver_01.exe', options=option)
                    self.driver.maximize_window()
                    self.driver.get(url)
                    # Primary page layout: device label + promo title.
                    txt = WebDriverWait(self.driver, 10).until(
                        EC.element_to_be_clickable((By.XPATH, "(//div[@class ='device-label'])[2]"))).text
                    txt1 = WebDriverWait(self.driver, 10).until(
                        EC.element_to_be_clickable((By.XPATH, "//p[@class ='title']"))).text
                    self.driver.quit()
                # NOTE(review): bare except swallows every error (including
                # KeyboardInterrupt); narrow to selenium exceptions.
                except:
                    try:
                        # Fallback layout: title div instead of device label.
                        txt = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, "(//div[@class='title'])[2]"))).text
                        if len(txt) == 0:
                            txt = "No Device Present"
                        txt1 = WebDriverWait(self.driver, 10).until(
                            EC.element_to_be_clickable((By.XPATH, "//p[@class ='title']"))).text
                    except:
                        # Neither layout matched: record placeholder values.
                        txt = "No Device Present"
                        txt1 = "Title Promo not Available"
                # Collect the URL's query parameters plus the scraped text.
                parsed_url = urlparse(url)
                pair = parse_qs(parsed_url.query)
                bpidValue = pair.get('bpid')
                pair['tradeIn_ModelName'] = txt.split(',')
                pair['preorder_text_banner'] = txt1.split(',')
                pair['url_deep_link'] = url.split()
                bpid_array.append(bpidValue)
                final_array.append(pair)
                # NOTE(review): driver may already be quit (or never created
                # if Chrome failed to start) -- this second quit can raise.
                self.driver.quit()
        # Serialize the collected segments and bpids to the output folder.
        final_json['check_list'] = final_array
        final_bpid['bpid_list'] = bpid_array
        final_json = json.dumps(final_json, indent=4, sort_keys=False)
        final_bpid = json.dumps(final_bpid, indent=4, sort_keys=False)
        f = open("../OutputT/OutResult.json", "w")
        f.write(final_json)
        logger.info(": Printing URL Segment values:" + final_json)
        f.close()
        f = open("../OutputT/bpIdList.json", "w")
        f.write(final_bpid)
        logger.info(": Printing BPID:" + final_bpid)
        f.close()
        logger.info('#### TEST Complete ####')
def test_segment_validation(self):
    """Validate the URL segment values captured in OutResult.json.

    Reads the JSON produced by the previous capture step and checks that,
    for each configured key, the expected value from the config file is
    contained in the first entry of ``check_list``. Results are only
    logged ("matched" / "NOT matched"); no assertion is raised.
    """
    logger.info(': ' + self.test_segment_validation.__name__ + "\n ##### Starting TEST ##### ")
    with open('../OutputT/OutResult.json', 'r')as jsonfile:
        readdata = json.load(jsonfile)
    first_entry = readdata['check_list'][0]
    # Containment (`in`) is used rather than equality because the captured
    # values are lists produced by parse_qs/str.split in the capture step.
    for key in ('cid', 'bpid'):
        expected = ReadConfig.read_w42_TVDeals_configData('TVDataEPP', key)
        if expected in first_entry[key]:
            logger.info(": %s matched" % key)
        else:
            logger.info(": %s NOT matched" % key)
    logger.info('#### TEST Complete ####')
# Run the unittest test runner when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"ahmedu.ferdous@gmail.com"
] | ahmedu.ferdous@gmail.com |
e06905c6d21fcca68df17ac398d343813c1d928f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_bustles.py | 9306899ef63702f12adbd82507903dfabaf4bc2a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._bustle import _BUSTLE
# class header
class _BUSTLES(_BUSTLE):
    """Word entry for "BUSTLES", the plural noun form of "bustle"."""

    def __init__(self):
        # Initialise the base word entry, then set the fields specific
        # to this plural form.
        _BUSTLE.__init__(self)
        self.name = "BUSTLES"
        self.specie = 'nouns'
        self.basic = "bustle"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
12a4375cf4891bb40aac4e72c72a86695597729b | 48460db1a6fdc6c09845c86cf5fa257f1a32f08a | /leetcode/medium/1041_Robot_Bounded_In_Circle.py | 6384889f67c7af592d411faf5355890bbe147bed | [] | no_license | MichalBrzozowski91/algorithms | 9d0b085621ed94b1aff5473663fbdc686463cd8d | ae57535b574a800c6300eae7d55b21f2432c3baa | refs/heads/master | 2022-12-20T08:00:59.385002 | 2020-09-30T16:32:33 | 2020-09-30T16:32:33 | 290,835,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | class Solution:
def isRobotBounded(self, instructions) -> bool:
    """Return True if the robot stays inside some bounded circle.

    :param instructions: string over {'G', 'L', 'R'} executed in a loop
    :returns: True iff repeating the instructions forever keeps the
        robot bounded.

    After a single pass of the instructions the robot is bounded exactly
    when it is back at the origin, or it is no longer facing its initial
    heading (so repeated passes trace a closed cycle).
    """
    # Headings: 0 north, 1 west, 2 south, 3 east; deltas[h] is the
    # (dx, dy) step for one 'G' move while facing heading h.
    deltas = ((0, 1), (-1, 0), (0, -1), (1, 0))
    heading = 0
    x = y = 0
    for step in instructions:
        if step == 'L':
            heading = (heading - 1) % 4
        elif step == 'R':
            heading = (heading + 1) % 4
        elif step == 'G':
            dx, dy = deltas[heading]
            x += dx
            y += dy
    # Bounded iff the heading changed or the robot returned to the origin.
    return heading != 0 or (x, y) == (0, 0)
| [
"noreply@github.com"
] | MichalBrzozowski91.noreply@github.com |
16eba7e91dddfc9fd58fd0bdfcb6fc0faaaa7bc4 | 89e6c3548fbdd06178aae712de1ff19004bc2faa | /my_django/contrib/staticfiles/utils.py | 5a69ad055a37f4d7a0a0c72ea6dd79f935e9a6a7 | [] | no_license | bhgv/ublog_git.hg.repo-django.python-engine | a3f3cdcbacc95ec98f022f9719d3b300dd6541d4 | 74cdae100bff5e8ab8fb9c3e8ba95623333c2d43 | refs/heads/master | 2020-03-23T01:04:07.431749 | 2018-07-25T12:59:21 | 2018-07-25T12:59:21 | 140,899,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py | import os
import fnmatch
from my_django.conf import settings
from my_django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
    """
    Return True if ``path`` matches any pattern in ``patterns``.

    Used e.g. to decide whether a path should be ignored. Matching is
    case-sensitive shell-style globbing (``fnmatch.fnmatchcase``).
    """
    if patterns is None:
        patterns = []
    return any(fnmatch.fnmatchcase(path, pattern) for pattern in patterns)
def get_files(storage, ignore_patterns=None, location=''):
    """
    Recursively walk ``storage`` starting at ``location``, yielding the
    path of every file that does not match ``ignore_patterns``.
    """
    patterns = ignore_patterns if ignore_patterns is not None else []
    directories, files = storage.listdir(location)
    for filename in files:
        if matches_patterns(filename, patterns):
            continue
        yield os.path.join(location, filename) if location else filename
    for directory in directories:
        if matches_patterns(directory, patterns):
            continue
        subdir = os.path.join(location, directory) if location else directory
        for filename in get_files(storage, patterns, subdir):
            yield filename
def check_settings(base_url=None):
    """
    Validate the staticfiles-related settings, raising
    ImproperlyConfigured for any inconsistent combination.
    """
    base_url = settings.STATIC_URL if base_url is None else base_url
    if not base_url:
        raise ImproperlyConfigured(
            "You're using the staticfiles app "
            "without having set the required STATIC_URL setting.")
    if settings.MEDIA_URL == base_url:
        raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
                                   "settings must have different values")
    media_root = settings.MEDIA_ROOT
    static_root = settings.STATIC_ROOT
    # Both roots must be set before the equality check is meaningful.
    if media_root and static_root and media_root == static_root:
        raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
                                   "settings must have different values")
| [
"bhgv.empire@gmail.com"
] | bhgv.empire@gmail.com |
cdd92af8c583df98a026b684150ffdee3db66b54 | c56ee3cf2a97ae7fc043bd90e26ad5e34b87328f | /.venv/Lib/site-packages/pip/_internal/commands/wheel.py | c36acd229ba85ef1b3b4f6b228713e369f56bb42 | [
"MIT"
] | permissive | LuckJMG/ImprovedReplace | a88cab845ab894e3e8cb9591bc4e5611b43d403e | e59ad89c43f901d409215353a7403781fb689c7e | refs/heads/main | 2023-02-27T07:40:26.746185 | 2021-02-02T03:04:18 | 2021-02-02T03:04:18 | 235,675,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,377 | py | # -*- coding: utf-8 -*-
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
import shutil
from pip._internal.cache import WheelCache
from pip._internal.cli import cmdoptions
from pip._internal.cli.req_command import RequirementCommand, with_cleanup
from pip._internal.exceptions import CommandError
from pip._internal.req.req_tracker import get_requirement_tracker
from pip._internal.utils.misc import ensure_dir, normalize_path
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.wheel_builder import build, should_build_for_wheel_command
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Any, List
logger = logging.getLogger(__name__)
class WheelCommand(RequirementCommand):
    """
    Build Wheel archives for your requirements and dependencies.

    Wheel is a built-package format, and offers the advantage of not
    recompiling your software during every install. For more details, see the
    wheel docs: https://wheel.readthedocs.io/en/latest/

    Requirements: setuptools>=0.8, and wheel.

    'pip wheel' uses the bdist_wheel setuptools extension from the wheel
    package to build individual wheels.
    """

    usage = """
      %prog [options] <requirement specifier> ...
      %prog [options] -r <requirements file> ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""

    def __init__(self, *args, **kw):
        # Register all command-line options for `pip wheel`.
        super(WheelCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        cmd_opts.add_option(
            "-w",
            "--wheel-dir",
            dest="wheel_dir",
            metavar="dir",
            default=os.curdir,
            help=(
                "Build wheels into <dir>, where the default is the "
                "current working directory."
            ),
        )
        # Shared options reused from cmdoptions (same as other commands).
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(cmdoptions.prefer_binary())
        cmd_opts.add_option(
            "--build-option",
            dest="build_options",
            metavar="options",
            action="append",
            help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
        )
        cmd_opts.add_option(cmdoptions.no_build_isolation())
        cmd_opts.add_option(cmdoptions.use_pep517())
        cmd_opts.add_option(cmdoptions.no_use_pep517())
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.src())
        cmd_opts.add_option(cmdoptions.ignore_requires_python())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.build_dir())
        cmd_opts.add_option(cmdoptions.progress_bar())

        cmd_opts.add_option(
            "--global-option",
            dest="global_options",
            action="append",
            metavar="options",
            help="Extra global options to be supplied to the setup.py "
            "call before the 'bdist_wheel' command.",
        )

        cmd_opts.add_option(
            "--pre",
            action="store_true",
            default=False,
            help=(
                "Include pre-release and development versions. By default, "
                "pip only finds stable versions."
            ),
        )

        cmd_opts.add_option(cmdoptions.require_hashes())

        # Index-related options go in their own option group, inserted
        # before the command options in the parser.
        index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser,)

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    @with_cleanup
    def run(self, options, args):
        # type: (Values, List[Any]) -> None
        """Resolve the given requirements, build wheels for them, copy the
        wheels into ``options.wheel_dir``, and raise CommandError if any
        wheel failed to build."""
        cmdoptions.check_install_build_global(options)

        session = self.get_default_session(options)

        finder = self._build_package_finder(options, session)
        # Auto-delete the build directory only if the user supplied neither
        # --no-clean nor an explicit --build-dir.
        build_delete = not (options.no_clean or options.build_dir)
        wheel_cache = WheelCache(options.cache_dir, options.format_control)

        options.wheel_dir = normalize_path(options.wheel_dir)
        ensure_dir(options.wheel_dir)

        req_tracker = self.enter_context(get_requirement_tracker())

        directory = TempDirectory(
            options.build_dir, delete=build_delete, kind="wheel", globally_managed=True,
        )

        reqs = self.get_requirements(args, options, finder, session)

        preparer = self.make_requirement_preparer(
            temp_build_dir=directory,
            options=options,
            req_tracker=req_tracker,
            session=session,
            finder=finder,
            wheel_download_dir=options.wheel_dir,
            use_user_site=False,
        )

        resolver = self.make_resolver(
            preparer=preparer,
            finder=finder,
            options=options,
            wheel_cache=wheel_cache,
            ignore_requires_python=options.ignore_requires_python,
            use_pep517=options.use_pep517,
        )

        self.trace_basic_info(finder)

        requirement_set = resolver.resolve(reqs, check_supported_wheels=True)

        # Keep only the resolved requirements that should be built,
        # as decided by should_build_for_wheel_command.
        reqs_to_build = [
            r
            for r in requirement_set.requirements.values()
            if should_build_for_wheel_command(r)
        ]

        # build wheels
        build_successes, build_failures = build(
            reqs_to_build,
            wheel_cache=wheel_cache,
            build_options=options.build_options or [],
            global_options=options.global_options or [],
        )
        for req in build_successes:
            assert req.link and req.link.is_wheel
            assert req.local_file_path
            # copy from cache to target directory
            try:
                shutil.copy(req.local_file_path, options.wheel_dir)
            except OSError as e:
                # A failed copy counts as a build failure for this req.
                logger.warning(
                    "Building wheel for %s failed: %s", req.name, e,
                )
                build_failures.append(req)
        if len(build_failures) != 0:
            raise CommandError("Failed to build one or more wheels")
| [
"lucas.mosquera13@gmail.com"
] | lucas.mosquera13@gmail.com |
b80e40afc308d64a20780da4e4481dcccaa2cb8a | f36856f1fe47f66d7181d4bc026bfb6fc9a215e2 | /code/train.py | 723b3bac4a2c9802ecf1f3c0294dc2934b62cfb6 | [] | no_license | TrendingTechnology/ROS | 5776bc7faa419c74164703d486092dc4ac9a7bce | bceef4d9dc505f55322a4c25fb8071f49e7a5671 | refs/heads/master | 2023-01-10T15:39:48.464872 | 2020-08-07T13:48:54 | 2020-08-07T13:48:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,169 | py | import sys
import argparse
from steps_separation_adaptation import Trainer
import numpy as np
import torch
import os
def get_args():
    """Build the command-line parser for the training script and return
    the parsed arguments (an ``argparse.Namespace``)."""
    ap = argparse.ArgumentParser(description="Script to launch training",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Source/target domains.
    ap.add_argument("--source", help="Source")
    ap.add_argument("--target", help="Target")
    # Optimisation hyper-parameters.
    ap.add_argument("--batch_size", type=int, default=32, help="Batch size")
    ap.add_argument("--learning_rate", type=float, default=0.003, help="Learning rate")
    ap.add_argument("--divison_learning_rate_backbone", type=float, default=10.0, help="Scaling factor of the learning rate used for the part pf the backbone not freezed")
    # Number of epochs for the two training steps.
    ap.add_argument("--epochs_step1", type=int, default=80, help="Epochs of step1")
    ap.add_argument("--epochs_step2", type=int, default=80,help="Epochs of step2")
    # Class counts: known (source), known+unknown (target), self-supervised.
    ap.add_argument("--n_classes", type=int, default=25, help="Number of classes of source domain -- known classes")
    ap.add_argument("--n_classes_target", type=int, default=65,help="Number of classes of target domain -- known+unknown classes")
    ap.add_argument("--ss_classes", "-rc", type=int, default=4, help="Number of classes for the self-supervised task")
    # Loss weights used during training.
    ap.add_argument("--ss_weight_source", type=float, default=3.0, help="Weight of the source domain for the ss task (it acts in step1)")
    ap.add_argument("--ss_weight_target", type=float, default=3.0, help="Weight of the target domain for the ss task (it acts in step2)")
    ap.add_argument("--cls_weight_source", type=float, default=1.0, help="Weight for the cls task (it acts in step1 and step2)")
    ap.add_argument("--entropy_weight", type=float, default=0.1, help="Weight for the ss task (it acts in step2)")
    ap.add_argument("--weight_center_loss", type=float, default=0.0, help="Weight of the center loss for the ss task (it acts in step1)")
    ap.add_argument("--weight_class_unknown", type=float, default=1.0, help="Power of learning of the unknown class (it acts in step2)")
    # Filesystem locations.
    ap.add_argument("--folder_dataset",default=None, help="Path to the dataset")
    ap.add_argument("--folder_txt_files", default='/.../ROS/data/',help="Path to the txt files of the dataset")
    ap.add_argument("--folder_txt_files_saving", default='/.../ROS/data/',help="Path where to save the new txt files")
    ap.add_argument("--folder_log", default=None, help="Path of the log folder")
    # Hardware / runtime selection.
    ap.add_argument("--gpu", type=int, default=0, help="gpu chosen for the training")
    ap.add_argument("--n_workers", type=int, default=4, help="num of worker used")
    # Boolean switches.
    ap.add_argument("--use_VGG", action='store_true', default=False, help="If use VGG")
    ap.add_argument("--use_weight_net_first_part", action='store_true', default=False, help="If use the weight computed in the step1 for step2")
    ap.add_argument("--only_4_rotations", action='store_true', default=False,help="If not use rotation for class")
    return ap.parse_args()
# --- Top-level driver: set up per-run folders, redirect stdout to a log
# --- file, run training, then restore stdout.
args = get_args()
orig_stdout = sys.stdout
# Random run id used to keep output/log folders of different runs apart.
rand = np.random.randint(200000)
# Derive the output folders from the path prefix before '/ROS/'.
words = args.folder_txt_files.split('/ROS/')
args.folder_log = words[0]+'/'+'ROS/outputs/logs/' + str(rand)
args.folder_name = words[0]+'/'+'ROS/outputs/' + str(rand)
args.folder_txt_files_saving = args.folder_txt_files + str(rand)
gpu = str(args.gpu)
device = torch.device("cuda:"+gpu)
if not os.path.exists(args.folder_name):
    os.makedirs(args.folder_name)
print('\n')
print('TRAIN START!')
print('\n')
print('THE OUTPUT IS SAVED IN A TXT FILE HERE -------------------------------------------> ', args.folder_name)
print('\n')
# All further prints go to the run's out.txt until stdout is restored.
f = open(args.folder_name + '/out.txt', 'w')
sys.stdout = f
print("\n%s to %s - %d ss classes" % (args.source, args.target, args.ss_classes))
trainer = Trainer(args, device, rand)
trainer._do_train()
print(args)
# Restore the original stdout and close the log file.
sys.stdout = orig_stdout
f.close()
| [
"noreply@github.com"
] | TrendingTechnology.noreply@github.com |
d49ee13b8846f6305e8ae4ead7f5b99135202e48 | b1b77bb1ed47586f96d8f2554a65bcbd0c7162cc | /SPOTIFY/crtauth/crtauth/ldap_key_provider.py | d89c6427c32275e73dd6181b9b384991f5fb2e4a | [
"Apache-2.0"
] | permissive | DanHefrman/stuff | b3624d7089909972ee806211666374a261c02d08 | b98a5c80cfe7041d8908dcfd4230cf065c17f3f6 | refs/heads/master | 2023-07-10T09:47:04.780112 | 2021-08-13T09:55:17 | 2021-08-13T09:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,057 | py | # Copyright (c) 2011-2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from crtauth import exceptions, rsa, key_provider
import ldap
from ldap import filter
class LDAPKeyProvider(key_provider.KeyProvider):
    """
    Provides a PubKey instance based on a lookup in an LDAP directory.

    User entries are expected to be of class posixAccount living directly under
    base_dn in the directory information tree, have an uid matching the
    username parameter and one sshPublicKey string representation
    of the ssh public key of the user.

    Group entries are expected to be of class posixGroup and be located under
    cn=groups under the base_dn in the directory information tree. The group
    string parameter corresponds to the cn attribute of the posixGroup entry
    """
    def __init__(self, uri, auth_user, auth_password, base_dn, group=None):
        """
        Constructs and binds an LDAPKeyProvider instance to the server
        identified by the uri using auth_user and auth_password for
        authentication.

        When users are looked up, it is verified that they belong to the
        provided group.
        """
        # Escape the group name up front so it is safe to embed in DNs/filters.
        self.group = None
        if group:
            self.group = filter.escape_filter_chars(group)
        self.base_dn = base_dn
        # I know, this is not functionality the ldap module straightforwardly
        # exposes, but it seems to work.
        self.conn = ldap.ldapobject.ReconnectLDAPObject(uri)
        self.conn.simple_bind(auth_user, auth_password)

    def get_key(self, username):
        """
        Returns a PubKey instance based on LDAP lookup. If group is specified
        in the constructor, the user needs to be a member for the lookup to
        succeed.

        Throws NoSuchUserException, InsufficientPrivilegesException,
        MissingKeyException when appropriate.
        """
        # Escape the username to prevent LDAP filter injection.
        user = filter.escape_filter_chars(username)
        # One search fetches both the user entry and (if it exists) the group
        # entry that lists the user as a member.
        f = ("(|(&(uid=%s)(objectClass=posixAccount))"
             "(&(memberUid=%s)(objectClass=posixGroup)))" % (user, user))
        # We don't care about looking for a group if self.group is not set
        group_dn = None
        if self.group:
            group_dn = "cn=%s,cn=groups,%s" % (self.group, self.base_dn)
        # search_s returns (dn, attrs) pairs; index them by DN for lookup.
        result = dict(self.conn.search_s(self.base_dn, ldap.SCOPE_SUBTREE, f,
                                         ['sshPublicKey']))
        attributes = result.get("uid=%s,cn=users,%s" % (user, self.base_dn))
        if attributes is None:
            raise exceptions.NoSuchUserException("User '%s' not found" % user)
        key_list = attributes.get("sshPublicKey")
        if key_list is None:
            raise exceptions.MissingKeyException("User '%s' does not have "
                                                 "her key in LDAP" % user)
        if len(key_list) > 1:
            raise RuntimeError("Can't handle multiple sshPublicKey values "
                               "for an LDAP user")
        # If a group requirement is configured, the group entry must have come
        # back in the same search result (i.e. it lists the user as memberUid).
        if group_dn and group_dn not in result:
            s = ("User '%s' not member of required group '%s'" %
                 (user, self.group))
            raise exceptions.InsufficientPrivilegesException(s)
        return rsa.RSAPublicKey(key_list[0])
class HybridKeyProvider(key_provider.KeyProvider):
    """
    A KeyProvider that behaves as an LDAP KeyProvider if there is no ldap data
    it falls back to a FileKeyProvider.

    Useful for non mixing real ldap users with service-specific non-human
    users.
    """
    def __init__(self, dir, uri, auth_user, auth_password, base_dn, group=None):
        """
        Constructs a FileKeyProvider based on the directory dir, and a
        LDAPKeyProvider based on the remaining arguments.
        """
        self.file_key_provider = key_provider.FileKeyProvider(dir)
        self.ldap_key_provider = LDAPKeyProvider(uri, auth_user, auth_password,
                                                 base_dn, group)

    def get_key(self, username):
        """
        Returns the user's public key if it can be found in LDAP, otherwise
        tries to find it in the key directory, or fails.
        """
        # LDAP is authoritative; only fall back to the key directory when the
        # user does not exist in LDAP at all.
        try:
            return self.ldap_key_provider.get_key(username)
        except exceptions.NoSuchUserException:
            try:
                return self.file_key_provider.get_key(username)
            # NOTE: Python 2 except syntax, kept as-is (this module is py2).
            except Exception, e:
                # Any file-provider error is surfaced as NoSuchUserException
                # with the underlying cause embedded in the message.
                raise exceptions.NoSuchUserException(
                    "User %s not in ldap, defaulted to pubkey dir and got "
                    "exception %s" % (username, e))
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
f65d9423059940465bbccc546b180b8afb0b29bf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02730/s566992327.py | fed7821775b5c54933a78f29905ba5d357c3433d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | s = input()
n = len(s)
# Check whether the whole string is a palindrome.
if s != s[::-1]:
    print('No')
# Check whether the substring from the 1st to the (N-1)/2-th character is a palindrome.
elif s[:(n-1)//2] != s[:(n-1)//2][::-1]:
    print('No')
# Check whether the substring from the (N+3)/2-th to the N-th character is a palindrome.
elif s[(n+3)//2-1:] != s[(n+3)//2-1:][::-1]:
    print('No')
else:
    print('Yes')
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fe41008bb2ab32968ed7dc245a83ccd70bb5c1db | cd0a284c47fb03121e05284b6d5f2940ea6457ba | /fb/dfs-bfs/207-course-schedule.py | 7b8325d06c0f32426434b8845cabd8b291aed9f0 | [] | no_license | franktank/py-practice | 5803933c07c07a06670f83b059806385d0d029fa | 1dec441f1975d402d093031569cfd301eb71d465 | refs/heads/master | 2021-03-22T04:33:20.818891 | 2017-11-14T03:40:54 | 2017-11-14T03:40:54 | 101,592,046 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,031 | py | """
There are a total of n courses you have to take, labeled from 0 to n - 1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
For example:
2, [[1,0]]
There are a total of 2 courses to take. To take course 1 you should have finished course 0. So it is possible.
2, [[1,0],[0,1]]
There are a total of 2 courses to take. To take course 1 you should have finished course 0, and to take course 0 you should also have finished course 1. So it is impossible.
Note:
The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
You may assume that there are no duplicate edges in the input prerequisites.
"""
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
# How to handle loops?
# To take a course, we must have its prerequisite done
# Brute Force
# For each course we see, try to find if its preqrequisite can be done
# Iterate through rest of prerequisites:
# If it is NOT in preqrequisites
# The preqrequisite can be finished, and then numCourses -= 1
# If it is in prerequisites, repeat and make it None in prerequisites if it can be done, and then numCourses -= 1
for pr in prerequisites:
if pr == None:
continue
take = self.helper(pr, prerequisites)
if take:
numCourses -= 1
if numCourses < 0:
return False
else:
return True
def helper(self, pr, prerequisites):
"""
:rtype: bool
"""
for pr in prerequisites
| [
"fliangz96@gmail.com"
] | fliangz96@gmail.com |
bbf45a6ca2e4e02d5dc77888323c239edcc5f744 | fce5eda4745578557f7120104188c2437529b98f | /listas_tipos/dicionario/muitos_usuarios.py | 1558b8ec7959ad7e558fc063ca69369b76826a28 | [] | no_license | weguri/python | 70e61584e8072125a4b4c57e73284ee4eb10f33b | d5195f82428104d85b0e6215b75e31ee260e5370 | refs/heads/master | 2022-12-01T08:26:36.248787 | 2020-08-23T03:30:46 | 2020-08-23T03:30:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | """
Dicionário em um dicionário
Podemos aninhar um dicionário em outro dicionário
"""
usuarios = {
'alberteinstein': {
'nome': 'albert',
'sobrenome': 'einstein',
'pais': 'alemanha'
},
'stephenhawking': {
'nome': 'stephen',
'sobrenome': 'hawking',
'pais': 'reino unido'
}
}
for username, info_user in usuarios.items():
print("\nUsuario:", username)
nome_completo = info_user['nome'] + " " + info_user['sobrenome']
localizacao = info_user['pais']
print("\tNome:", nome_completo.title())
print("\tPais:", localizacao.title())
| [
"welguri@gmail.com"
] | welguri@gmail.com |
a7569d7922515885e8dd93423d4e71b1c36dbd34 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/NCSCCyberAsssessmentFramework/Scripts/EntryWidgetNCSCResultsD/EntryWidgetNCSCResultsD.py | 173f34e55128e5321c9d1b28478b0aade2266374 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 1,005 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
incident = demisto.incidents()
cafdresult = incident[0].get("CustomFields", {}).get("cafdresultraw", {})
if type(cafdresult) != dict:
cafdresult = json.loads(cafdresult)
total = len(cafdresult)
non_compliant_count = (
len([x for x in cafdresult if x["Result"] != "Achieved"]) if cafdresult else None
)
medium = int(round(total / 3, 0))
high = int(round(total / 3 * 2, 0))
data = {
"Type": 17,
"ContentsFormat": "number",
"Contents": {
"stats": non_compliant_count,
"params": {
"layout": "horizontal",
"name": "Unachieved items",
"sign": "",
"colors": {
"items": {
"#00CD33": {"value": -1},
"#FF9000": {"value": medium},
"#FF1744": {"value": high},
}
},
"type": "above",
},
},
}
demisto.results(data)
| [
"noreply@github.com"
] | demisto.noreply@github.com |
22ff77002f20b413ed20ffcd59834f91fd70bff6 | 3aa334fe55b2d618726395167cd75dd37ae9ec27 | /testapp/management/commands/bootstrap.py | 08950d431e442356eac5db34e6b440b6ef3ea179 | [
"BSD-3-Clause"
] | permissive | nishitchittora/django-saml-sp | cd143539c83258d6fd62059f8efb89323b91e282 | 879036f1f90febdf4eed1ef74b2288074736fbec | refs/heads/master | 2023-05-29T03:28:37.768564 | 2021-03-27T02:43:46 | 2021-03-27T02:43:46 | 372,801,911 | 0 | 0 | BSD-3-Clause | 2021-06-01T11:15:20 | 2021-06-01T11:15:19 | null | UTF-8 | Python | false | false | 1,872 | py | from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from sp.models import IdP
class Command(BaseCommand):
help = 'Bootstraps the SP with a default "admin" user and a local test IdP.'
def handle(self, *args, **options):
User = get_user_model()
if User.objects.count() == 0:
print(
'Creating default "admin" account with password "letmein" '
"-- change this immediately!"
)
User.objects.create_superuser(
"admin",
"admin@example.com",
"letmein",
first_name="Admin",
last_name="User",
)
if IdP.objects.count() == 0:
print('Creating "local" IdP for http://localhost:8000')
idp = IdP.objects.create(
name="Local SimpleSAML Provider",
url_params={"idp_slug": "local"},
base_url="http://localhost:8000",
contact_name="Admin User",
contact_email="admin@example.com",
metadata_url="http://localhost:8080/simplesaml/saml2/idp/metadata.php",
respect_expiration=True,
logout_triggers_slo=True,
)
idp.generate_certificate()
# The local IdP sends an email address, but it isn't the nameid. Override it
# to be our nameid, AND set the email field on User.
idp.attributes.create(
saml_attribute="email", mapped_name="email", is_nameid=True
)
try:
idp.import_metadata()
except Exception:
print(
"Could not import IdP metadata; "
"make sure your local IdP exposes {}".format(idp.metadata_url)
)
| [
"dcwatson@gmail.com"
] | dcwatson@gmail.com |
263e182dab5e386359c971120c177a398de757ba | d6d874fe9e1607a859e9484fdc5bce09b3f76472 | /Pipeline/the_LATEST/latest_MAYA/maya_SCRIPTS/pickrunner - Copy (3)/controller/engine_0001.py | 9fbf2e97230181255c7175805dee4d28ab0d535b | [] | no_license | tws0002/pop2-project | c80095cc333195ebb9ffa2199e2c3a3446d0df0c | 6886f05d54ec77b66d13b4eaafe8a66ac49f2f41 | refs/heads/master | 2021-01-11T20:53:19.982950 | 2016-03-10T10:31:29 | 2016-03-10T10:31:29 | 79,202,989 | 1 | 1 | null | 2017-01-17T07:56:09 | 2017-01-17T07:56:09 | null | UTF-8 | Python | false | false | 4,399 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
An enhanced, lightwight, user-defined hierarchy between nodes in Maya
"""
# IMPORT STANDARD LIBRARIES
import os
import sys
sys.path.append("F:\transfer\to_monty\the_LATEST\latest_MAYA\maya_SCRIPTS\pickrunner")
# sys.path.append("F:\transfer\to_monty\the_LATEST\sys_PY\py_MODULES")
# IMPORT THIRD-PARTY LIBRARIES
try:
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
except ImportError:
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
except:
raise
# IMPORT LOCAL LIBRARIES
import controller.membership as membership
import controller.engine as engine
import view.basegui as basegui
class Window(QtGui.QMainWindow, basegui.Ui_MainWindow):
def __init__(self):
super(Window, self).__init__()
self.setupUi(self)
self.init_defaults()
self.init_ui()
self.triggers()
# end __init__
def init_ui(self):
"""
Inits the default behavior of the GUI
"""
# diectional buttons
self.up_pb.setStyleSheet(self.buttonStyle)
self.left_pb.setStyleSheet(self.buttonStyle)
self.down_pb.setStyleSheet(self.buttonStyle)
self.right_pb.setStyleSheet(self.buttonStyle)
# lower lineEdits
self.up_le.setEnabled(False)
self.left_le.setEnabled(False)
self.down_le.setEnabled(False)
self.right_le.setEnabled(False)
# end init_ui
def init_defaults(self):
self.db = {}
self.assignDirections = False
self.loadedNode = None
self.buttonStyle = "background-color: #e6ffff; border: 1px solid black; padding: 4px;"
self.buttonStyleOn = "background-color: #ccffcc;"
self.buttonAssignOff = "background-color: #ffcccc;"
self.buttonError = "background-color: red;"
self.motor = engine.MotorMaya()
# end init_defaults
def triggers(self):
"""
Creates the interactivity functionality of the GUI
"""
self.assignLayout_pb.clicked.connect(self.toggle_pickwalk_directions)
addAttrLeft = functools.partial(engine.MotorMaya, direction="left")
self.left_pb.clicked.connect(self.add_attr)
QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Q"), self, self.reject)
# end triggers
def reject(self):
"""
Executes when the user wants to close the current window class
"""
sys.exit()
# end reject
def toggle_pickwalk_directions(self):
"""
Turns on/off the ability to add buttons to the current node loaded
"""
if not self.assignDirections:
# enable the buttons
self.assignDirections = True
self.assignLayout_pb.setStyleSheet(self.buttonStyleOn)
self.up_pb.setStyleSheet(self.buttonStyleOn)
self.left_pb.setStyleSheet(self.buttonStyleOn)
self.down_pb.setStyleSheet(self.buttonStyleOn)
self.right_pb.setStyleSheet(self.buttonStyleOn)
currentSelection = self.motor.get_selection()
if currentSelection == 0:
self.loadSelection_pb.setStyleSheet(self.buttonError)
self.loadedNode = None
else:
self.loadedNode = currentSelection[-1]
else:
# disable the buttons
self.assignDirections = False
self.assignLayout_pb.setStyleSheet(self.buttonAssignOff)
self.up_pb.setStyleSheet(self.buttonStyle)
self.left_pb.setStyleSheet(self.buttonStyle)
self.down_pb.setStyleSheet(self.buttonStyle)
self.right_pb.setStyleSheet(self.buttonStyle)
self.loadSelection_pb.setStyleSheet(self.buttonStyle)
# end toggle_pickwalk_directions
# end Windows
def show_gui():
"""
Shows the main GUI to the application
"""
app = QtGui.QApplication.activeWindow()
if app is None:
app = QtGui.QApplication.instance()
if app is None:
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
# end show_gui
def main():
show_gui()
# end main
if __name__ == "__main__":
main() | [
"colinvfx@gmail.com"
] | colinvfx@gmail.com |
548d7e455a7fbe253bb7e27b83098b05012f1446 | 402537ee248b91a127772f7ce00a4b2f93fe1d06 | /chapter10/src/src/allocation/domain/commands.py | cb656444bf546659c1bd27074c8bdb424ac020c7 | [] | no_license | babjo/architecture-patterns-with-python | 56ac7b1801cf658fc912ffa7b22398d015d8ee8f | 705a68b34b2c11e2eb18b11444819f964ab6fce9 | refs/heads/master | 2023-07-14T23:01:51.246828 | 2021-08-25T13:25:06 | 2021-08-25T13:25:06 | 394,334,509 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from typing import Optional
from dataclasses import dataclass
from datetime import date
class Command:
pass
@dataclass
class Allocate(Command):
orderid: str
sku: str
qty: int
@dataclass
class CreateBatch(Command):
ref: str
sku: str
qty: int
eta: Optional[date] = None
@dataclass
class ChangeBatchQuantity(Command):
ref: str
qty: int
| [
"kd980311@naver.com"
] | kd980311@naver.com |
302247df466f9f53facf141a2738300d61ef8a04 | 584ce08fd638b2481e61b00da22ae70290cb0e2d | /main/forms.py | e54ec2919a4ddecb88435e72ec1d36138d69f316 | [] | no_license | CHIRAG202/Tutoria-Project | dedef5581ea72f47be5965f5c783a7176aa17fb2 | 133520fbd8b4154d3b1b777a13e179dfd062c438 | refs/heads/master | 2021-08-23T02:10:20.378763 | 2017-12-02T11:42:28 | 2017-12-02T11:42:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | from django import forms
from django.contrib.auth.models import User
from main.models import Student, Sessions, Tutor
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta():
model = User
fields = ('username', 'password', 'email')
# remove email
class StudentInfoForm(forms.ModelForm):
class Meta():
model = Student
fields = ('firstName', 'lastName', 'avatar', 'phoneNo')
class TutorInfoForm(forms.ModelForm):
class Meta():
model = Tutor
fields = ('firstName', 'lastName', 'courses',
'university_name', 'hourly_rate', 'tutor_intro', 'isStudent','phoneNo', 'searchTags', 'avatar', 'tutorType')
class BookingForm(forms.ModelForm):
class Meta():
model = Sessions
fields = ('tutorID', 'studentID', 'bookedDate', 'bookedStartTime', 'bookedEndTime')
| [
"rohaksinghal14@gmail.com"
] | rohaksinghal14@gmail.com |
7e1027a626b96efa1718ab2aad192f93f8db7e12 | 8981fd540c4857edbaf4162e9ca08e86c5625b80 | /capital.py | 792693b711ad1a9c52b0f3f748a65d7a48a526fb | [] | no_license | DylanQiu/CheckioProject | c37a149795b076665b6b05ff1b2d4af9f701c840 | 386fd5aee694ddc7efe7dab1aa1a1f4610a0fb0b | refs/heads/master | 2021-07-19T13:15:07.708474 | 2017-10-24T04:57:13 | 2017-10-24T04:57:13 | 104,953,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | def find_message(text):
"""Find a secret message"""
uppers = [l for l in text if l.isupper()]
s = ''.join(uppers)
return s
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert find_message("How are you? Eh, ok. Low or Lower? Ohhh.") == "HELLO", "hello"
assert find_message("hello world!") == "", "Nothing"
assert find_message("HELLO WORLD!!!") == "HELLOWORLD", "Capitals"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
| [
"32313210+DylanQiu@users.noreply.github.com"
] | 32313210+DylanQiu@users.noreply.github.com |
3185c772f0e736ae4fc5a2c5fa54f50793bfac2a | ef14d37fc87a191b36b5b70c39b02b0d193f9fe0 | /futuregreen/people/urls.py | 5cef8b0730140b238ffbcfa32a611ae77ea5c14b | [] | no_license | dmeehan/futuregreen | 9f608b69255761011a525e349fb583669e8dacaa | 835c455503a75658d8b744df643158ac6575b737 | refs/heads/master | 2020-12-19T01:19:08.975895 | 2017-06-21T13:21:03 | 2017-06-21T13:21:03 | 2,144,649 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # people/urls.py
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic import ListView, DetailView, TemplateView
from futuregreen.people.views import EmployeeDetailView, EmployeeListView
urlpatterns = patterns('',
url(r'^$', EmployeeListView.as_view(), name = 'person_list'),
url(r'^(?P<slug>[-\w]+)/$', EmployeeDetailView.as_view(), name = 'people_person_detail'),
) | [
"dmeehan@gmail.com"
] | dmeehan@gmail.com |
2751b3b5e76bad7aaf1ba7884f8ec09cb869c56d | ecff4b18a49ce5952c5f9125dc027cebdecf10a8 | /azure-mgmt-logic/azure/mgmt/logic/models/workflow_secret_keys.py | ab2f480de04cdc5157f1b5b10bfb7d0ed53826c9 | [
"Apache-2.0"
] | permissive | jehine-MSFT/azure-sdk-for-python | a56c18020ecd5f4c245c093fd6a33e1b1d7c95e1 | 6d0f94b39406eab374906c683bd2150217132a9c | refs/heads/master | 2020-12-06T19:17:38.153819 | 2016-04-08T21:03:16 | 2016-04-08T21:03:16 | 55,809,131 | 0 | 0 | null | 2016-04-08T20:54:00 | 2016-04-08T20:54:00 | null | UTF-8 | Python | false | false | 1,608 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WorkflowSecretKeys(Model):
"""WorkflowSecretKeys
:param primary_secret_key: Gets the primary secret key.
:type primary_secret_key: str
:param secondary_secret_key: Gets the secondary secret key.
:type secondary_secret_key: str
"""
_attribute_map = {
'primary_secret_key': {'key': 'primarySecretKey', 'type': 'str'},
'secondary_secret_key': {'key': 'secondarySecretKey', 'type': 'str'},
}
def __init__(self, primary_secret_key=None, secondary_secret_key=None, **kwargs):
self.primary_secret_key = primary_secret_key
self.secondary_secret_key = secondary_secret_key
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
38bdd4206acfc59ea9b3adf745d3103f4eed2c66 | b3d4198406ec727b29eb3429433aa3eec0c80ead | /CBF/membership/admin.py | dd61fd70bef33ea626b5e552b20d0d4d0d3be629 | [] | no_license | aqt01/CBF | 7c9148aa1e5eed9524082cecef74f9571e1f5889 | 4769b11d26dad1a1dfff718e042f78564b13f671 | refs/heads/master | 2020-06-12T18:00:26.528230 | 2018-03-22T00:34:25 | 2018-03-22T00:34:25 | 75,782,721 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.contrib import admin
from .models import SocialMedia, Member
class SocialMediaInLine(admin.TabularInline):
model = SocialMedia
extra = 1
class MemberAdmin(admin.ModelAdmin):
inlines = [
SocialMediaInLine,
]
list_display = ['name', 'email', 'role_description']
admin.site.register(Member, MemberAdmin)
admin.site.register(SocialMedia)
| [
"lowell.abbott@gmail.com"
] | lowell.abbott@gmail.com |
c32b16b42a4384da77bdc4d6e8b0b0fe32ef1331 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/support/azext_support/_utils.py | 9429828d972a0884f8f6269b4854ae1ae5c694b5 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 2,174 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
from azure.cli.core._profile import Profile
from azure.cli.core.azclierror import UnauthorizedError
from knack.util import CLIError
from knack.log import get_logger
logger = get_logger(__name__)
def is_billing_ticket(service_name):
return "517f2da6-78fd-0498-4e22-ad26996b1dfc" in service_name
def is_quota_ticket(service_name):
return "06bfd9d3-516b-d5c6-5802-169c800dec89" in service_name
def is_subscription_mgmt_ticket(service_name):
return "f3dc5421-79ef-1efa-41a5-42bf3cbb52c6" in service_name
def is_technical_ticket(service_name):
return (not is_billing_ticket(service_name)) and \
(not is_quota_ticket(service_name)) and \
(not is_subscription_mgmt_ticket(service_name))
def parse_support_area_path(problem_classification_id):
service_id_prefix = "/providers/Microsoft.Support/services/".lower()
guid_regex = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
sap_regex = re.compile('^{0}({1})/problemclassifications/({1})$'.format(service_id_prefix, guid_regex))
match = sap_regex.search(problem_classification_id.lower())
if match is not None and len(match.groups()) == 2:
return {"service_name": match.group(1), "problem_classifications_name": match.group(2)}
return None
def get_bearer_token(cmd, tenant_id):
client = Profile(cli_ctx=cmd.cli_ctx)
try:
logger.debug("Retrieving access token for tenant %s", tenant_id)
creds, _, _ = client.get_raw_token(tenant=tenant_id)
except CLIError as unauthorized_error:
raise UnauthorizedError("Can't find authorization for {0}. ".format(tenant_id) +
"Run \'az login -t <tenant_name> --allow-no-subscriptions\' and try again.") from \
unauthorized_error
return "Bearer " + creds[1]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
04abc664f35460d59c8be0e6ce737af05ee1140d | d7b9b490c954c7a9160b69f8ce2c907ef4681ecb | /sponsors/migrations/0017_sponsorbenefit_added_by_user.py | f304cd76bece2385330c80321b7225cdc2430663 | [
"Apache-2.0"
] | permissive | python/pythondotorg | 00db93a4b1789a4d438806d106d9cee3349ad78c | c4ee749942227ca75c8e670546afe67232d647b2 | refs/heads/main | 2023-08-28T20:04:24.735314 | 2023-08-03T19:12:29 | 2023-08-03T19:12:29 | 6,127,047 | 1,131 | 646 | Apache-2.0 | 2023-08-24T15:57:04 | 2012-10-08T16:00:15 | Python | UTF-8 | Python | false | false | 435 | py | # Generated by Django 2.0.13 on 2020-11-20 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sponsors", "0016_auto_20201119_1448"),
]
operations = [
migrations.AddField(
model_name="sponsorbenefit",
name="added_by_user",
field=models.BooleanField(default=False, verbose_name="Added by user?"),
),
]
| [
"noreply@github.com"
] | python.noreply@github.com |
cd6217740f6dc93ee83304e31a2062ebd5bf0370 | 9499922b6d2e2652a5beccafdb57ea35e7f58970 | /templates/openwisp2/urls.py | aa3a020b690d1659b6c1439f03afdb449ac142dc | [
"BSD-3-Clause"
] | permissive | stepura/ansible-openwisp2 | 2d49fe3804df0427cf8006e4346acc7e889d52ce | 1c11882bed03e4f11be15b4d0395c8e9bd30492e | refs/heads/master | 2020-05-21T14:41:34.562597 | 2017-03-10T15:34:50 | 2017-03-10T15:34:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | from django.conf.urls import include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django_netjsonconfig.admin_theme.admin import admin, openwisp_admin
openwisp_admin()
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
# controller URLs
# used by devices to download/update their configuration
# keep the namespace argument unchanged
url(r'^', include('django_netjsonconfig.controller.urls', namespace='controller')),
# common URLs
# shared among django-netjsonconfig components
# keep the namespace argument unchanged
url(r'^', include('django_netjsonconfig.urls', namespace='netjsonconfig')),
# django-x509 urls
# keep the namespace argument unchanged
url(r'^', include('django_x509.urls', namespace='x509')),
]
urlpatterns += staticfiles_urlpatterns()
| [
"nemesis@ninux.org"
] | nemesis@ninux.org |
c8f86134a84120155ebc8043bfd218ea17981057 | a2098c9c8d39cc9e392f21de64c7ced0549d6f1f | /custom/blog/feeds.py | e022cd27429659435e03ba70731e061570513524 | [] | no_license | dmitryro/divorcesus | 23fe394b0d065f635ecb11eed945cc4fcb9bb829 | 8ecedb2b8a019e63f37702888dd12e994a75105e | refs/heads/master | 2022-12-11T17:20:13.348413 | 2020-10-01T17:27:57 | 2020-10-01T17:27:57 | 56,432,086 | 0 | 1 | null | 2022-12-08T02:22:29 | 2016-04-17T11:05:27 | JavaScript | UTF-8 | Python | false | false | 910 | py | from django.contrib.syndication.views import Feed
from django.utils import feedgenerator
from custom.blog.models import Post
from django.utils.feedgenerator import Atom1Feed
from django.core.urlresolvers import reverse
import datetime
class RssSiteNewsFeed(Feed):
title = "Divorces U.S. Feed"
link = "/blog/"
description = "Updates to Divorces U.S. blog."
def items(self):
return Post.objects.order_by('-time_published')[:5]
# def link(self, obj):
# return obj.get_absolute_url()
def item_title(self, item):
return item.title
def item_description(self, item):
return item.body
# item_link is only needed if NewsItem has no get_absolute_url method.
# def item_link(self, item):
# return reverse('posts', args=[item.pk])
class AtomSiteNewsFeed(RssSiteNewsFeed):
feed_type = Atom1Feed
subtitle = RssSiteNewsFeed.description
| [
"dmitryro@gmail.com"
] | dmitryro@gmail.com |
af2569704f85afa754bf1a09f1bb6e3bf339a63d | 021c96f56992bfb58da4973a3b0067ca3298585c | /branch/sqlgen2/sample/toolbox/rtorrent/infopart.py | 15f881db4c5295f6f9c6933408c87f500c3172f7 | [] | no_license | BackupTheBerlios/useless-svn | 3818ec28f74be9ad4b43f7261ebbe50c4efea3d7 | a38ecbb06063d09bf50c284e9fd3f7d9c0e5f3a1 | refs/heads/master | 2021-01-25T10:30:05.919994 | 2012-11-13T20:25:26 | 2012-11-13T20:25:26 | 40,749,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | from qt import QWidget
from qt import PYSIGNAL, SIGNAL
from kdecore import KURL
from kdeui import KMessageBox
from useless.kdebase.htmlpart import BaseInfoPart
from base import MyUrl
from infodoc import RtorrentDocument
class RtorrentInfoPart(BaseInfoPart):
def __init__(self, parent, name='RtorrentInfoPart'):
BaseInfoPart.__init__(self, parent, name=name)
self.clear_view()
self.doc = RtorrentDocument()
def set_info(self, infohash):
self.clear_view()
self.app.processEvents()
self.begin()
self.doc.set_torrent(self.app.rtorrent.torrents[infohash])
self.infohash = infohash
self.write(unicode(self.doc.generate()))
self.end()
#self.emit(PYSIGNAL('EntityInfoUpdated'), (entityid,))
####################################################
# the methods in this section map url's to actions #
####################################################
def urlSelected(self, url, button, state, target, args):
print url
return
if url.find('||') > -1:
self._perform_url_action(url)
else:
self.openURL(KURL(url))
def _perform_url_action(self, url):
parsed = myurl.parse(str(url))
print parsed
action, atype, ident = parsed
if ident.isdigit():
ident = int(ident)
if action == 'edit':
if self._update_entity_dlg is None:
dlg = MainEntityDialog(self.dialog_parent, dtype='update', entityid=ident)
dlg.show()
elif action == 'delete':
print 'delete selected'
elif action == 'addtag':
dlg = AddTagsDialog(self.dialog_parent, ident)
dlg.show()
elif action == 'deltag':
dlg = RemoveTagsDialog(self.dialog_parent, ident)
dlg.show()
else:
KMessageBox.error(self.dialog_parent,
'Unknown action: %s' % action)
| [
"umeboshi@70758ab2-d2f7-0310-a994-9f7f813c4004"
] | umeboshi@70758ab2-d2f7-0310-a994-9f7f813c4004 |
ebbd242f0378e77644f277eea227a316fa7e5f7b | 0729e5a36e75e938b04570ad1515bc9958088a50 | /kopipasta/migrations/0002_auto_20210420_1015.py | 49d94aa250b85cf1d1c0f8d38e416480aaab9ca1 | [] | no_license | multiscripter/kopipasta-django-sqlite | 42c17f0815e807349025ae99222a76ec23e0b0aa | b25cabebc8fdf323ff4535cb921450b8faa427ec | refs/heads/master | 2023-04-06T13:51:15.007595 | 2021-04-24T20:27:47 | 2021-04-24T20:27:47 | 360,644,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | # Generated by Django 3.2 on 2021-04-20 10:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kopipasta', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.SmallAutoField(primary_key=True, serialize=False, verbose_name='ИД')),
('name', models.CharField(max_length=128, verbose_name='Название')),
],
options={
'verbose_name': 'Категория',
'verbose_name_plural': 'Категории',
},
),
migrations.RemoveField(
model_name='item',
name='next',
),
migrations.RemoveField(
model_name='item',
name='prev',
),
]
| [
"ILL-JAH@yandex.ru"
] | ILL-JAH@yandex.ru |
c723e01098140dae38ba2781b5263766148e056c | 6ac2631c256f156d4ddf169e6c67f1fe66ebcaaf | /081/pyteacher/app_base/models.py | bfd1736a59fa47f89e025fc3e617a5e1ee89ba2d | [] | no_license | kasaiee/how-to-pyteacher | 101f106aeeed1b34756cecf502337ff8ee584ff5 | 074a57533f53fd1b8c7f37cd11dbc3b32ab8a08f | refs/heads/master | 2022-12-10T23:50:46.851784 | 2019-07-15T19:31:03 | 2019-07-15T19:31:03 | 187,372,111 | 6 | 4 | null | 2022-12-08T01:55:05 | 2019-05-18T15:08:03 | null | UTF-8 | Python | false | false | 6,187 | py | from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
import jdatetime
from django.utils.timezone import localtime
from django.db import models
from django.urls import reverse
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.fields import GenericForeignKey
from app_chat.models import Chat
from app_social.models import Like, Bookmark, Comment
from django.contrib.auth import get_user_model
User = get_user_model()
def course_image_path(instance, filename):
return instance.title
def attachment_path(instance, filename):
return instance.title
class Course(models.Model):
slug = models.SlugField(null=True, allow_unicode=True, blank=True)
image = models.ImageField(upload_to=course_image_path, null=True)
title = models.CharField(max_length=100, null=True)
description = RichTextUploadingField(null=True)
chats = GenericRelation(Chat)
likes = GenericRelation(Like)
bookmarks = GenericRelation(Bookmark)
comments = GenericRelation(Comment)
def price(self):
return sum([se.price for se in self.coursesession_set.all()])
def get_absolute_url(self):
params = {'slug': self.slug}
return reverse('app-base:course-detail', kwargs=params)
def save(self, *args, **kwargs):
self.slug = self.title.replace(' ', '-')
super().save(*args, **kwargs)
def __str__(self):
return self.title
def get_upload_path(instance, filename):
return 'session/private-videos/%s/%s' % (instance.id, filename)
class CourseSession(models.Model):
slug = models.SlugField(null=True, allow_unicode=True, blank=True)
course = models.ForeignKey(Course, on_delete=models.CASCADE, null=True)
title = models.CharField(max_length=100, null=True)
description = models.TextField(null=True)
aparat_video = models.TextField(null=True, blank=True)
next_session = models.ForeignKey(
'CourseSession', on_delete=models.SET_NULL, null=True, related_name='next', blank=True)
video = models.FileField(upload_to=get_upload_path, null=True, blank=True)
attachment_files = GenericRelation('AttachmentFiles')
chats = GenericRelation(Chat)
likes = GenericRelation(Like)
bookmarks = GenericRelation(Bookmark)
comments = GenericRelation(Comment)
price = models.PositiveIntegerField(null=True, default=0)
def prev_session(self):
return CourseSession.objects.get(next_session=self)
@property
def has_price(self):
return boll(self.price)
def image(self):
return self.course.image
def save(self, *args, **kwargs):
self.slug = self.title.replace(' ', '-')
super().save(*args, **kwargs)
def get_absolute_url(self):
params = {'course_slug': self.course.slug, 'session_slug': self.slug}
return reverse('app-base:course-session-detail', kwargs=params)
def __str__(self):
return self.title
class CourseSessionExercise(models.Model):
slug = models.SlugField(null=True, allow_unicode=True, blank=True)
course_session = models.ForeignKey(CourseSession, on_delete=models.CASCADE, null=True)
title = models.CharField(max_length=100, null=True)
description = RichTextUploadingField(null=True)
aparat_video = models.TextField(null=True, blank=True)
attachment_files = GenericRelation('AttachmentFiles')
chats = GenericRelation(Chat)
likes = GenericRelation(Like)
bookmarks = GenericRelation(Bookmark)
comments = GenericRelation(Comment)
def image(self):
return self.course_session.course.image
def save(self, *args, **kwargs):
self.slug = self.title.replace(' ', '-')
super().save(*args, **kwargs)
def get_absolute_url(self):
params = {'course_slug': self.course_session.course.slug,
'session_slug': self.course_session.slug, 'exercise_slug': self.slug}
return reverse('app-base:course-session-exercise-detail', kwargs=params)
def user(self):
return [c.user for c in self.chats.all() if not c.user.is_superuser][0]
def __str__(self):
return self.title
class ExerciseByStudent(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
exercise = models.ForeignKey(CourseSessionExercise, on_delete=models.CASCADE, null=True)
# rate = models.PositiveSmallIntegerField(null=True)
done = models.BooleanField(default=True)
code = models.TextField(null=True, blank=True)
done_datetime = models.DateTimeField(auto_now_add=True, null=True)
def jd_done_datetime(self):
self.done_datetime = localtime(self.done_datetime)
jdatetime.set_locale('fa_IR')
jdatetime.datetime.now().strftime('%A %B')
jd_datetime = jdatetime.datetime.fromgregorian(
year=self.done_datetime.year,
month=self.done_datetime.month,
day=self.done_datetime.day,
hour=self.done_datetime.hour,
minute=self.done_datetime.minute,
second=self.done_datetime.second,
)
return jd_datetime.strftime('%A, %d %B %y %H:%M:%S')
def __str__(self):
return self.user.username + ' ' + self.exercise.title
class AttachmentFiles(models.Model):
file = models.FileField(upload_to='attach-files/%y-%m-%d_%H:%M')
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
@property
def title(self):
return self.file.url.split('/')[-1]
@property
def color(self):
colors = {
'ppt': 'orange',
'pptx': 'orange',
'doc': 'light-blue darken-3',
'docx': 'light-blue darken-3',
'csv': 'green',
'xlsx': 'green',
'xls': 'green',
'py': 'yellow',
'pdf': 'pink',
}
file_format = self.title.split('.')[-1]
return colors.setdefault(file_format, 'grey')
def __str__(self):
return self.content_object.title
| [
"1tapciran@gmail.com"
] | 1tapciran@gmail.com |
721db38f5608aae8294e9b5c455423a9532f1398 | 250db406ad4a62e3d576e55b979bcfdc3407f226 | /Leetcode分类/7. LinkedList/Leetcode_86_Partition List/my_solution.py | 4eb4526fd86b4a98a47fac363c9b88d4e4013760 | [] | no_license | chenshanghao/Interview_preparation | 0830f0e461a2fe287b8ec24ae761974f50268767 | 4e7701d32990604c16ba18a8083c2108c0232306 | refs/heads/master | 2020-04-25T02:36:19.499364 | 2019-06-10T04:51:00 | 2019-06-10T04:51:00 | 172,446,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def partition(self, head, x):
"""
:type head: ListNode
:type x: int
:rtype: ListNode
"""
lowerList = lowerTail = ListNode(-1)
higherList = higherTail = ListNode(-1)
while head:
tmp = head
print(tmp.val)
head = head.next
if tmp.val < x:
lowerTail.next = tmp
lowerTail = lowerTail.next
else:
higherTail.next = tmp
higherTail = higherTail.next
higherTail.next = None
lowerTail.next = higherList.next
return lowerList.next | [
"21551021@zju.edu.cn"
] | 21551021@zju.edu.cn |
e2966c2a118b6034281b5b1b5ed6e51dfc5c0cf6 | a62fad21b7d00360e08a4c2666ced6e0a938d772 | /blood_finder_api/blood_finder_api/asgi.py | 56c9802576900480eb5889e63e5f485e18ea4841 | [] | no_license | NumanIbnMazid/blood-finder | 46153efc5094601d628c16f685fb3d4a68e259ac | 95ace66f7e9a5460389940cfc6341cfb218b7148 | refs/heads/master | 2023-07-29T16:47:30.723456 | 2021-09-13T11:23:00 | 2021-09-13T11:23:00 | 405,943,320 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | """
ASGI config for blood_finder_api project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blood_finder_api.settings')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blood_finder_api.settings.development')
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blood_finder_api.settings.pythonanywhere')
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blood_finder_api.settings.heroku')
application = get_asgi_application()
| [
"numanibnmazid@gmail.com"
] | numanibnmazid@gmail.com |
d17f43149aafd5d87d8b4ef49f3a0806a3ccffcc | 04b2e1c38fc64b8fd1020dfd5232eb3034f6cc1a | /ui/LogPlotSwitcher.py | 4d37292d20ada961aa6c953beeab4ff74c2930c8 | [
"Apache-2.0"
] | permissive | Kevin2599/GRIPy | faac3c0ffb98fc26094349bcc40c3522bd53b76b | 9cd79dded7f57b52515410a9bcb0a8cead48bfdd | refs/heads/master | 2020-04-13T17:27:51.655974 | 2018-08-29T20:39:09 | 2018-08-29T20:39:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,939 | py | # -*- coding: utf-8 -*-
import wx
from wx.combo import BitmapComboBox
from collections import OrderedDict
from om.Manager import ObjectManager
class Dialog(wx.Dialog):
def __init__(self, parent, colors, color_names, i_color, welluid, lims, loguid, *args, **kwargs):
if 'on_ok_callback' in kwargs:
self.on_ok_callback = kwargs.pop('on_ok_callback')
else:
self.on_ok_callback = None
if 'on_cancel_callback' in kwargs:
self.on_cancel_callback = kwargs.pop('on_cancel_callback')
else:
self.on_cancel_callback = None
super(Dialog, self).__init__(parent, *args, **kwargs)
self._OM = ObjectManager(self)
self.cur_loguid = loguid
self.lims = OrderedDict()
for uid, lim in lims.items():
self.lims[uid] = [str(a) for a in lim]
button_sizer = self.CreateButtonSizer(wx.OK | wx.CANCEL)
self.Bind(wx.EVT_BUTTON, self.on_button)
self.SetTitle(u"Alterar Perfil")
fgs = wx.FlexGridSizer(3, 2, 4, 4)
color_label = wx.StaticText(self, label="Cor: ")
log_label = wx.StaticText(self, label="Perfil: ")
lim_label = wx.StaticText(self, label="Limites: ")
self.color_box = BitmapComboBox(self, style=wx.CB_READONLY)
for c, cn in zip(colors, color_names):
self.color_box.Append(cn, wx.EmptyBitmapRGBA(32, 2, c[0], c[1],
c[2], 255))
self.log_box = wx.Choice(self)
self.log_box.AppendItems([log.name for log in self._OM.list('log', welluid)])
self.loguidmap = [log.uid for log in self._OM.list('log', welluid)]
self.log_box.Bind(wx.EVT_CHOICE, self.on_log_select)
lim_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.lim1_ctrl = wx.TextCtrl(self, style=wx.TE_RIGHT)
lim_sizer.Add(self.lim1_ctrl, 1, wx.EXPAND)
self.lim2_ctrl = wx.TextCtrl(self, style=wx.TE_RIGHT)
lim_sizer.Add(self.lim2_ctrl, 1, wx.EXPAND)
fgs.AddMany([(color_label), (self.color_box, 1, wx.EXPAND),
(log_label), (self.log_box, 1, wx.EXPAND),
(lim_label), (lim_sizer, 1, wx.EXPAND)])
fgs.AddGrowableCol(1, 1)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(fgs, flag=wx.ALL | wx.EXPAND, border=8)
vbox.Add(button_sizer, flag=wx.ALIGN_RIGHT, border=8)
if i_color is not None:
self.color_box.SetSelection(i_color)
if loguid is not None:
idx = self.loguidmap.index(loguid)
self.log_box.SetSelection(idx)
self.lim1_ctrl.SetValue(self.lims[loguid][0])
self.lim2_ctrl.SetValue(self.lims[loguid][1])
self.SetSizerAndFit(vbox)
def on_log_select(self, event):
idx = event.GetSelection()
loguid = self.loguidmap[idx]
if loguid != self.cur_loguid:
l1 = self.lim1_ctrl.GetValue()
l2 = self.lim2_ctrl.GetValue()
if self.cur_loguid is not None:
self.lims[self.cur_loguid] = [l1, l2]
self.lim1_ctrl.SetValue(self.lims[loguid][0])
self.lim2_ctrl.SetValue(self.lims[loguid][1])
self.cur_loguid = loguid
event.Skip(True)
def on_button(self, event):
evt_id = event.GetId()
if evt_id == wx.ID_OK and self.on_ok_callback is not None:
self.on_ok_callback(event)
elif evt_id == wx.ID_CANCEL and self.on_cancel_callback is not None:
self.on_cancel_callback(event)
event.Skip(True)
def get_loguid(self):
idx = self.log_box.GetSelection()
loguid = self.loguidmap[idx]
return loguid
def get_i_color(self):
return self.color_box.GetSelection()
def get_lim(self):
return [float(self.lim1_ctrl.GetValue()),
float(self.lim2_ctrl.GetValue())]
| [
"adrianopaulo@gmail.com"
] | adrianopaulo@gmail.com |
3177575f51e0cd6e73acce8e9bef8aea053bfe42 | 9a38733c268e4a715c70c2bedba10433ddad3380 | /test_GCR_on_miniImagenet.py | 6e4d9a5131fb6b44729a971b08e179abb0b76661 | [] | no_license | lwj2018/few-shot | bca3d7d4148f607cc70e1a1c1e5847a0428ed53e | d2c5fc14f519f81e2e29e6abea6affe82e122b61 | refs/heads/master | 2021-05-17T21:52:10.277842 | 2020-04-08T11:26:38 | 2020-04-08T11:26:38 | 250,967,777 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,915 | py | import os.path as osp
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets.mini_imagenet_drop500 import MiniImageNet2
from datasets.samplers import CategoriesSampler_train_100way, CategoriesSampler_val_100way
from models.GCR import GCR
from models.convnet import gcrConvnet
from utils.ioUtils import *
from utils.critUtils import loss_for_gcr
from utils.testUtils import test_100way
from utils.metricUtils import euclidean_metric
from torch.utils.tensorboard import SummaryWriter
from utils.dataUtils import getDataloader
from Arguments import Arguments
# Hyper params
epochs = 1000
learning_rate = 1e-3
# Options
shot = 5
dataset = 'miniImage'
store_name = 'test' + dataset + '_GCR' + '_%dshot'%(shot)
summary_name = 'runs/' + store_name
checkpoint = '/home/liweijie/projects/few-shot/checkpoint/20200401_miniImage_GCR_best.pth.tar'
log_interval = 20
device_list = '1'
num_workers = 8
model_path = "./checkpoint"
start_epoch = 0
best_acc = 0.00
# Get args
args = Arguments(shot,dataset)
# Use specific gpus
os.environ["CUDA_VISIBLE_DEVICES"]=device_list
# Device setting
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Use writer to record
writer = SummaryWriter(os.path.join(summary_name, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
# Prepare dataset & dataloader
valset = MiniImageNet2('trainvaltest')
val_loader = DataLoader(dataset=valset, batch_size = 128,
num_workers=8, pin_memory=True, shuffle=True)
valset2 = MiniImageNet2('trainval')
val_loader2 = DataLoader(dataset=valset2, batch_size = 128,
num_workers=8, pin_memory=True, shuffle=True)
valset3 = MiniImageNet2('test')
val_loader3 = DataLoader(dataset=valset3, batch_size = 128,
num_workers=8, pin_memory=True, shuffle=True)
model_cnn = gcrConvnet().to(device)
model = GCR(model_cnn,train_way=args.train_way,\
test_way=args.test_way, shot=args.shot,query=args.query,query_val=args.query_val).to(device)
# Resume model
if checkpoint is not None:
start_epoch, best_acc = resume_gcr_model(model, checkpoint, args.n_base)
# Create loss criterion
criterion = nn.CrossEntropyLoss()
# Start Test
print("Test Started".center(60, '#'))
for epoch in range(start_epoch, start_epoch+1):
acc = test_100way(model,criterion,val_loader,device,epoch,log_interval,writer,args,euclidean_metric)
print('Batch accu_a on miniImagenet: {:.3f}'.format(acc))
acc = test_100way(model,criterion,val_loader2,device,epoch,log_interval,writer,args,euclidean_metric)
print('Batch accu_b on miniImagenet: {:.3f}'.format(acc))
acc = test_100way(model,criterion,val_loader3,device,epoch,log_interval,writer,args,euclidean_metric)
print('Batch accu_n on miniImagenet: {:.3f}'.format(acc))
print("Test Finished".center(60, '#')) | [
"lwj19970331@gmail.com"
] | lwj19970331@gmail.com |
898f4f336d98d93c12cdc2ef9a7995f33a80704b | c44904609923bbc20812ddc1f875ffb182f98518 | /Day 4 - Beginner - Randomisation and Python Lists/1. Random Exercise/main.py | 0df72c8cf8e418a612a922577f52721843005366 | [] | no_license | ceteongvanness/100-Days-of-Code-Python | a3f7b88a25aedbfe3cc5633cadf09fa746c2a2ec | 272b68c8d720f2d25e05245d41c7b8fff2851ddd | refs/heads/master | 2023-03-11T05:13:32.823152 | 2021-02-20T00:31:02 | 2021-02-20T00:31:02 | 310,734,202 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | import random
# Seed the RNG from user input so the flip is reproducible for a given seed.
test_seed = int(input("Create a seed number: "))
random.seed(test_seed)
# randint(0, 1): 1 means heads, 0 means tails.
coin = random.randint(0, 1)
print("Heads" if coin == 1 else "Tails")
"ceteongvanness@hotmail.com"
] | ceteongvanness@hotmail.com |
3cc902946a6b0a3ee3e399f506011de912eece59 | 07e396e3682465cd753d5c99ef06f9f70c374816 | /Test_scripts/homerest_rest_framework.py | 3de208f2c2b5c59ade5ec780ffc954b9710e9514 | [] | no_license | DavisDeco/Django-RestAPI | d1ac04346f32e3e9122a485e7cebd81c73253f2e | f88135437c3fe40f27fe214adca05905a1c80e92 | refs/heads/master | 2022-12-25T20:33:00.659364 | 2019-12-13T09:10:57 | 2019-12-13T09:10:57 | 227,798,733 | 0 | 1 | null | 2022-12-08T06:16:24 | 2019-12-13T09:03:55 | Python | UTF-8 | Python | false | false | 1,102 | py | import json
import requests
import os
ENDPOINT = "http://127.0.0.1:8000/api/status/"
image_path = os.path.join(os.getcwd(),"Arifu_Logo_Transparent.png")
def do_img(method='get', data={}, is_json=True, img_path=None):
    """Send an HTTP request to ENDPOINT, optionally uploading an image.

    :param method: HTTP verb understood by ``requests`` (e.g. 'get', 'post').
    :param data: request payload; serialized to JSON when *is_json* is True.
    :param is_json: when True, send *data* as a JSON body with the matching
        content-type header.
    :param img_path: path of an image to upload as multipart field 'image';
        when None no file is attached.
    :return: the ``requests.Response`` object.
    """
    headers = {}  # bug fix: was misspelled 'header', causing a NameError below
    if is_json:
        headers['content-type'] = 'application/json'
        data = json.dumps(data)
    if img_path is not None:
        # Bug fix: open the path the caller passed (was the module-level
        # 'image_path' constant) and call the imported 'requests' module
        # (was the undefined name 'request').
        with open(img_path, 'rb') as image:
            file_data = {
                'image': image
            }
            r = requests.request(method, ENDPOINT, data=data, files=file_data)
    else:
        r = requests.request(method, ENDPOINT, data=data, headers=headers)
    print(r.text)
    print(r.status_code)
    return r
do_img(method='post',data={'user':1,"content":""},is_json=False)
def do(method='get', data={}, is_json=True):
    """Send an HTTP request to ENDPOINT with an optional JSON body.

    :param method: HTTP verb understood by ``requests``.
    :param data: request payload; serialized to JSON when *is_json* is True.
    :param is_json: when True, send *data* as a JSON body with the matching
        content-type header.
    :return: the ``requests.Response`` object.
    """
    headers = {}  # bug fix: was misspelled 'header', causing a NameError below
    if is_json:
        headers['content-type'] = 'application/json'
        data = json.dumps(data)
    # Bug fix: the imported module is 'requests', not 'request'.
    r = requests.request(method, ENDPOINT, data=data, headers=headers)
    print(r.text)
    print(r.status_code)
    return r
"you@example.com"
] | you@example.com |
bd0d602e44529f3602a633fba72b2687e55f1fa0 | e573b586a921084f29a36f8e2de5afcae2c65ff8 | /tasks/part_3/replacement_3.py | 7fbd0023900ac766dbda655780824a8c8f5546a5 | [] | no_license | HannaKulba/AdaptiveTraining_English | e69c8a0c444c1fa72b4783ba837cb3d9dc055d91 | 46497dc6827df37f4ebb69671912ef5b934ab6f0 | refs/heads/master | 2020-12-28T15:05:25.762072 | 2020-02-19T14:39:22 | 2020-02-19T14:39:22 | 238,381,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | import sys
import re
for line in sys.stdin:
    line = line.rstrip()
    # Replace the first whole word made up solely of 'a'/'A' with "argh".
    # [aA]+ matches exactly the same words as the original (a|A+)+ but has
    # no nested quantifier, so it cannot backtrack exponentially (ReDoS)
    # on long runs of 'A' that fail to match.
    result = re.sub(r'\b[aA]+\b', 'argh', line, 1)
    print(result)
| [
"anna.mirraza@gmail.com"
] | anna.mirraza@gmail.com |
8a0208ea6f4f05a60f00b54f6cdd4233171e6574 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_055/ch85_2020_05_09_19_47_41_223416.py | 40546593ba1c709f5bd32f0a8ecff9073cf49f93 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | ocorrencias_banana = 0
with open('macacos-me-mordam.txt', 'r') as arquivo:
    texto = arquivo.read()
# Bug fix: the original iterated over single characters ("for string in
# texto"), so range(len(string) - 1) was always empty and the chained
# comparisons ("== 'b' or 'B'") were always truthy anyway — the count was
# always 0. Count overlapping, case-insensitive occurrences of "banana"
# with a sliding window instead.
alvo = 'banana'
texto_minusculo = texto.lower()
ocorrencias_banana = sum(
    1
    for i in range(len(texto_minusculo) - len(alvo) + 1)
    if texto_minusculo[i:i + len(alvo)] == alvo
)
print(ocorrencias_banana)
| [
"you@example.com"
] | you@example.com |
2e0c46b2e3309b3b760e0fa11ae5d3a7644321bf | 19fdaccb11ba28ddf73ad88cfe7d158e6872b4a7 | /Projects_csld/common/redis_token.py | 1f1f7d87eaaab69d6792073dfc3bf03a931af22f | [] | no_license | basheGG/CSLD | 130a243780d65d596a146cb0894bf0355b5165fb | 286c5cbcab1487ecc36724fb1221bc07f021db45 | refs/heads/master | 2020-09-20T05:28:07.185000 | 2019-11-27T09:30:40 | 2019-11-27T09:30:40 | 224,388,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import redis
import json
r = redis.Redis(host='127.0.0.1', port=6379, password='', db=1, decode_responses=True)
class Token():
    """Session token loaded from a Redis-backed store.

    NOTE(review): these class attributes are shared by all instances until
    LoadToken overwrites them on an instance; Permissions in particular is
    a shared mutable list.
    """
    Token = ""
    Id = ""
    Type = ""
    Permissions = []
    def LoadToken(self, token):
        """Populate this object from the Redis entry for *token*.

        Fetches key "FaceMakeMoney:<token>", JSON-decodes it and copies
        Token/Id/Type onto the instance; returns self for chaining.

        NOTE(review): r.get() returns None for an unknown token, which makes
        json.loads raise — confirm callers guarantee the key exists.
        NOTE(review): the raw token is printed to stdout (debug leftover?);
        this leaks credentials into logs — consider removing.
        """
        print('***********************************************')
        print(token)
        print('***********************************************')
        j = r.get("FaceMakeMoney:{Token}".format(Token=token))
        o = json.loads(j)
        self.Token = o["Token"]
        self.Id = o["Id"]
        self.Type = o["Type"]
        return self
"admin@example.com"
] | admin@example.com |
c176e3f41b84dbe540dacb097d32f02880d5c6f0 | c4feb6227cc68e96c7454ee7682a91f6f6afd164 | /supervised_learning/0x08-deep_cnns/5-dense_block.py | 3c0f8e19dbbbc41427fb787ed218f4146de7e322 | [] | no_license | Karenahv/holbertonschool-machine_learning | 4b7ae5ad4cd1f06f8bae87a509d11b5c8069f8c9 | 884db3d605c2d0eee968f03ce7f525f2a557f261 | refs/heads/master | 2022-12-24T16:17:34.753055 | 2020-09-30T02:09:08 | 2020-09-30T02:09:08 | 255,319,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | #!/usr/bin/env python3
"""builds a dense block as
described in Densely Connected
Convolutional Networks"""
import tensorflow.keras as K
def dense_block(X, nb_filters, growth_rate, layers):
    """Build a DenseNet dense block ("Densely Connected Convolutional
    Networks", Huang et al.).

    Each layer is BN -> ReLU -> 1x1 conv (bottleneck, 4*growth_rate
    filters) -> BN -> ReLU -> 3x3 conv (growth_rate filters), and its
    output is concatenated onto the running feature map.

    Args:
        X: input Keras tensor.
        nb_filters: number of filters in X on entry.
        growth_rate: number of filters each layer adds.
        layers: number of layers in the block.

    Returns:
        Tuple of (concatenated output tensor, updated filter count).
    """
    kernel_init = K.initializers.he_normal(seed=None)
    for i in range(layers):
        # Bottleneck: 1x1 convolution with 4 * growth_rate filters.
        x = K.layers.BatchNormalization()(X)
        x = K.layers.Activation('relu')(x)
        x = K.layers.Conv2D(filters=4*growth_rate,
                            kernel_size=1,
                            padding='same',
                            kernel_initializer=kernel_init)(x)
        # 3x3 convolution producing growth_rate new feature maps.
        x = K.layers.BatchNormalization()(x)
        x = K.layers.Activation('relu')(x)
        x = K.layers.Conv2D(filters=growth_rate,
                            kernel_size=3,
                            padding='same',
                            kernel_initializer=kernel_init)(x)
        # Dense connectivity: append the new maps to all previous outputs.
        X = K.layers.concatenate([X, x])
        nb_filters += growth_rate
    return X, nb_filters
| [
"you@example.com"
] | you@example.com |
2c96ac1a2773232924361a5a429b20d26afb2287 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/130/usersdata/162/33622/submittedfiles/al8.py | f937fc1c073c5b47ba4a97a0d3fd006ae097d61d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | # -*- coding: utf-8 -*-
n=int(input('Digite o valor de n:'))
# Bug fix: 'fatorial' was read before any assignment, which raised a
# NameError on the first loop iteration. The factorial accumulator must
# start at 1.
fatorial = 1
for i in range(1, n + 1):
    fatorial = fatorial * i
# n! (prints 1 for n <= 0, since the loop body never runs).
print(fatorial)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
91ad66dae791a33f3cf1c674c9da232c59a763ab | a5eab1deb33a380a48444b836c871f0c93307e47 | /conkit/io/tests/test__iotools.py | d4ea7801c48c3147051277ccc5d876f640778c48 | [
"BSD-3-Clause"
] | permissive | xiangf/conkit | a425131bef2da193da0fe9100213b1cd5a40ce01 | 633978d4665dcf99f28c6e35e602e91c37c49229 | refs/heads/master | 2021-04-05T23:49:27.750815 | 2018-02-26T12:36:04 | 2018-02-26T12:36:04 | 125,077,426 | 0 | 0 | null | 2018-03-13T15:57:26 | 2018-03-13T15:57:26 | null | UTF-8 | Python | false | false | 3,868 | py | """Testing facility for conkit.io._iotools"""
__author__ = "Felix Simkovic"
__date__ = "21 Nov 2016"
import os
import unittest
from conkit.io import _iotools
class Test(unittest.TestCase):
def test_create_tmp_f_1(self):
fname = _iotools.create_tmp_f()
self.assertTrue(os.path.isfile(fname))
os.unlink(fname)
def test_create_tmp_f_2(self):
content = 'Hello, World!'
fname = _iotools.create_tmp_f(content=content, mode='w')
self.assertTrue(os.path.isfile(fname))
with open(fname, 'r') as f_in:
written_content = f_in.read()
self.assertEqual(content, written_content)
os.unlink(fname)
def test_create_tmp_f_3(self):
content = 'Hello, World!'
content_bytes = content.encode('utf-8')
fname = _iotools.create_tmp_f(content=content_bytes, mode='wb')
self.assertTrue(os.path.isfile(fname))
with open(fname, 'rb') as f_in:
written_content_bytes = f_in.read()
written_content = written_content_bytes.decode('utf-8')
self.assertEqual(content, written_content)
os.unlink(fname)
def test_is_str_like_1(self):
self.assertTrue(_iotools.is_str_like('foo')) # str
self.assertFalse(_iotools.is_str_like(1)) # int
self.assertFalse(_iotools.is_str_like(1.)) # float
self.assertFalse(_iotools.is_str_like([])) # list
self.assertFalse(_iotools.is_str_like(())) # tuple
self.assertFalse(_iotools.is_str_like({})) # dict
self.assertFalse(_iotools.is_str_like(set())) # set
def test_open_f_handle_1(self):
fname = _iotools.create_tmp_f()
with _iotools.open_f_handle(fname, 'append') as fhandle:
self.assertEqual('a', fhandle.mode)
f_in_handle = _iotools.open_f_handle(fname, 'append')
with _iotools.open_f_handle(f_in_handle, 'append') as fhandle:
self.assertEqual('a', fhandle.mode)
f_in_handle.close()
os.unlink(fname)
def test_open_f_handle_2(self):
fname = _iotools.create_tmp_f()
with _iotools.open_f_handle(fname, 'read') as fhandle:
self.assertEqual('r', fhandle.mode)
f_in_handle = _iotools.open_f_handle(fname, 'read')
with _iotools.open_f_handle(f_in_handle, 'read') as fhandle:
self.assertEqual('r', fhandle.mode)
f_in_handle.close()
os.unlink(fname)
def test_open_f_handle_3(self):
fname = _iotools.create_tmp_f()
with _iotools.open_f_handle(fname, 'write') as fhandle:
self.assertEqual('w', fhandle.mode)
f_in_handle = _iotools.open_f_handle(fname, 'write')
with _iotools.open_f_handle(f_in_handle, 'write') as fhandle:
self.assertEqual('w', fhandle.mode)
f_in_handle.close()
os.unlink(fname)
def test_open_f_handle_4(self):
fname = _iotools.create_tmp_f()
with _iotools.open_f_handle(fname, 'write') as fhandle:
self.assertEqual('w', fhandle.mode)
fhandle.write("hello world!")
with _iotools.open_f_handle(fname, 'read') as fhandle:
self.assertEqual('r', fhandle.mode)
self.assertEqual("hello world!", fhandle.read().strip())
os.unlink(fname)
def test_open_f_handle_5(self):
with self.assertRaises(TypeError):
_iotools.open_f_handle(1, 'read')
with self.assertRaises(TypeError):
_iotools.open_f_handle(1.0, 'write')
def test_open_f_handle_6(self):
fname = _iotools.create_tmp_f()
with self.assertRaises(ValueError):
_iotools.open_f_handle(fname, 'foo')
with self.assertRaises(ValueError):
_iotools.open_f_handle(fname, 'bar')
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"felixsimkovic@me.com"
] | felixsimkovic@me.com |
153e0370ae6af4a26ea68a01c9d3909b89fac7dc | 2ca4d75dc00c20b886f16855d62d3cce444bb301 | /testsuits/test_baidu_search1.py | a928b1fbb78ffae2c522cce0168010df06d3edb8 | [] | no_license | christinecoco/python_automation | d81103aed00910595e662b581ef4350c16a9c316 | bad2e90d0ae23f3d34096d7cc6682b94702a8730 | refs/heads/master | 2020-05-23T22:37:47.718254 | 2019-05-16T07:43:15 | 2019-05-16T07:43:15 | 186,977,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | #coding=utf-8
import time
import unittest
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
from framework.browser_engine import BrowserEngine
from pageobjects.baidu_homepage import HomePage
class BaiduSearch(unittest.TestCase):
    """UI tests for Baidu search, driven through the page-object layer."""
    @classmethod
    def setUpClass(cls):
        """
        Test-fixture setup: prepares the preconditions for the tests
        (opens the browser once for the whole class).
        :return:
        """
        browse=BrowserEngine(cls)
        cls.driver=browse.open_browser(cls)
    @classmethod
    def tearDownClass(cls):
        """
        Clean-up once the tests finish; essentially just closes the browser.
        :return:
        """
        cls.driver.quit()
    def test_baidu_search(self):
        """
        Method names must start with "test"; the test logic is wrapped in a
        test-prefixed method so unittest discovers it.
        :return:
        """
        # reload(sys)
        # sys.setdefaultencoding('utf-8')
        homepage = HomePage(self.driver)
        homepage.type_search('selenium')
        homepage.send_submit_btn()
        time.sleep(2)
        homepage.get_window_img()
        try:
            assert 'selenium' in homepage.get_page_title()
            print('test pass')
        except Exception as e:
            # NOTE(review): catching the assertion here means unittest always
            # reports this test as passing; consider letting it propagate.
            print('test fail',format(e))
    def test_search1(self):
        # Same flow with a different query; takes a screenshot, no assertion.
        homepage=HomePage(self.driver)
        homepage.type_search('python')
        homepage.send_submit_btn()
        time.sleep(2)
        homepage.get_window_img()
if __name__=='__main__':
unittest.main() | [
"tester@test.com"
] | tester@test.com |
c361fd4c9c62df1c52bbd066ad265b76e4f5d3bd | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/exp-big-1450.py | 4af975f9d49d0f7c1af22e4f2dc895b6dd87fb17 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,180 | py | # Compute x**y
def exp(x: int, y: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp2(x: int, y: int, x2: int, y2: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
a: int = 0
a2: int = 0
a3: int = 0
a4: int = 0
a5: int = 0
def f(i: int) -> int:
nonlocal a
nonlocal a2
nonlocal a3
nonlocal a4
nonlocal a5
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
a2 = a * x
a3 = a * x
a4 = a * x
a5 = a * x
return f(i-1)
a = 1
a2 = 1
a3 = 1
a4 = 1
a5 = 1
return f(y)
# Input parameter
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
while i <= n:
print(exp(2, i % 31))
$Var = i + 1 | [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
d12e1498499716d532c06a62f69e953762cfd604 | be61a9f30274514857ea34297719157f1e5b8447 | /fhir/resources/DSTU2/eligibilityresponse.py | 83758ea3826fdf34e860f691830f098b99803f84 | [
"BSD-3-Clause"
] | permissive | jwygoda/fhir.resources | ceff3a620100d2e875136b86d3e82816c0e60a33 | 5053565570d1ca992d9971d20db813c53fd350b9 | refs/heads/master | 2021-02-05T02:59:17.436485 | 2019-07-18T10:57:33 | 2019-07-18T10:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,429 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/EligibilityResponse) on 2019-05-14.
# 2019, SMART Health IT.
from . import domainresource
class EligibilityResponse(domainresource.DomainResource):
""" EligibilityResponse resource.
This resource provides eligibility and plan details from the processing of
an Eligibility resource.
"""
resource_name = "EligibilityResponse"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.created = None
""" Creation date.
Type `FHIRDate` (represented as `str` in JSON). """
self.disposition = None
""" Disposition Message.
Type `str`. """
self.identifier = None
""" Business Identifier.
List of `Identifier` items (represented as `dict` in JSON). """
self.organization = None
""" Insurer.
Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
self.originalRuleset = None
""" Original version.
Type `Coding` (represented as `dict` in JSON). """
self.outcome = None
""" complete | error.
Type `str`. """
self.request = None
""" Claim reference.
Type `FHIRReference` referencing `EligibilityRequest` (represented as `dict` in JSON). """
self.requestOrganization = None
""" Responsible organization.
Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
self.requestProvider = None
""" Responsible practitioner.
Type `FHIRReference` referencing `Practitioner` (represented as `dict` in JSON). """
self.ruleset = None
""" Resource version.
Type `Coding` (represented as `dict` in JSON). """
super(EligibilityResponse, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(EligibilityResponse, self).elementProperties()
js.extend([
("created", "created", fhirdate.FHIRDate, False, None, False),
("disposition", "disposition", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("organization", "organization", fhirreference.FHIRReference, False, None, False),
("originalRuleset", "originalRuleset", coding.Coding, False, None, False),
("outcome", "outcome", str, False, None, False),
("request", "request", fhirreference.FHIRReference, False, None, False),
("requestOrganization", "requestOrganization", fhirreference.FHIRReference, False, None, False),
("requestProvider", "requestProvider", fhirreference.FHIRReference, False, None, False),
("ruleset", "ruleset", coding.Coding, False, None, False),
])
return js
from . import coding
from . import fhirdate
from . import fhirreference
from . import identifier
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
a3a9e7e725c1b44de191970285de795ee13c6cdb | cc72013ede1b3bb02c32a3d0d199be4f7986c173 | /ch13/anagrams.py | ea593d66359a246b5e448761405cf3e62f20e948 | [] | no_license | alextickle/zelle-exercises | b87d2a1476189954565f5cc97ee1448200eb00d4 | b784ff9ed9b2cb1c56e31c1c63f3e2b52fa37875 | refs/heads/master | 2021-01-19T00:33:19.132238 | 2017-09-14T23:35:35 | 2017-09-14T23:35:35 | 87,182,609 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | def anagrams(s):
def anagrams(s):
    """Return every permutation of the characters of *s*, as a list.

    Recursive: permute the tail, then splice the head character into
    every possible position of each tail permutation. For a string with
    repeated characters the duplicates are kept (len == len(s)!).
    """
    if not s:
        return [s]
    head, tail = s[0], s[1:]
    # Idiomatic comprehension instead of the manual append loop.
    return [
        perm[:i] + head + perm[i:]
        for perm in anagrams(tail)
        for i in range(len(perm) + 1)
    ]
| [
"alexander.tickle@gmail.com"
] | alexander.tickle@gmail.com |
83bf77fbf76ff64192e41dc2e4a0ba42221f38b8 | 2c32cf726e111b8625265c458feeaea436652e83 | /Trie/implement-trie.py | de17e2d41fb4c7a26a782d2fb9471c1792719d35 | [] | no_license | minhthe/practice-algorithms-and-data-structures | 6fa3bf98e8e2fe98f4e32419fb797b1df4400364 | 488a82dd3a0c797859a6c9e1195d6d579d676073 | refs/heads/master | 2021-05-16T23:01:20.026475 | 2020-09-23T04:17:13 | 2020-09-23T04:17:13 | 250,505,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | '''https://leetcode.com/problems/implement-trie-prefix-tree/'''
class Node:
    """A single trie node: children keyed by character plus a word counter."""
    def __init__(self):
        self.child = {}    # maps one character to the child Node
        self.cntWord = 0   # number of words ending exactly at this node
class Trie:
    """Prefix tree supporting insert, exact-word search and prefix search."""
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = Node()
    def insert(self, word: str) -> None:
        """
        Inserts a word into the trie.
        """
        node = self.root
        for ch in word:
            if ch not in node.child:
                node.child[ch] = Node()
            node = node.child[ch]
        node.cntWord += 1
    def search(self, word: str) -> bool:
        """
        Returns if the word is in the trie.
        """
        node = self.root
        for ch in word:
            if ch not in node.child:
                return False
            node = node.child[ch]
        # Bug fix: return an actual bool as the annotation promises;
        # previously the raw insert count (an int) was returned.
        return node.cntWord > 0
    def startsWith(self, prefix: str) -> bool:
        """
        Returns if there is any word in the trie that starts with the given prefix.
        """
        node = self.root
        for ch in prefix:
            if ch not in node.child:
                return False
            node = node.child[ch]
        return True
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix) | [
"minhthe.007@gmail.com"
] | minhthe.007@gmail.com |
fd0a0b5a5b5e3cc90a4fd846d0b27ae1c4e1dcd3 | 15d7b8fb4fd92984844a9ef8b60213ad5b7344d4 | /fairseq/criterions/wav2vec_criterion.py | cc454b93096c428ce456d55c18ce7ad99c16564d | [
"MIT"
] | permissive | seeledu/fairseq | 18a80cecd9dbdf428569cda516aa290dca60a4d4 | 148327d8c1e3a5f9d17a11bbb1973a7cf3f955d3 | refs/heads/master | 2023-02-25T08:11:53.009964 | 2021-01-28T22:18:48 | 2021-01-28T22:21:10 | 307,387,657 | 0 | 0 | MIT | 2021-01-19T08:30:55 | 2020-10-26T13:49:16 | null | UTF-8 | Python | false | false | 7,130 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.logging.meters import safe_round
@dataclass
class Wav2VecCriterionConfig(FairseqDataclass):
    """Options for the wav2vec criterion (parsed via fairseq's dataclass CLI)."""
    # Use multi-class cross entropy over distractors (InfoNCE) instead of
    # per-logit binary cross entropy.
    infonce: bool = field(
        default=False,
        metadata={
            "help": "if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)"
        },
    )
    # Coefficients for the model's extra losses (everything after the main
    # contrastive term); a single value is broadcast to all extra losses.
    loss_weights: Optional[List[float]] = field(
        default=None,
        metadata={"help": "weights for additional loss terms (not first one)"},
    )
    # Keys from the model's net_output to copy into the logging output.
    log_keys: List[str] = field(
        default_factory=lambda: [],
        metadata={"help": "output keys to log"},
    )
@register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig)
class Wav2vecCriterion(FairseqCriterion):
def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
super().__init__(task)
self.infonce = infonce
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
logits = model.get_logits(net_output).float()
target = model.get_targets(sample, net_output)
weights = None
if hasattr(model, "get_target_weights") and not self.infonce:
weights = model.get_target_weights(target, net_output)
if torch.is_tensor(weights):
weights = weights.float()
losses = []
if self.infonce:
loss = F.cross_entropy(
logits,
target,
reduction="sum" if reduce else "none",
)
else:
loss = F.binary_cross_entropy_with_logits(
logits,
target.float(),
weights,
reduction="sum" if reduce else "none",
)
sample_size = target.numel() if self.infonce else target.long().sum().item()
losses.append(loss.detach().clone())
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(
self.loss_weights
), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, coef in zip(extra_losses, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
losses.append(p)
logging_output = {
"loss": loss.item() if reduce else loss,
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
for lk in self.log_keys:
# Only store "logits" and "target" for computing MAP and MAUC
# during validation
if lk == "logits":
if not self.training:
logging_output["logits"] = logits.cpu().numpy()
elif lk == "target":
if not self.training:
logging_output["target"] = target.cpu().numpy()
elif lk in net_output:
logging_output[lk] = float(net_output[lk])
if len(losses) > 1:
for i, l in enumerate(losses):
logging_output[f"loss_{i}"] = l.item()
if self.infonce:
with torch.no_grad():
if logits.numel() == 0:
corr = 0
count = 0
else:
assert logits.dim() > 1, logits.shape
max = logits.argmax(-1) == 0
min = logits.argmin(-1) == 0
both = max & min
corr = max.long().sum().item() - both.long().sum().item()
count = max.numel()
logging_output["correct"] = corr
logging_output["count"] = count
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
correct = sum(log.get("correct", 0) for log in logging_outputs)
metrics.log_scalar("_correct", correct)
total = sum(log.get("count", 0) for log in logging_outputs)
metrics.log_scalar("_total", total)
if total > 0:
metrics.log_derived(
"accuracy",
lambda meters: safe_round(
meters["_correct"].sum / meters["_total"].sum, 5
)
if meters["_total"].sum > 0
else float("nan"),
)
builtin_keys = {
"loss",
"ntokens",
"nsentences",
"sample_size",
"correct",
"count",
}
for k in logging_outputs[0]:
if k not in builtin_keys:
val = sum(log.get(k, 0) for log in logging_outputs)
if k.startswith("loss"):
metrics.log_scalar(
k, val / sample_size / math.log(2), sample_size, round=3
)
else:
metrics.log_scalar(k, val / len(logging_outputs), round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return False
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
86806da74e6fdfe7c4fefd8d632d1c262190b761 | 8d8663095c119b2c175247bdf7b5fa613c378061 | /2주차 실습/1316.py | d42921edb93ddaa10f650f874673054a04873574 | [] | no_license | mjson1954/piro13 | 047dfc7c6090548f4f67c8b36bd4e06eea493a79 | c81a731ff245231111065482b0cb5edf2687425c | refs/heads/master | 2022-11-20T00:52:49.364816 | 2020-07-17T11:53:05 | 2020-07-17T11:53:05 | 277,566,415 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | def check_group(word):
def check_group(word):
    """Return True if *word* is a "group word".

    A group word is one in which every letter occurs as a single
    contiguous run: "aabb" qualifies, "aabba" does not, because 'a'
    shows up again after its run ended.
    """
    last_letter = ""
    seen = set()  # letters whose run has started; set gives O(1) membership
    for letter in word:
        if letter == last_letter:  # still inside the current run
            continue
        if letter in seen:  # the letter re-appears after its run ended
            return False
        seen.add(letter)
        last_letter = letter
    return True  # every letter formed exactly one contiguous run
count = int(input()) # number of words to read
result=0 # running count of group words
for _ in range(count):
    word = input() # read one word
    if check_group(word):
        result+=1
print(result)
#1. Is the letter continuing a run? aabb -> compare with the previous letter.
#2. Has the letter appeared before? e.g. aabbaa
#3. When a letter breaks a run, check whether it was already seen -> if so, not a group word (False)
"mjson1954@gmail.com"
] | mjson1954@gmail.com |
7d5984884e7de5b64ec6eac294903973f53520a7 | 032a0c939d96d0e5307dbce86e11faf7060f4ed9 | /lte/gateway/python/magma/pipelined/qos/tc_ops_pyroute2.py | 8d804a69815a66d7d80268276aa7d934f874b634 | [
"BSD-3-Clause"
] | permissive | radha0018/magma | cac9ff3491dd2661e5dc0aa1f9a304a5428e2d2a | 8436966a4bb3cf7fdc3f567704062b6f9568db25 | refs/heads/master | 2023-05-05T08:26:07.132969 | 2021-05-27T18:44:44 | 2021-05-27T18:44:44 | 371,097,174 | 0 | 2 | NOASSERTION | 2021-05-26T16:26:21 | 2021-05-26T16:15:53 | Go | UTF-8 | Python | false | false | 5,564 | py | """
Copyright 2021 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import pprint
from pyroute2 import IPRoute, NetlinkError
from .tc_ops import TcOpsBase
LOG = logging.getLogger('pipelined.qos.tc_pyroute2')
QUEUE_PREFIX = '1:'
PROTOCOL = 0x0800
PARENT_ID = 0x10000
class TcOpsPyRoute2(TcOpsBase):
"""
Create TC scheduler and corresponding filter
"""
    def __init__(self):
        """Open the netlink socket used for all subsequent tc operations."""
        self._ipr = IPRoute()
        # NOTE(review): presumably a cache of iface -> ifindex lookups; it is
        # not used anywhere in the code visible here — confirm before relying on it.
        self._iface_if_index = {}
        LOG.info("initialized")
def create_htb(self, iface: str, qid: str, max_bw: int, rate: str,
parent_qid: str = None) -> int:
"""
Create HTB class for a UE session.
Args:
iface: Egress interface name.
qid: qid number.
max_bw: ceiling in bits per sec.
rate: rate limiting.
parent_qid: HTB parent queue.
Returns:
zero on success.
"""
LOG.debug("Create HTB iface %s qid %s max_bw %s rate %s", iface, qid, max_bw, rate)
try:
# API needs ceiling in bytes per sec.
max_bw = max_bw / 8
if_index = self._get_if_index(iface)
htb_queue = QUEUE_PREFIX + qid
ret = self._ipr.tc("add-class", "htb", if_index,
htb_queue, parent=parent_qid,
rate=str(rate).lower(), ceil=max_bw, prio=1)
LOG.debug("Return: %s", ret)
except (ValueError, NetlinkError) as ex:
LOG.error("create-htb error : %s", ex.code)
LOG.debug(ex, exc_info=True)
return ex.code
return 0
def del_htb(self, iface: str, qid: str) -> int:
"""
Delete given queue from HTB classed
Args:
iface: interface name
qid: queue-id of the HTB class
Returns:
"""
LOG.debug("Delete HTB iface %s qid %s", iface, qid)
try:
if_index = self._get_if_index(iface)
htb_queue = QUEUE_PREFIX + qid
ret = self._ipr.tc("del-class", "htb", if_index, htb_queue)
LOG.debug("Return: %s", ret)
except (ValueError, NetlinkError) as ex:
LOG.error("del-htb error error : %s", ex.code)
LOG.debug(ex, exc_info=True)
return ex.code
return 0
def create_filter(self, iface: str, mark: str, qid: str, proto: int = PROTOCOL) -> int:
"""
Create TC Filter for given HTB class.
"""
LOG.debug("Create Filter iface %s qid %s", iface, qid)
try:
if_index = self._get_if_index(iface)
class_id = int(PARENT_ID) | int(qid, 16)
ret = self._ipr.tc("add-filter", "fw", if_index, int(mark, 16),
parent=PARENT_ID,
prio=1,
protocol=proto,
classid=class_id)
LOG.debug("Return: %s", ret)
except (ValueError, NetlinkError) as ex:
LOG.error("create-filter error : %s", ex.code)
LOG.debug(ex, exc_info=True)
return ex.code
return 0
def del_filter(self, iface: str, mark: str, qid: str, proto: int = PROTOCOL) -> int:
"""
Delete TC filter.
"""
LOG.debug("Del Filter iface %s qid %s", iface, qid)
try:
if_index = self._get_if_index(iface)
class_id = int(PARENT_ID) | int(qid, 16)
ret = self._ipr.tc("del-filter", "fw", if_index, int(mark, 16),
parent=PARENT_ID,
prio=1,
protocol=proto,
classid=class_id)
LOG.debug("Return: %s", ret)
except (ValueError, NetlinkError) as ex:
LOG.error("del-filter error : %s", ex.code)
LOG.debug(ex, exc_info=True)
return ex.code
return 0
def create(self, iface: str, qid: str, max_bw: int, rate=None,
parent_qid: str = None, proto=PROTOCOL) -> int:
err = self.create_htb(iface, qid, max_bw, rate, parent_qid)
if err:
return err
err = self.create_filter(iface, qid, qid, proto)
if err:
return err
return 0
def delete(self, iface: str, qid: str, proto=PROTOCOL) -> int:
err = self.del_filter(iface, qid, qid, proto)
if err:
return err
err = self.del_htb(iface, qid)
if err:
return err
return 0
def _get_if_index(self, iface: str):
if_index = self._iface_if_index.get(iface, -1)
if if_index == -1:
if_index = self._ipr.link_lookup(ifname=iface)
self._iface_if_index[iface] = if_index
return if_index
def _print_classes(self, iface):
if_index = self._get_if_index(iface)
pprint.pprint(self._ipr.get_classes(if_index))
def _print_filters(self, iface):
if_index = self._get_if_index(iface)
pprint.pprint(self._ipr.get_filters(if_index))
| [
"noreply@github.com"
] | radha0018.noreply@github.com |
6b0742c489906576ca7ec31d21f3aa8d5c4568d3 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_hockeyj85_b.py | 89fd456eefc2119405fc60a848830b5031bc2b06 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 292 | py | from sys import stdout
def _count_flips(stack):
    # Each maximal run of equal characters costs one flip, except that the
    # bottom run needs none when it is already happy-side ('+') up.
    runs = 0
    prev = None
    for ch in stack:
        if ch != prev:
            runs += 1
            prev = ch
    if stack[len(stack) - 1] != '-':
        runs -= 1
    return runs


num_cases = int(raw_input())
for case in range(1, num_cases + 1):
    stdout.write("Case #%d: " % case)
    stdout.write("%d\n" % _count_flips(raw_input()))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.