blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
124d53870ab1f32476f7396ada3d0a47298746de | 162eed4191aef4431f94a0db1ad4185b6daa6f67 | /supervised_learning/0x02-tensorflow/5-create_train_op.py | 05322a25cd88afe944df96ecf16ba5df91cd92c0 | [] | no_license | giovannyortegon/holbertonschool-machine_learning | d6897bfb492f9d266302930927416637be3c172d | 8cd5e0f837a5c0facbf73647dcc9c6a3b1b1b9e0 | refs/heads/master | 2022-12-31T15:34:20.489690 | 2020-10-24T03:37:01 | 2020-10-24T03:37:01 | 279,656,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | #!/usr/bin/env python3
""" train operation """
import tensorflow as tf
def create_train_op(loss, alpha):
""" create_train_op - creates the training operation.
Args:
        loss is the loss of the network's prediction
alpha is the learning rate
Returns:
an operation that trains the network using gradient descent
"""
train_op = tf.train.GradientDescentOptimizer(alpha)
grads = train_op.compute_gradients(loss)
return train_op.apply_gradients(grads)
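

# --- Illustrative usage sketch (an editorial addition, not part of the
# original task file). It assumes the TensorFlow 1.x graph API that this
# module already uses; the toy linear-regression tensors below are invented
# purely to show how create_train_op would be wired up.
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, shape=(None, 1))
    y = tf.placeholder(tf.float32, shape=(None, 1))
    w = tf.Variable(tf.zeros((1, 1)))
    loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))
    # one gradient-descent step on `loss` with learning rate 0.01
    train_op = create_train_op(loss, alpha=0.01)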
| [
"ortegon.giovanny@hotmail.com"
] | ortegon.giovanny@hotmail.com |
d1c996c98e38caf3f89a4b1b7f101c7d1770330d | 5bd3122d230471b048429f5e9c49a0b39c8a54fc | /Atcoder_contests/ABC/165A.py | 98d42dc081acdb8f08d06c61550ccddab9dc004a | [] | no_license | nao1412/Competitive_Programing_Codes | e230e2fa85027e41c5ee062083801bb299effe9b | 98c29b5ba75e75502cf27fcf365a7aedcd6c273c | refs/heads/main | 2023-06-05T18:45:59.733301 | 2021-06-23T15:02:25 | 2021-06-23T15:02:25 | 374,061,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | k = int(input())
ans = 'OK'
a, b = map(int, input().split())
if b-a+1 >= k:
ans = 'OK'
else:
for i in range(a, b+1):
if i%k == 0:
ans = 'OK'
break
else:
ans = 'NG'
print(ans) | [
"naoya_greeeen_0720@icloud.com"
] | naoya_greeeen_0720@icloud.com |
3d3bdc439906914e4d1a544305fefc7b801e63fd | 6d80c2e28c39b1861d909c7616ce8455f2b28744 | /character_stream.py | 415517a5c4ef622e0d85ae20ea615fa6c5f81221 | [] | no_license | o11c/20131022-parser | ef1778c8469fe397524c0bb9e72fa284cde4af25 | 7da2dfa3a92dcdf32bfa11bb67c6569d3e1e1767 | refs/heads/master | 2020-02-26T13:24:06.954176 | 2013-10-22T22:34:40 | 2013-10-22T22:34:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | class EOFError(Exception):
pass
class CharacterStream(object):
def __init__(self, stream):
self.stream = stream
self.adv()
def get(self):
return self.ch
def adv(self):
self.ch = self.stream.read(1)
if self.ch == '':
raise EOFError()
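

# --- Illustrative usage sketch (an editorial addition, not part of the
# original module). The sample text and the io.StringIO source are assumptions
# made only to show the peek/advance cycle of CharacterStream.
if __name__ == '__main__':
    import io

    cs = CharacterStream(io.StringIO(u"ab"))
    assert cs.get() == 'a'  # get() peeks at the current character
    cs.adv()                # adv() reads the next character
    assert cs.get() == 'b'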
| [
"cheery@boxbase.org"
] | cheery@boxbase.org |
2d74d3b980a16f4b736de35c0620c251cd4bd605 | 575d197af5bbc31b89df37f8733e81707294948c | /Python2/examples/Excel/14_numeric_format.py | 533f0bc504b9462fadff2bd1f8029ac5a60655f1 | [] | no_license | tisnik/python-programming-courses | 5c7f1ca9cae07a5f99dd8ade2311edb30dc3e088 | 4e61221b2a33c19fccb500eb5c8cdb49f5b603c6 | refs/heads/master | 2022-05-13T07:51:41.138030 | 2022-05-05T15:37:39 | 2022-05-05T15:37:39 | 135,132,128 | 3 | 2 | null | 2021-04-06T12:19:16 | 2018-05-28T08:27:19 | Python | UTF-8 | Python | false | false | 1,247 | py | #!/usr/bin/env python3
# vim: set fileencoding=utf-8
"""Create a workbook with a longer table; the cells use an explicit numeric format."""
import xlsxwriter
# create an object representing the whole workbook
with xlsxwriter.Workbook("example14.xlsx") as workbook:
    # insert a new worksheet into the workbook
worksheet = workbook.add_worksheet()
    # define a new style
bold_style = workbook.add_format()
bold_style.set_bold()
bold_style.set_font_color("blue")
    # define another new style
red_style = workbook.add_format()
red_style.set_font_color("red")
    # define the numeric format
numeric_format = workbook.add_format({"num_format": "0.0000"})
    # set the column widths and styles
worksheet.set_column("A:A", 8, red_style)
worksheet.set_column("B:B", 14, numeric_format)
worksheet.set_column("C:Z", 2)
    # style for the first row
worksheet.set_row(0, 20, bold_style)
    # cells with text
worksheet.write_string("A1", "x")
worksheet.write_string("B1", "1/x")
    # cells with numeric values
for x in range(1, 21):
worksheet.write_number(x, 0, x)
worksheet.write_number(x, 1, 1.0 / x)
    # the workbook will be closed automatically
| [
"ptisnovs@redhat.com"
] | ptisnovs@redhat.com |
ca0dfc4464e55ea5612d52669b8d134596d3404c | 5a1a695829a2d1dbf4daa0736f0fbd6feffc7e63 | /JUNGOL/1707(๋ฌํฝ์ด์ฌ๊ฐํ).py | 07bb09b825bfea02fae29ad62638f417bf139b21 | [] | no_license | juyi212/Algorithm_study | f5d263c5329c994a457bbe897e5e1405d2b1d67a | f225cc593a50b74686111f654f7133707a1d1310 | refs/heads/master | 2023-03-21T20:02:36.138688 | 2021-03-16T14:16:40 | 2021-03-16T14:16:40 | 325,008,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | n = int(input())
dir = [(0, 1), (1, 0), (0, -1), (-1, 0)]
matrix = [[-1]*n for _ in range(n)]
num = 1
visited = [[False]*n for _ in range(n)]
nr, nc = 0, 0
matrix[nr][nc] = 1
visited[nr][nc] = True
cnt = 0
while cnt <= (n*n):
for r, c in dir:
cnt += 1
while True:
nr += r
nc += c
if 0 <= nr < n and 0 <= nc < n and not visited[nr][nc]:
num += 1
visited[nr][nc] = True
matrix[nr][nc] = num
else:
nr -= r
nc -= c
break
for i in matrix:
for j in i:
print(j, end = ' ')
print()
| [
"dea8307@naver.com"
] | dea8307@naver.com |
b7b17a2f843d0c6a17b1aa1e0dbc77074c1d8db7 | 4629bf721ff3d49f9f8e3e57babc325f38fa4b7e | /uliweb/utils/setup.py | 4f740b75ed31de85e1ec0160e159648737b04e0b | [
"BSD-2-Clause"
] | permissive | zhangchunlin/uliweb3 | b3d331fd9b1738f0a38f007a0def240d85c31443 | 3c92763d3172b9f1041ea93816daf4224c8512c0 | refs/heads/master | 2023-03-07T01:49:35.779929 | 2018-09-26T05:51:14 | 2018-09-26T05:51:14 | 152,057,856 | 1 | 1 | BSD-2-Clause | 2018-10-08T10:02:56 | 2018-10-08T10:02:56 | null | UTF-8 | Python | false | false | 5,765 | py | from setuptools import setup
from setuptools.command import build_py as b
import os,sys
import glob
#remove build and dist directory
import shutil
#if os.path.exists('build'):
# shutil.rmtree('build')
#if os.path.exists('dist'):
# shutil.rmtree('dist')
def copy_dir(self, package, src, dst):
self.mkpath(dst)
for r in os.listdir(src):
if r in ['.svn', '_svn']:
continue
fpath = os.path.join(src, r)
if os.path.isdir(fpath):
copy_dir(self, package + '.' + r, fpath, os.path.join(dst, r))
else:
ext = os.path.splitext(fpath)[1]
if ext in ['.pyc', '.pyo', '.bak', '.tmp']:
continue
target = os.path.join(dst, r)
self.copy_file(fpath, target)
def find_dir(self, package, src):
for r in os.listdir(src):
if r in ['.svn', '_svn']:
continue
fpath = os.path.join(src, r)
if os.path.isdir(fpath):
for f in find_dir(self, package + '.' + r, fpath):
yield f
else:
ext = os.path.splitext(fpath)[1]
if ext in ['.pyc', '.pyo', '.bak', '.tmp']:
continue
yield fpath
def build_package_data(self):
for package in self.packages or ():
src_dir = self.get_package_dir(package)
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
copy_dir(self, package, src_dir, build_dir)
setattr(b.build_py, 'build_package_data', build_package_data)
def get_source_files(self):
filenames = []
for package in self.packages or ():
src_dir = self.get_package_dir(package)
filenames.extend(list(find_dir(self, package, src_dir)))
return filenames
setattr(b.build_py, 'get_source_files', get_source_files)
from setuptools.command.develop import develop
from distutils import sysconfig
unlink = os.unlink
def rm(obj):
import shutil
if os.path.exists(obj):
try:
if os.path.isdir(obj):
if os.path.islink(obj):
unlink(obj)
else:
shutil.rmtree(obj)
else:
if os.path.islink(obj):
unlink(obj)
else:
os.remove(obj)
except:
import traceback
traceback.print_exc()
raise
__CSL = None
def symlink(source, link_name):
'''symlink(source, link_name)
Creates a symbolic link pointing to source named link_name
    copied from http://stackoverflow.com/questions/1447575/symlinks-on-windows/7924557
'''
global __CSL
if __CSL is None:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
__CSL = csl
flags = 0
if source is not None and os.path.isdir(source):
flags = 1
if __CSL(link_name, source, flags) == 0:
raise ctypes.WinError()
def pre_run(func):
def _f(self):
global unlink
if self.distribution.package_dir and sys.platform == 'win32':
try:
import ntfslink
except:
print ('You need to install ntfslink package first in windows platform.')
print ('You can find it at https://github.com/juntalis/ntfslink-python')
sys.exit(1)
if not hasattr(os, 'symlink'):
os.symlink = symlink
os.path.islink = ntfslink.symlink.check
unlink = ntfslink.symlink.unlink
func(self)
return _f
develop.run = pre_run(develop.run)
def post_install_for_development(func):
def _f(self):
func(self)
packages = self.distribution.packages
package_dir = self.distribution.package_dir
libpath = sysconfig.get_python_lib()
if not package_dir: return
for p in sorted(packages):
#if the package is something like 'x.y.z'
#then create site-packages/x/y
#then create symlink to z to src directory
ps = p.split('.')
if len(ps)>1:
path = libpath
for x in ps[:-1]:
path = os.path.join(path, x)
if not os.path.exists(path):
os.makedirs(path)
inifile = os.path.join(path, '__init__.py')
if not os.path.exists(inifile):
with open(inifile, 'w') as f:
f.write('\n')
pkg = os.path.join(libpath, *ps)
d = package_dir.get(p, None)
if d is None:
print ("Error: the package %s directory can't be found in package_dir, please config it first" % p)
sys.exit(1)
src = os.path.abspath(os.path.join(os.getcwd(), d))
print ('Linking ', src, 'to', pkg)
rm(pkg)
os.symlink(src, pkg)
return _f
develop.install_for_development = post_install_for_development(develop.install_for_development)
def post_uninstall_link(func):
def _f(self):
func(self)
packages = self.distribution.packages
package_dir = self.distribution.package_dir
if not package_dir: return
libpath = sysconfig.get_python_lib()
for p in sorted(packages, reverse=True):
print ('Unlink... %s' % p)
pkg = os.path.join(libpath, p.replace('.', '/'))
rm(pkg)
return _f
develop.uninstall_link = post_uninstall_link(develop.uninstall_link)
| [
"limodou@gmail.com"
] | limodou@gmail.com |
055b87c0c32ecf5c0e0ae69c04812464e18972ed | c5a921726a3805663d26a2dbaa47e49497931d4e | /TDD_Python/superlists/functional_tests_v3.py | 2bcdfbc7c2b2d6920b2cf500abc85118c4d25264 | [] | no_license | snowdj/cs_course | a50d07548198b4202e8abde01ec572e2cce38ab3 | fa6504cb5145d10952f4615478fa745f4b35ba13 | refs/heads/master | 2020-03-17T15:18:52.190747 | 2018-05-13T08:08:51 | 2018-05-13T08:08:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | """
TDD Python, chapter 4, page 38.
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_and_retrieve_it_later(self):
# She goes to check out its homepage.
self.browser.get('http://localhost:8000')
# She notices the page title and header mention to-do lists.
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# She is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# She types "Buy peacock feathers" into a text box.
inputbox.send_keys('Buy peacock feathers')
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feather" as an item in a to-do list table
inputbox.send_keys(Keys.ENTER)
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertTrue(
any(row.text == '1: Buy peacock feathers' for row in rows),
"New to-do item did not appear in table"
)
self.fail('Finish the test!')
if __name__ == '__main__':
unittest.main(warnings='ignore')
| [
"jesse@liu.onl"
] | jesse@liu.onl |
47b0c80ab9d652c8131826c08251e4b823d88274 | d9d1a3ea9f67845e6bbaa97cda4a60a8fc776ce3 | /galtrace/libs/crawler/__init__.py | fa75c505970cf8ee6c207b4c4273a10afc52e9b9 | [] | no_license | legnaleurc/galtrace | 0340bfebd367e45c87eff8254f5cd58550f18721 | 27f88c5db28f197766cd3cc732b5e1eb921d74bf | refs/heads/master | 2021-01-22T23:48:49.352510 | 2015-04-25T15:29:27 | 2015-04-25T15:29:27 | 3,678,797 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from .sites import UnsupportedLinkError
from .sites import UnavailableLinkError
from .crawler import fetch
__all__ = ['fetch', 'UnsupportedLinkError', 'UnavailableLinkError']
| [
"legnaleurc@gmail.com"
] | legnaleurc@gmail.com |
cde5ef37b383f1c7e93382d8a1058c013371fafb | bb109bd629c67a30a57850ebc97f9a9625aa998f | /wmtexe/cmi/dup.py | 26e81298bdb534a1b20a6862f4198083e7fdeabd | [
"MIT"
] | permissive | csdms/wmt-exe | b0966f27792be853e8469f12a7e78aea24da6bfa | 9f6e5a20e65765389682161b985cab186db88fce | refs/heads/master | 2022-11-15T06:27:23.589160 | 2022-10-25T23:57:21 | 2022-10-25T23:57:21 | 22,662,428 | 0 | 2 | MIT | 2022-10-25T23:57:22 | 2014-08-05T23:04:09 | Python | UTF-8 | Python | false | false | 353 | py | import argparse
import yaml
from .bocca import dup_c_impl
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path', help='Path to impl files')
parser.add_argument('name', help='Name of new class')
args = parser.parse_args()
dup_c_impl(args.path, args.name, destdir='.')
if __name__ == '__main__':
main()
| [
"mcflugen@gmail.com"
] | mcflugen@gmail.com |
59cac7d3b44940ed7343645d2d5df7b8ec308d3a | 6b96a11195094a0023a059ba7d5df95ce58c56f1 | /1527B.py | d2463a6c906bb932f57923f1d8afb8efbdbb7d93 | [] | no_license | ldfdev/CodeForces-Div2-Problems | d932b09ee14a430fd0054d5b295f6016553be2b7 | d18824a4330a4593099d249496ae22f3f69d5f44 | refs/heads/master | 2021-08-11T03:29:18.772870 | 2021-07-25T07:21:09 | 2021-07-29T20:09:43 | 72,371,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py |
# explanation bemow:
# 1 0 0 0 0 1
# alice 1 1 0 0 0 1
# bob 1 1 0 0 1 0
# alice 1 1 0 1 1 1
# bob 1 1 1 0 1 1 (rev)
# alice 1 1 1 1 1 1
# 0 0 0
# alice 0 1 0
# bob 1 1 0
# alice 0 1 1 (rev)
# bob 1 1 1
def solve():
n = int(input())
palindrome = list(input())
zeros = len([x for x in palindrome if x == '0'])
if zeros == 1:
return 'BOB'
if zeros & 1:
return 'ALICE'
return 'BOB'
if __name__=='__main__':
for _ in range(int(input().strip())):
print(solve()) | [
"ldf.develop@gmail.com"
] | ldf.develop@gmail.com |
29bd029d56f2d4656129ad4580be26f15d405eac | bbcba0bb02cc62c4d445582172605776ab1be8cb | /save_to_csv.py | 52c9c1e2deb5ed1112c543a1a5324897d69ba610 | [] | no_license | wvuvl/GPND2 | 36f208a9d5cb35bf020c251fc226ce6dfe213187 | b41dd8d662e11ff5999ac4e2392f536f4e62a50c | refs/heads/master | 2023-02-08T09:30:40.795626 | 2020-12-07T03:34:29 | 2020-12-07T03:34:29 | 319,194,499 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | import csv
import numpy as np
import pickle
import pprint
def save_results(results, filename):
with open(filename[:-4] + ".pkl", "wb") as f:
pickle.dump(results, f)
print(results)
pp = pprint.PrettyPrinter()
pp.pprint(results)
percentages = list(results[0].keys())
measures = list(list(results[0].values())[0].keys())
f = open(filename, 'wt')
writer = csv.writer(f)
for m in measures:
writer.writerow((m,))
header = ['Percentage %d' % x for x in percentages]
writer.writerow(header)
for r in results:
row = []
for p in percentages:
if p in r:
row.append(r[p][m])
writer.writerow(tuple(row))
f.close()
mean_f1 = np.asarray([r[50]['f1'] for r in results]).mean()
f = open(filename[:-4] + "_%.3f" % mean_f1, 'w')
f.close()
print('Mean F1 at 50%%: %.3f' % mean_f1)
| [
"stanislav@podgorskiy.com"
] | stanislav@podgorskiy.com |
43b225614126bfa4e218f3018c9185630319aeb4 | c2eba49f66ee0948c0ab089475b02f3a641fafb1 | /xfc_control/migrations/0001_initial.py | 9d000c9fdbc5537eca92dc318a1642a5f7b7b9c4 | [] | no_license | cedadev/django-xfc_control | d4dcb7205c889443c0adba423a095b8b9ba68ffd | 63a792214f267f2beb975d7138c46b449f560dbf | refs/heads/master | 2021-07-15T05:15:02.873093 | 2020-04-21T11:35:46 | 2020-04-21T11:35:46 | 91,581,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,616 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-03 12:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import sizefield.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CachedFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(help_text='Relative path to the file', max_length=2024)),
('size', sizefield.models.FileSizeField(default=0, help_text='Size of the file')),
('first_seen', models.DateTimeField(blank=True, help_text='Date the file was first scanned by the cache_manager', null=True)),
],
),
migrations.CreateModel(
name='CacheDisk',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mountpoint', models.CharField(blank=True, help_text='Root directory of cache area', max_length=1024, unique=True)),
('size_bytes', sizefield.models.FileSizeField(default=0, help_text='Maximum size on the disk that can be allocated to the cache area')),
('allocated_bytes', sizefield.models.FileSizeField(default=0, help_text='Amount of space allocated to users')),
('used_bytes', sizefield.models.FileSizeField(default=0, help_text='Used value calculated by update daemon')),
],
),
migrations.CreateModel(
name='ScheduledDeletion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_entered', models.DateTimeField(blank=True, help_text='Date the deletion was entered into the scheduler', null=True)),
('time_delete', models.DateTimeField(blank=True, help_text='Time the deletion will take place', null=True)),
('delete_files', models.ManyToManyField(default=None, help_text='The list of files to be deleted in this schedule', to='xfc_control.CachedFile')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name of user - should be same as JASMIN user name', max_length=254)),
('email', models.EmailField(help_text='Email of user', max_length=254)),
('notify', models.BooleanField(default=False, help_text='Switch notifications on / off')),
('quota_size', sizefield.models.FileSizeField(default=0, help_text='Size of quota allocated to user, in (bytes day)')),
('quota_used', sizefield.models.FileSizeField(default=0, help_text='Size of quota allocated to user, in (bytes day)')),
('hard_limit_size', sizefield.models.FileSizeField(default=0, help_text='Upper limit allocated to user, in bytes. This limit cannot be exceeded.')),
('total_used', sizefield.models.FileSizeField(default=0, help_text='Total size of all files owned by the user.')),
('cache_path', models.CharField(help_text='Relative path to cache area', max_length=2024)),
('cache_disk', models.ForeignKey(help_text='Cache disk allocated to the user', on_delete=django.db.models.deletion.CASCADE, to='xfc_control.CacheDisk')),
],
),
migrations.CreateModel(
name='UserLock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_lock', models.ForeignKey(blank=True, help_text='User that is locked', on_delete=django.db.models.deletion.CASCADE, to='xfc_control.User')),
],
),
migrations.AddField(
model_name='scheduleddeletion',
name='user',
field=models.ForeignKey(help_text='User that the ScheduledDeletion belongs to', on_delete=django.db.models.deletion.CASCADE, to='xfc_control.User'),
),
migrations.AddField(
model_name='cachedfile',
name='user',
field=models.ForeignKey(help_text='User that owns the file', null=True, on_delete=django.db.models.deletion.CASCADE, to='xfc_control.User'),
),
]
| [
"neil.massey@stfc.ac.uk"
] | neil.massey@stfc.ac.uk |
2c01a97f078d58f8355d548b0c2dc3c1a4e8250e | 0c6832a2534ad92fa9c0b3f8c38588d05bf7cdac | /myjson/json_byteified.py | 4c22d6de876a04aaacda398ba15b06cba9383732 | [] | no_license | rlaneyjr/myutils | 62690f932e642e8025f1abbaf871890d2df38aaa | 1966fe15c1e28725486c3286113722bd109d8bbf | refs/heads/master | 2021-07-14T19:15:28.020853 | 2020-06-12T20:51:37 | 2020-06-12T20:51:37 | 163,216,692 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | #!/usr/bin/env python
# title :json_byteified.py
# description :Json Byteified using json_hook
# author :Ricky Laney
# date :20170814
# version :1
# usage :python json_byteified.py or ./json_byteified.py
# notes :
# python_version :2.7.13
'''
Example usage:
>>> json_loads_byteified('{"Hello": "World"}')
{'Hello': 'World'}
>>> json_loads_byteified('"I am a top-level string"')
'I am a top-level string'
>>> json_loads_byteified('7')
7
>>> json_loads_byteified('["I am inside a list"]')
['I am inside a list']
>>> json_loads_byteified('[[[[[["I am inside a big nest of lists"]]]]]]')
[[[[[['I am inside a big nest of lists']]]]]]
>>> json_loads_byteified('{"foo": "bar",
"things": [7, {"qux": "baz",
"moo": {"cow": ["milk"]}}]}')
{'things': [7, {'qux': 'baz', 'moo': {'cow': ['milk']}}], 'foo': 'bar'}
>>> json_load_byteified(open('somefile.json'))
{'more json': 'from a file'}
'''
import json
def json_load_byteified(file_handle):
return _byteify(
json.load(file_handle, object_hook=_byteify),
ignore_dicts=True
)
def json_loads_byteified(json_text):
return _byteify(
json.loads(json_text, object_hook=_byteify),
ignore_dicts=True
)
def _byteify(data, ignore_dicts=False):
# if this is a unicode string, return its string representation
if isinstance(data, unicode):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True):
_byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
# if it's anything else, return it in its original form
return data
| [
"rlaneyjr@gmail.com"
] | rlaneyjr@gmail.com |
34b8fa298e9825147350ab807b8c4e738840a108 | 833ef0efdcac57f8bc585263fdd303edc06a5caa | /sc-kpm/sc-python/services/common/sc_log.py | 885f80bd62050788b0fd1293fbdad4fe7081c7ae | [
"MIT"
] | permissive | PogudoTanya/sc-machine | f77d5965b9f81cf4852afe0e4d5394f869be44d5 | ffa65770b457968f4e6f39a6d2f2513e1ab9462a | refs/heads/master | 2022-11-10T05:33:22.903073 | 2020-02-22T21:06:54 | 2020-02-22T21:06:54 | 271,785,773 | 0 | 0 | NOASSERTION | 2020-06-12T11:50:20 | 2020-06-12T11:50:19 | null | UTF-8 | Python | false | false | 663 | py | from termcolor import colored, cprint
class Log:
def __init__(self, subsystem_name):
self.name = subsystem_name
def __print_colored(self, level, level_color, message, message_color):
print (colored('[' + self.name + '][' + level + ']', level_color) + colored(': ' + message, message_color))
def debug(self, message):
self.__print_colored('debug', 'grey', message, 'grey')
def info(self, message):
self.__print_colored('info', 'white', message, 'white')
def error(self, message):
self.__print_colored('error', 'red', message, 'red')
def warning(self, message):
self.__print_colored('warning', 'yellow', message, 'yellow') | [
"denis.koronchik@gmail.com"
] | denis.koronchik@gmail.com |
7508a95e0bacdf8bef69e846404a88d24fcc3681 | 2df21455b93cf15328cda87de9831bd23f9d8343 | /GTFparse.py | 88ee302fa60d28a34dbbe6d303f093d08b82178b | [] | no_license | RagnarDanneskjold/ProcessSingleCell | d2e8f07f485319dea6df6d58e5f6cc93662cc983 | bc6e7e4eca5ad77f6b15ead6fc7badaa4f7a7996 | refs/heads/master | 2021-06-15T03:45:09.984440 | 2017-03-21T20:07:56 | 2017-03-21T20:07:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,146 | py | # Aparna Rajpurkar
# This is the GTFparse module of the FastCount.py program
# imports
import re
import operator
from Tree import GeneTree # my Object Oriented classes
def parseValidLine(line):
"""Function that parses a line of a GTF file and returns a useful \
data structure of its fields"""
# split line into array
line_ar = line.rstrip('\n\r').split('\t')
# if line is too short, return nothing
if len(line_ar) < 9:
return {}
# Grab the gene ID from the line using regular expressions
id_string = line_ar[8]
gene_id = re.search(r'gene_id \"(.+?)\";', id_string).group(1)
# construct the results dictionary for this line
result = {
'chrom': line_ar[0],
'feature': line_ar[2],
'start': int(line_ar[3]),
'end': int(line_ar[4]),
'strand': line_ar[6],
'gene_id': gene_id
}
# We are only interested in gene and exon features, so return
# nothing if not gene or exon
if result['feature'] != "gene" and result['feature'] != "exon":
return {}
# return the result dictionary
return result
def parseGTFFile (gtffile, bam_num):
"""Function that handles parsing the GTF file and intializing the GeneTree\
Objects for each chromosome and strand"""
# open the GTF file and initialize local variables
gtf_fp = open(gtffile,"r")
parsedData = dict()
curr_gene = 0
# iterate over every line in the GTF file
for line in gtf_fp:
# skip if this is a header line
if line.startswith('#'):
continue
# parse line into fields dictionary
fields = parseValidLine(line)
# skip if we could not parse, or feature is not of interest
if not fields:
continue
# if we're on a new chromosome, initialize its GeneTree objects
if fields['chrom'] not in parsedData:
# set this chromosome's strand dictionary
parsedData[fields['chrom']] = dict()
# for each strand, intitialize a GeneTree object
# which will store all entries for its genes
parsedData[fields['chrom']]['+'] = GeneTree(fields['chrom'],'+')
parsedData[fields['chrom']]['-'] = GeneTree(fields['chrom'],'-')
# if this feature is a gene, add it to the GeneTree
if fields['feature'] == 'gene':
# call the addNode method of the GeneTree object on this gene
curr_gene = parsedData[fields['chrom']][fields['strand']].addNode(fields,fields['gene_id'], bam_num)
else: # exon
# if this is an exon, add it to the current gene's Tree
parsedData[fields['chrom']][fields['strand']].addExon(fields,curr_gene)
# close the GTF file
gtf_fp.close()
# for each chromosome and strand, call the GeneTree object's balance method
# to ensure optimally efficient find() operations later
for chrom in parsedData:
for strand in parsedData[chrom]:
parsedData[chrom][strand].balanceAll()
# return our data structure
return parsedData
| [
"aparna.arr@gmail.com"
] | aparna.arr@gmail.com |
6fc2b00a799a1bc7eac3492402f25eef3d1aabc9 | 8e225d87038bdca1e5c82b4a875a1c2d25dced0c | /setup.py | cd2dfa11505b41434faed9d03535b1b083d27d7d | [
"MIT"
] | permissive | feifzhou/fortpy | c65b486f942db8cdfa325f1d11dbd37e60b7a0d0 | dc926c9169033ea59d31ea7df7bbe5373633aeb1 | refs/heads/master | 2021-01-15T23:53:18.793577 | 2015-08-26T06:04:48 | 2015-08-26T06:04:48 | 41,407,308 | 0 | 0 | null | 2015-08-26T05:58:56 | 2015-08-26T05:58:56 | null | UTF-8 | Python | false | false | 2,050 | py | #!/usr/bin/env python
try:
from setuptools import setup
args = {}
except ImportError:
from distutils.core import setup
print("""\
*** WARNING: setuptools is not found. Using distutils...
""")
from setuptools import setup
try:
from pypandoc import convert
read_md = lambda f: convert(f, 'rst')
except ImportError:
print("warning: pypandoc module not found, could not convert Markdown to RST")
read_md = lambda f: open(f, 'r').read()
setup(name='Fortpy',
version='1.6.1',
description='Fortran Parsing, Unit Testing and Intellisense',
long_description=read_md('README.md'),
author='Conrad W Rosenbrock',
author_email='rosenbrockc@gmail.com',
url='https://github.com/rosenbrockc/fortpy',
license='MIT',
install_requires=[
"argparse",
"pyparsing",
"python-dateutil",
"paramiko",
"termcolor",
"numpy",
"matplotlib",
"scipy",
],
packages=['fortpy', 'fortpy.parsers', 'fortpy.isense', 'fortpy.testing',
'fortpy.templates', 'fortpy.interop',
'fortpy.printing' ],
scripts=['fortpy/scripts/compare.py', 'fortpy/scripts/convert.py', 'fortpy/scripts/runtests.py',
'fortpy/scripts/analyze.py', 'fortpy/scripts/parse.py', 'fortpy/scripts/ftypes.py'],
package_data={'fortpy': ['isense/builtin.xml']},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| [
"rosenbrockc@gmail.com"
] | rosenbrockc@gmail.com |
c8d103b20e2d4b98c3aaf390b547b1ce8ff564c1 | c2281d55883a51b2698119e3aeb843df9c8c885b | /Thesis ch 2/ClusteringBuckets/GenericModels/LogisticRegression/12.PredictProb.py | b05ad079af41b543da0b9691ac6c46327c6c7b1c | [] | no_license | akshitasawhney3008/Thesis-Final | 1c004ffc6c2dd6ec711b212f9a35e46ea067c9c7 | 10865bab16bcc2ca4a5d4af345ffb4f2f7222104 | refs/heads/master | 2023-02-01T20:56:43.763024 | 2020-12-10T09:28:45 | 2020-12-10T09:28:45 | 320,037,411 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,897 | py | import numpy as np
from sklearn.metrics import precision_score, recall_score, roc_curve,auc, accuracy_score, matthews_corrcoef, f1_score
import pickle
from sklearn.preprocessing import normalize
iterations = 5
# threshold = 0.5
def getPredictionsGivenThreshold(myMatrix, th):
myList = []
for i in range(myMatrix.shape[0]):
p1 = myMatrix[i, 1]
if p1>= th:
myList.append(1)
else:
myList.append(0)
return np.asarray(myList)
def getResults(predProb, labels):
thresholdList = []
precisionList = []
recallList = []
aucList = []
accuracyListtr = []
accuracyList = []
mcList = []
f1scoreList = []
for threshold in thresholdRange:
matrixPredictions = getPredictionsGivenThreshold(predProb, threshold)
precision = precision_score(labels, matrixPredictions)
recall = recall_score(labels, matrixPredictions)
fpr, tpr, thresholds = roc_curve(labels, matrixPredictions, pos_label=1)
auroc = auc(fpr, tpr)
accuracy = accuracy_score(labels, matrixPredictions)
matthewsCoeff = matthews_corrcoef(labels, matrixPredictions)
f1score = f1_score(labels, matrixPredictions)
thresholdList.append(threshold)
precisionList.append(precision)
recallList.append(recall)
aucList.append(auroc)
accuracyList.append(accuracy)
mcList.append(matthewsCoeff)
f1scoreList.append(f1score)
print(max(accuracyList))
ind = accuracyList.index((max(accuracyList)))
print('Threshold: ' + str(thresholdList[ind]))
print('Precision: ' + str(precisionList[ind]))
print('Recall: ' + str(recallList[ind]))
print('F1: ' + str(f1scoreList[ind]))
print('Accuracy: ' + str(accuracyList[ind]))
print('AUROC: ' + str(aucList[ind]))
print('MCC: ' + str(mcList[ind]) + '\n')
return max(accuracyList),precisionList[ind],recallList[ind], f1scoreList[ind],aucList[ind],mcList[ind]
path = "C://Users//Arushi//PycharmProjects//ThesisChap2//ClusteringBuckets//"
listOfPredictionProbabilities = []
actualpredictions = []
# genenamesFile = open("transformedColumnNames221.txt",'r').readline().rstrip('\n').split(',')
# selectedFeaturesfile = open('SelectedFeatures.csv').readlines()
# flag = 0
#
# list_of_gene_numbers = []
# for line in selectedFeaturesfile:
#
#
# list_of_gene_names = line.rstrip('\n').split(',')
# if len(list_of_gene_names) == 55:
# for gene in list_of_gene_names:
# list_of_gene_numbers.append(genenamesFile.index(gene))
# flag = 1
finacclist = []
finpre = []
finrec =[]
finf1 =[]
finauc =[]
finmcc =[]
thresholdRange = np.linspace(start=0.40, stop=0.60, num=500)
for i in range(iterations):
X_test = np.load(path + 'final_test_binarydata_' + str(i) + '.npy')
Y_test = np.load(path + 'final_test_labels_' + str(i) + '.npy')
# X_test = X_test[:, list_of_gene_numbers]
X_test = X_test.astype('float')
X_test = normalize(X_test)
Y_test = Y_test.astype('float')
Y_test = Y_test.astype(int)
with open('Model_ism_lr' + str(i) + '.pkl', 'rb') as f:
model = pickle.load(f)
predictionsProb_file = open("predictionsProb_ism_lr" + str(i) + ".csv", 'w')
predictionProbabilities = model.predict_proba(X_test)
for prob in predictionProbabilities:
for pr in prob:
predictionsProb_file.write(str(pr) + ',')
predictionsProb_file.write('\n')
acc,pre,rec,f1,au,mcc = getResults(predictionProbabilities, Y_test)
finacclist.append(acc)
finpre.append(pre)
finrec.append(rec)
finf1.append(f1)
finauc.append(au)
finmcc.append(mcc)
print(sum(finacclist)/iterations)
print(sum(finpre)/iterations)
print(sum(finrec)/iterations)
print(sum(finf1)/iterations)
print(sum(finauc)/iterations)
print(sum(finmcc)/iterations)
print('Done') | [
"akshita17143@iiitd.ac.in"
] | akshita17143@iiitd.ac.in |
1f1da02090abd16cccd2e361003b7c7c5129c546 | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/account/tests/test_reconciliation_heavy_load.py | 076ed17493b8981e32bae3e3e33fa1a0c0f50dbb | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,848 | py | from harpiya import api, fields
from harpiya.tests import tagged
from harpiya.addons.account.tests.account_test_classes import AccountingTestCase
@tagged('post_install', '-at_install')
class TestReconciliationHeavyLoad(AccountingTestCase):
"""Check that reconciliation can be done for a move with many lines
"""
def _create_move(self, journal):
values = {
'ref': "Test reconcile - Auto-generated by script",
'journal_id': journal.id,
'state': 'draft',
'company_id': journal.env.user.company_id.id,
}
return journal.env['account.move'].create(values)
def _get_values_account_move_line(
self, account, journal, name, move,
credit=0, debit=0, date=fields.Date.today()):
return {
'journal_id': journal.id,
'name': name,
'account_id': account.id,
'move_id': move.id,
'quantity': 1,
'credit': credit,
'debit': debit,
'date': date,
}
def setUp(self):
super(TestReconciliationHeavyLoad, self).setUp()
self.account_type = self.env.ref('account.data_account_type_receivable')
self.journal = self.env['account.journal'].search([
('type', '=', 'bank'),
('company_id', '=', self.env.user.company_id.id),
], limit=1)
self.account = self.env['account.account'].search([
('user_type_id', '=', self.account_type.id),
('company_id', '=', self.env.user.company_id.id),
], limit=1)
def test_heavy_load_reconciliation(self):
"""Does reconciliation on a move with nb_lines lines.
To avoid burdening uselessly the runbot, we only set nb_lines to 10,
but it should be of order 10^3 to be meaningful.
The day we manage to use system build settings to execute tests
this could be done automatically for "heavy load builds",
but for now this should be changed manually.
"""
total = 0
line_ids = []
amount_per_line = 1
nb_lines = 10 # change this to 1000 or more
move = self._create_move(self.journal)
for i in range(nb_lines):
name = "Move line credit #%s" % i
total += amount_per_line
values = self._get_values_account_move_line(
self.account, self.journal, name, move, credit=amount_per_line)
line_ids.append((0, False, values))
values = self._get_values_account_move_line(
self.account, self.journal, "Move line Debit", move, debit=total)
line_ids.append((0, False, values))
move.write({'line_ids': line_ids})
move.line_ids.reconcile()
self.assertTrue(all(move.line_ids.mapped('reconciled')))
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
92a9c983b89ac8d402b925347d860d62f034a371 | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /system/base/net-tools/actions.py | 837eeac22cd5a7fb9a0f4446547b1c64394aa1b4 | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
pisitools.dosed("Makefile", "(?m)^(COPTS =.*)", "COPTS = %s -fPIE" % get.CFLAGS())
pisitools.dosed("Makefile", "(?m)^(LOPTS =.*)", "LOPTS = %s -pie" % get.LDFLAGS())
def build():
shelltools.export("CC", get.CC())
autotools.make("libdir")
autotools.make()
autotools.make("ether-wake")
autotools.make("i18ndir")
def install():
autotools.rawInstall("BASEDIR=%s" % get.installDIR())
pisitools.dosbin("ether-wake")
pisitools.dosym("/bin/hostname", "/usr/bin/hostname")
pisitools.dodoc("README", "README.ipv6", "TODO")
| [
"eki@420bcd57-4a62-4fd6-832e-5ede16c90cc9"
] | eki@420bcd57-4a62-4fd6-832e-5ede16c90cc9 |
8ee6b1e2088ea8fcf36bf7ea351d8bec16454b2f | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/parties/activityFSMs.py | 07ecce6deaf62a4abdea57d2c165f9c32a26ee27 | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 4,359 | py | #-------------------------------------------------------------------------------
# Contact: Rob Gordon
# Created: Oct 2008
#
# Purpose: Individual Activity FSMs
#-------------------------------------------------------------------------------
# Panda Imports
from direct.directnotify import DirectNotifyGlobal
# parties imports
from BaseActivityFSM import BaseActivityFSM
from activityFSMMixins import IdleMixin
from activityFSMMixins import RulesMixin
from activityFSMMixins import ActiveMixin
from activityFSMMixins import DisabledMixin
from activityFSMMixins import ConclusionMixin
from activityFSMMixins import WaitForEnoughMixin
from activityFSMMixins import WaitToStartMixin
from activityFSMMixins import WaitClientsReadyMixin
from activityFSMMixins import WaitForServerMixin
class FireworksActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, DisabledMixin):
notify = DirectNotifyGlobal.directNotify.newCategory( "FireworksActivityFSM" )
def __init__(self, activity):
FireworksActivityFSM.notify.debug("__init__")
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {
"Idle" : ["Active", "Disabled"],
"Active" : ["Disabled"],
"Disabled" : [],
}
class CatchActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory( "CatchActivityFSM" )
def __init__(self, activity):
CatchActivityFSM.notify.debug("__init__")
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {
"Idle" : ["Active", "Conclusion"],
"Active" : ["Conclusion"],
"Conclusion" : ["Idle"],
}
class TrampolineActivityFSM(BaseActivityFSM, IdleMixin, RulesMixin, ActiveMixin):
notify = DirectNotifyGlobal.directNotify.newCategory( "TrampolineActivityFSM" )
def __init__(self, activity):
TrampolineActivityFSM.notify.debug("__init__")
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {
"Idle" : ["Rules", "Active"], # added Active to this list as the fsm will sometimes get set directly to this from idle when a toon comes late to a party
"Rules" : ["Active", "Idle"],
"Active" : ["Idle"],
}
class DanceActivityFSM(BaseActivityFSM, IdleMixin, ActiveMixin, DisabledMixin):
notify = DirectNotifyGlobal.directNotify.newCategory( "DanceActivityFSM" )
def __init__(self, activity):
DanceActivityFSM.notify.debug("__init__")
BaseActivityFSM.__init__(self, activity)
self.defaultTransitions = {
"Active" : ["Disabled"],
"Disabled" : ["Active"],
}
class TeamActivityAIFSM(BaseActivityFSM, WaitForEnoughMixin, WaitToStartMixin, WaitClientsReadyMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory("TeamActivityAIFSM")
def __init__(self, activity):
BaseActivityFSM.__init__(self, activity)
self.notify.debug("__init__")
self.defaultTransitions = {
"WaitForEnough" : ["WaitToStart"],
"WaitToStart" : ["WaitForEnough", "WaitClientsReady"],
"WaitClientsReady" : ["WaitForEnough", "Active"],
"Active" : ["WaitForEnough", "Conclusion"],
"Conclusion" : ["WaitForEnough"],
}
class TeamActivityFSM(BaseActivityFSM, WaitForEnoughMixin, WaitToStartMixin, RulesMixin, WaitForServerMixin, ActiveMixin, ConclusionMixin):
notify = DirectNotifyGlobal.directNotify.newCategory("TeamActivityFSM")
def __init__(self, activity):
BaseActivityFSM.__init__(self, activity)
assert(self.notify.debug("__init__"))
self.defaultTransitions = {
"WaitForEnough" : ["WaitToStart"],
"WaitToStart" : ["WaitForEnough", "Rules"],
# Instances without the local toon in the activity will go from Rules directly to Active.
# If a toon drops unexpectedly, the game will revert back to WaitForEnough
"Rules" : ["WaitForServer", "Active", "WaitForEnough"],
"WaitForServer" : ["Active", "WaitForEnough"],
"Active" : ["Conclusion", "WaitForEnough"],
"Conclusion" : ["WaitForEnough"],
}
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
bf434a2dde5c1b7b25c30d1f2b90a45984deffc7 | 463c053bcf3f4a7337b634890720ea9467f14c87 | /python/ray/ml/__init__.py | 4778a644583fac1d6217ccdfc7faeb8972300648 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 209 | py | from ray.ml.checkpoint import Checkpoint
from ray.ml.config import RunConfig, ScalingConfig
from ray.ml.preprocessor import Preprocessor
__all__ = ["Checkpoint", "Preprocessor", "RunConfig", "ScalingConfig"]
| [
"noreply@github.com"
] | pdames.noreply@github.com |
1a994d033d63f26689a109eacd06972b120aeb9f | 128adc6237ecbcb493f927a5de7990be21542526 | /data.py | 684fa7d99b74eca739dd448959e48c1028646882 | [] | no_license | mwitiderrick/Determined-DVC | 529f53cc1959a53b8081d1a0d40665ed95be9265 | 2bbce280f9c955cb76843671a8a9da3e1824c8e0 | refs/heads/master | 2023-02-19T11:58:46.848293 | 2021-01-22T12:26:53 | 2021-01-22T12:26:53 | 331,883,888 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | import tensorflow
import pandas as pd
import numpy as np
def load_training_data():
df = pd.read_csv("data/mnist_train.csv")
x_train = df.drop("label",axis=1)
x_train = x_train.values
y_train = df["label"].values
return x_train, y_train
def load_validation_data():
df = pd.read_csv("data/mnist_test.csv")
x_test = df.drop("label",axis=1)
x_test = x_test.values
y_test = df["label"].values
return x_test, y_test
| [
"mwitiderrick@gmail.com"
] | mwitiderrick@gmail.com |
0dfe69892b820556fd59cbf745f85f75f750a462 | 4fd5b888aff049ecf84fac6969e2d6950c8bf683 | /pyjob/tests/test_cexec.py | 636eb93da237f009416b54d5176ddd4f1318b582 | [
"MIT"
] | permissive | FilomenoSanchez/pyjob | 1d705fa9af4f8ba8827743f6b0c21a2ff9500ff2 | b8dac5e53570f44370c222f97f063d666eeb0d64 | refs/heads/master | 2020-07-18T22:24:02.147767 | 2020-03-03T09:51:29 | 2020-03-03T09:51:29 | 206,324,268 | 0 | 0 | MIT | 2019-09-04T13:20:52 | 2019-09-04T13:20:51 | null | UTF-8 | Python | false | false | 2,326 | py | __author__ = 'Felix Simkovic'
import os
import pytest
import sys
from pyjob.cexec import cexec
from pyjob.exception import PyJobExecutableNotFoundError, PyJobExecutionError
class TestCexec(object):
def test_1(self):
stdout = cexec([sys.executable, '-c', 'import sys; print("hello"); sys.exit(0)'])
assert stdout == 'hello'
def test_2(self):
with pytest.raises(PyJobExecutionError):
cexec([sys.executable, '-c', 'import sys; sys.exit(1)'])
def test_3(self):
cmd = [sys.executable, '-c', 'import sys; print("hello"); sys.exit(1)']
stdout = cexec(cmd, permit_nonzero=True)
assert stdout == 'hello'
def test_4(self):
if sys.version_info < (3, 0):
cmd = [sys.executable, '-c', 'import sys; print(raw_input()); sys.exit(0)']
else:
cmd = [sys.executable, '-c', 'import sys; print(input()); sys.exit(0)']
stdout = cexec(cmd, stdin='hello')
assert stdout == 'hello'
def test_5(self):
cmd = [sys.executable, '-c', 'import os, sys; print(os.getcwd()); sys.exit(0)']
directory = os.path.join(os.getcwd())
stdout = cexec(cmd, cwd=directory)
assert stdout == directory
def test_6(self):
cmd = [sys.executable, '-c', 'import sys; print("hello"); sys.exit(0)']
fname = 'test.log'
with open(fname, 'w') as f:
stdout = cexec(cmd, stdout=f)
assert stdout is None
with open(fname, 'r') as f:
assert f.read().strip() == 'hello'
pytest.helpers.unlink([fname])
def test_7(self):
cmd = [sys.executable, '-c', 'import os, sys; print(os.getcwd()); sys.exit("error message")']
directory = os.path.join(os.getcwd())
with open('stdout.log', 'w') as fstdout, open('stderr.log', 'w') as fstderr:
stdout = cexec(cmd, stdout=fstdout, stderr=fstderr, permit_nonzero=True)
assert stdout is None
with open('stdout.log', 'r') as f:
assert f.read().strip() == directory
with open('stderr.log', 'r') as f:
assert f.read().strip() == 'error message'
pytest.helpers.unlink(['stdout.log', 'stderr.log'])
def test_8(self):
with pytest.raises(PyJobExecutableNotFoundError):
cexec(['fjezfsdkj'])
| [
"felixsimkovic@me.com"
] | felixsimkovic@me.com |
34b81f7e23b59b2b431174df1e4acd91d52f4fd2 | a1d30d667cbf814db1809c31cf68ba75c01f819c | /Google/2. medium/274. H-Index.py | 86a0a49e17a3c9ca641aab1bfbb569642dbecf80 | [] | no_license | yemao616/summer18 | adb5f0e04e6f1e1da6894b0b99a61da3c5cba8ee | 8bb17099be02d997d554519be360ef4aa1c028e3 | refs/heads/master | 2021-06-02T04:32:07.703198 | 2020-01-09T17:45:29 | 2020-01-09T17:45:29 | 110,744,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | # Given an array of citations (each citation is a non-negative integer) of a researcher, write a function to compute the researcher's h-index.
# According to the definition of h-index on Wikipedia: "A scientist has index h if h of his/her N papers have at least h citations each, and the other N - h papers have no more than h citations each."
# For example, given citations = [3, 0, 6, 1, 5], which means the researcher has 5 papers in total and each of them had received 3, 0, 6, 1, 5 citations respectively. Since the researcher has 3 papers with at least 3 citations each and the remaining two with no more than 3 citations each, his h-index is 3.
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
class Solution(object):
def hIndex(self, citations): # O(nlgn)
citations.sort()
n = len(citations)
for i in xrange(n):
if citations[i] >= n-i:
return n-i
return 0
def hIndex(self, citations): # O(n) space, O(n) time
n = len(citations)
citeCount = [0] * (n+1)
for c in citations:
if c >= n:
citeCount[n] += 1
else:
citeCount[c] += 1
i = n-1
while i >= 0:
citeCount[i] += citeCount[i+1]
if citeCount[i+1] >= i+1:
return i+1
i -= 1
return 0
# Further Thoughts
# Is it possible to have multiple h-values?
# The answer is NO. One can find this intuitively from Figure 1. The dashed line y = x crosses the histogram once and only once, because the sorted bars are monotonic. It can also be proven from the definition of the h-index.
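

# Quick self-check (an editorial addition, not part of the original notes):
# it reuses the example from the problem statement above, whose expected
# h-index is 3.
if __name__ == '__main__':
    assert Solution().hIndex([3, 0, 6, 1, 5]) == 3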
| [
"ymao4@ncsu.edu"
] | ymao4@ncsu.edu |
8104d13b788a66efc1a7bcff6fd2911d319a2e9c | 159bd4c0274271aae7cf2d42bc6819957ee626c9 | /viz.py | 5f91147e0c9a32109927e7d3d101b098362d385e | [] | no_license | Schuck9/UG-in-Weighted-Network | aaa9810e8806d6130ec87c275a169009da460abc | 8e2a6ebde2ed4b9e2f6d2a2ca9d84140c2c5e792 | refs/heads/master | 2021-03-01T04:03:05.983146 | 2020-04-24T02:51:34 | 2020-04-24T02:51:34 | 245,752,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,801 | py | """
Ultimatum Game in complex network Visualization
@date: 2020.3.19
@author: Tingyu Mo
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from img_concat import image_compose
def pq_distribution(value_list):
    x_axis = np.arange(0,1.05,1/20) # 21 discrete points, range 0~1, step size 0.05
y_axis = np.zeros(x_axis.size)
for v in value_list:
for i in range(x_axis.size):
if abs(v-x_axis[i]) < 0.05:
y_axis[i] += 1
return y_axis
def pq_distribution_viz(RecordName,time_option = "all"):
# Epoch_list = ['1','100','1000','20000']
Epoch_list = ['100','1000','20000']
result_dir = "./result"
record_dir = os.path.join(result_dir,RecordName)
checkpoint_list = os.listdir(record_dir)
parse_str = checkpoint_list[0].split("_")
del(parse_str[-1])
info_str = '_'.join(parse_str)
save_path =os.path.join(record_dir, info_str+'.jpg')
y_axis_plist = []
y_axis_qlist = []
for Epoch in Epoch_list:
info_e = info_str+"_"+Epoch
Epoch_dir = os.path.join(record_dir,info_e )
strategy_path = os.path.join(Epoch_dir,info_e+"_strategy.csv")
strategy = pd.read_csv(strategy_path)
# strategy.reset_index(drop = True)
pq_array = strategy.values
# np.delete(pq_array,1,axis=1)
p = pq_array[0][1:]
q = pq_array[1][1:]
# del(p[0])
# del(q[0])
p = pq_distribution(p)
q = pq_distribution(q)
y_axis_plist.append(p/10000)
y_axis_qlist.append(q/10000)
plt.figure()
x_axis = np.arange(0,1.05,1/20)
# plt.rcParams['font.sans-serif']=['SimHei']
# plt.rcParams['axes.unicode_minus'] = False
# # plt.title("")
    plt.xlabel("p")  # label for the x axis
    plt.ylabel("D(p)")  # label for the y axis
plt.plot(x_axis, y_axis_plist[0] ,marker='^',linestyle='-',color='skyblue', label='t = 100')
plt.plot(x_axis, y_axis_plist[1], marker='s',linestyle='-',color='green', label='t = 1000')
plt.plot(x_axis, y_axis_plist[2], marker='*',linestyle='-',color='red', label='t = 20000')
# plt.plot(x_axis, thresholds, color='blue', label='threshold')
    plt.legend(loc = 'upper right')  # show the legend
plt.savefig(save_path)
print("Figure has been saved to: ",save_path)
plt.show()
def avg_pq_viz():
'''
Figure 2 like
'''
u = 0.1
info = 'RG_Weighted_0.4'
save_path = "./result/{}_u_{}.jpg".format(info,u)
x_label = [0.001,0.01,0.1,1,10]
x_axis = np.log10(x_label)
avg_list = [ (0.5,0.5),
(0.494011917,0.496625418),(0.498278643,0.471188505),
(0.341997159,0.261274376),(0.124914813,0.115971024),
]
p_axis = list()
q_axis = list()
for stg in avg_list:
p,q = stg
p_axis.append(p)
q_axis.append(q)
plt.figure()
# plt.rcParams['font.family'] = ['sans-serif']
# plt.rcParams['font.sans-serif'] = ['SimHei']
# plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
plt.title(" {} u={}".format(info,u))
    plt.xlabel("Selection strength(w)")  # label for the x axis
    plt.ylabel("Mean")  # label for the y axis
plt.xticks(x_axis,x_label,fontsize=16)
plt.plot(x_axis, p_axis,marker='^',linestyle='-',color='skyblue', label='Offer (p)')
plt.plot(x_axis, q_axis, marker='s',linestyle='-',color='red', label='Demand (q)')
# plt.plot(x_axis, thresholds, color='blue', label='threshold')
    plt.legend(loc = 'upper right')  # show the legend
plt.savefig(save_path)
print("Figure has been saved to: ",save_path)
plt.show()
def data_loader(data_path):
weight_axis = [0.25, 0.3, 0.35, 0.4, 0.55, 0.7, 0.85]
weight_axis = [str(x )for x in weight_axis]
u = [0.001, 0.01, 0.1]
u = [str(i) for i in u]
w = [0.001, 0.01, 0.1, 1, 10]
w = [str(i) for i in w]
data = pd.read_excel(data_path)
data_dict = dict()
for weight_key in weight_axis:
data_dict[weight_key] = dict()
for u_key in u:
data_dict[weight_key][u_key] =dict()
# for w_key in w:
# data_dict[weight_key][u_key][w_key] = np.zeros([1,2])
pq_data =data[['p','q']].dropna()
for i,weight_key in enumerate(weight_axis):
weight_data = pq_data.iloc[15*i:15*(i+1)]
for j,u_key in enumerate(u):
u_data = weight_data.iloc[5*j:5*(j+1)].values
for k,w_key in enumerate(w):
# print(u_data[k])
data_dict[weight_key][u_key][w_key] = u_data[k]
print("data loaded!")
return data_dict
def weighted_graph_viz():
data_path ='./result/Result_data.xlsx'
data_dict= data_loader(data_path)
weight_axis = [0.25, 0.3, 0.35, 0.4, 0.55, 0.7, 0.85]
weight_axis_str = [str(x )for x in weight_axis]
x_axis = weight_axis
pq = ['Offer(p)','Demond(q)']
u = [0.001, 0.01, 0.1]
u = [str(i) for i in u]
w = [0.001, 0.01, 0.1, 1, 10]
w = [str(i) for i in w]
index = np.array([[0,2,4],[1,3,5]])
for k ,role in enumerate(pq):
for j,u_ in enumerate(u):
y_list = []
for w_ in w:
ls = []
for i,weight_key in enumerate(weight_axis_str):
ls.append(data_dict[weight_key][u_][w_][k])
y_list.append(ls)
# print("y_axis done!")
info_str = role+"_"+u_
save_path = './result/Fig/{}_{}.jpg'.format(index[k][j],info_str)
plt.figure()
# plt.rcParams['font.sans-serif']=['SimHei']
# plt.rcParams['axes.unicode_minus'] = False
plt.title(info_str)
            plt.xlabel("weight")  # label for the x axis
            plt.ylabel("{}".format(role))  # label for the y axis
plt.plot(x_axis, y_list[0] ,marker='>',linestyle='-',color='purple', label='w = 0.001')
plt.plot(x_axis, y_list[1] ,marker='^',linestyle='-',color='skyblue', label='w = 0.01')
plt.plot(x_axis, y_list[2], marker='s',linestyle='-',color='green', label='w = 0.1')
plt.plot(x_axis, y_list[3], marker='*',linestyle='-',color='red', label='w = 1')
plt.plot(x_axis, y_list[4], marker='x',linestyle='-',color='black', label='w = 10')
# plt.plot(x_axis, thresholds, color='blue', label='threshold')
            plt.legend(loc = 'upper right')  # show the legend
plt.savefig(save_path)
print("Figure has been saved to: ",save_path)
# plt.show()
if __name__ == '__main__':
# RecordName ='2020-03-03-09-14-20'
# time_option = "all"
# pq_distribution_viz(RecordName,time_option)
# avg_pq_viz()
weighted_graph_viz()
image_compose("./result/Fig/")
| [
"noreply@github.com"
] | Schuck9.noreply@github.com |
7ef8c7d8fbcd5f4846d737fcee832ff469bab76b | 7dc502a62dcc4ff39f572040ba180315981e3ba8 | /src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2021_11_01_preview/operations/_operations.py | d2af43806a36b987abf97f3c948a353bae6592c7 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | dbradish-microsoft/azure-cli-extensions | 2bec15b90666fee7a0a833b407ca2619e25fed86 | fe44a1bb123a58b7e8248850bdc20555ca893406 | refs/heads/master | 2023-08-31T15:19:35.673988 | 2022-02-09T08:50:18 | 2022-02-09T08:50:18 | 252,317,425 | 0 | 0 | MIT | 2020-04-02T00:29:14 | 2020-04-02T00:29:13 | null | UTF-8 | Python | false | false | 5,575 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.ContainerService/operations')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2021_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Gets a list of operations.
Gets a list of operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2021_11_01_preview.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerService/operations'} # type: ignore
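# Illustrative consumption (added; not part of the generated file): the pager
# returned by list() is consumed by plain iteration, e.g.
#
#   for op in container_service_client.operations.list():
#       print(op.name)
#
# where `container_service_client` is an already constructed management client
# that exposes this operation group; its name and construction are assumptions
# made for the example, not something defined in this module.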
| [
"noreply@github.com"
] | dbradish-microsoft.noreply@github.com |
f33f85ac4aba4e4d933e8040620605b393102a38 | 02bdaef6edebcfc1c46bb62dbc79a3a805946ee7 | /ns/scheduler/sp.py | 954d9e976b5227933b1360fdf17c012f18aff21f | [
"Apache-2.0"
] | permissive | chapter09/ns.py | 707ea084306ff04d606a25635d80cfe741183df8 | d8fb5f838e8d163e9b5a872326282dac2238b9c5 | refs/heads/main | 2023-06-25T08:47:05.855105 | 2021-07-30T06:49:19 | 2021-07-30T06:49:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,781 | py | """
Implements a Static Priority (SP) server.
"""
import uuid
from collections import defaultdict as dd
from collections.abc import Callable
import simpy
from ns.packet.packet import Packet
class SPServer:
"""
Parameters
----------
env: simpy.Environment
The simulation environment.
rate: float
The bit rate of the port.
priorities: list or dict
This can be either a list or a dictionary. If it is a list, it uses the flow_id ---
or class_id, if class-based static priority scheduling is activated using the
`flow_classes' parameter below --- as its index to look for the flow (or class)'s
corresponding priority. If it is a dictionary, it contains (flow_id or class_id
-> priority) pairs for each possible flow_id or class_id.
flow_classes: function
This is a function that matches flow_id's to class_ids, used to implement class-based
static priority scheduling. The default is an identity lambda function, which is
equivalent to flow-based WFQ.
zero_buffer: bool
Does this server have a zero-length buffer? This is useful when multiple
basic elements need to be put together to construct a more complex element
with a unified buffer.
zero_downstream_buffer: bool
Does this server's downstream element has a zero-length buffer? If so, packets
may queue up in this element's own buffer rather than be forwarded to the
next-hop element.
debug: bool
If True, prints more verbose debug information.
"""
def __init__(self,
env,
rate,
priorities,
flow_classes: Callable = lambda x: x,
zero_buffer=False,
zero_downstream_buffer=False,
debug=False) -> None:
self.env = env
self.rate = rate
self.prio = priorities
self.flow_classes = flow_classes
self.element_id = uuid.uuid1()
self.stores = {}
self.prio_queue_count = {}
if isinstance(priorities, list):
priorities_list = priorities
elif isinstance(priorities, dict):
priorities_list = priorities.values()
else:
raise ValueError(
'Priorities must be either a list or a dictionary.')
for prio in priorities_list:
if prio not in self.prio_queue_count:
self.prio_queue_count[prio] = 0
self.priorities_list = sorted(self.prio_queue_count, reverse=True)
self.packets_available = simpy.Store(self.env)
self.current_packet = None
self.byte_sizes = dd(lambda: 0)
self.packets_received = 0
self.out = None
self.upstream_updates = {}
self.upstream_stores = {}
self.zero_buffer = zero_buffer
self.zero_downstream_buffer = zero_downstream_buffer
if self.zero_downstream_buffer:
self.downstream_stores = {}
self.debug = debug
self.action = env.process(self.run())
def update(self, packet):
"""The packet has just been retrieved from this element's own buffer, so
update internal housekeeping states accordingly."""
if self.zero_buffer:
self.upstream_stores[packet].get()
del self.upstream_stores[packet]
self.upstream_updates[packet](packet)
del self.upstream_updates[packet]
if self.debug:
print(
f"Sent out packet {packet.packet_id} from flow {packet.flow_id} "
f"belonging to class {self.flow_classes(packet.packet_id)} "
f"of priority {packet.prio[self.element_id]}")
self.prio_queue_count[packet.prio[self.element_id]] -= 1
if self.flow_classes(packet.flow_id) in self.byte_sizes:
self.byte_sizes[self.flow_classes(packet.flow_id)] -= packet.size
else:
raise ValueError("Error: the packet is from an unrecorded flow.")
def packet_in_service(self) -> Packet:
"""
Returns the packet that is currently being sent to the downstream element.
Used by a ServerMonitor.
"""
return self.current_packet
def byte_size(self, queue_id) -> int:
"""
Returns the size of the queue for a particular queue_id, in bytes.
Used by a ServerMonitor.
"""
if queue_id in self.byte_sizes:
return self.byte_sizes[queue_id]
return 0
def size(self, queue_id) -> int:
"""
Returns the size of the queue for a particular queue_id, in the
number of packets. Used by a ServerMonitor.
"""
if queue_id in self.stores:
return len(self.stores[queue_id].items)
return 0
def all_flows(self) -> list:
"""
Returns a list containing all the flow IDs.
"""
return self.byte_sizes.keys()
def total_packets(self) -> int:
"""
Returns the total number of packets currently in the queues.
"""
return sum(self.prio_queue_count.values())
def run(self):
"""The generator function used in simulations."""
while True:
for prio in self.priorities_list:
if self.prio_queue_count[prio] > 0:
if self.zero_downstream_buffer:
ds_store = self.downstream_stores[prio]
packet = yield ds_store.get()
packet.prio[self.element_id] = prio
self.current_packet = packet
yield self.env.timeout(packet.size * 8.0 / self.rate)
self.out.put(packet,
upstream_update=self.update,
upstream_store=self.stores[prio])
self.current_packet = None
else:
store = self.stores[prio]
packet = yield store.get()
packet.prio[self.element_id] = prio
self.update(packet)
self.current_packet = packet
yield self.env.timeout(packet.size * 8.0 / self.rate)
self.out.put(packet)
self.current_packet = None
break
if self.total_packets() == 0:
yield self.packets_available.get()
def put(self, packet, upstream_update=None, upstream_store=None):
""" Sends a packet to this element. """
self.packets_received += 1
flow_id = packet.flow_id
self.byte_sizes[self.flow_classes(flow_id)] += packet.size
if self.total_packets() == 0:
self.packets_available.put(True)
prio = self.prio[self.flow_classes(flow_id)]
self.prio_queue_count[prio] += 1
if self.debug:
print(
"At time {:.2f}: received packet {:d} from flow {} belonging to class {}"
.format(self.env.now, packet.packet_id, flow_id,
self.flow_classes(flow_id)))
if not prio in self.stores:
self.stores[prio] = simpy.Store(self.env)
if self.zero_downstream_buffer:
self.downstream_stores[prio] = simpy.Store(self.env)
if self.zero_buffer and upstream_update is not None and upstream_store is not None:
self.upstream_stores[packet] = upstream_store
self.upstream_updates[packet] = upstream_update
if self.zero_downstream_buffer:
self.downstream_stores[prio].put(packet)
return self.stores[prio].put(packet)
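if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module).
    # It wires an SPServer to a trivial sink and pushes two hand-built packets
    # through it. StubSink and stub_packet are assumptions made only for this
    # example -- real ns.py simulations would use the package's packet
    # generators and sinks instead.
    import types

    class StubSink:
        """Minimal downstream element: just prints what it receives."""
        def put(self, packet, **kwargs):
            print(f"sink got packet {packet.packet_id} of flow {packet.flow_id}")

    def stub_packet(packet_id, flow_id, size):
        # prio must be a dict because SPServer records its element_id in it
        return types.SimpleNamespace(packet_id=packet_id, flow_id=flow_id,
                                     size=size, prio={})

    env = simpy.Environment()
    # flow 0 maps to priority 1, flow 1 to priority 10; higher values are served first
    server = SPServer(env, rate=8000.0, priorities={0: 1, 1: 10}, debug=True)
    server.out = StubSink()
    server.put(stub_packet(0, flow_id=0, size=100))
    server.put(stub_packet(1, flow_id=1, size=100))
    env.run(until=10)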
| [
"bli@ece.toronto.edu"
] | bli@ece.toronto.edu |
9e67469d766ce79591979d03e6ccc685e0a7c7b3 | 0d347de6f7fb39ddd3f16905056f95d8397d0f72 | /app/main/views.py | fe26140f3727e94324c4fff33dd4e6932ab2f8eb | [] | no_license | vincentmuya/Movie_review | 83564d36a5e76a49ccebb2c28f633a68f3c050b0 | 5cb1c0c49e790d27086acbb7de1342c5cc3eed60 | refs/heads/master | 2021-05-16T14:38:38.566432 | 2018-02-03T08:43:49 | 2018-02-03T08:43:49 | 118,459,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,295 | py | from flask import render_template,request,redirect,url_for,abort
from . import main
from ..requests import get_movies,get_movie,search_movie
from ..models import Review,User
from .forms import ReviewForm,UpdateProfile
from flask_login import login_required
from .. import db,photos
#views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
# Getting popular movie
popular_movies = get_movies('popular')
upcoming_movie = get_movies('upcoming')
now_showing_movie = get_movies('now_playing')
title = 'Home - Welcome to The best Movie Review Website Online'
search_movie = request.args.get('movie_query')
if search_movie:
return redirect(url_for('.search', movie_name = search_movie))
else:
return render_template('index.html', title = title, popular = popular_movies, upcoming = upcoming_movie, now_showing = now_showing_movie )
@main.route('/movie/<int:id>')
def movie(id):
'''
view movie page function that returns the movies details page and its data
'''
movie = get_movie(id)
title = f'{movie.title}'
reviews = Review.get_reviews(movie.id)
return render_template('movie.html',title = title, movie = movie, reviews = reviews)
@main.route('/search/<movie_name>')
def search(movie_name):
'''
view function to display search results
'''
movie_name_list = movie_name.split(" ")
movie_name_format = "+".join(movie_name_list)
searched_movies = search_movie(movie_name_format)
title = f'search results for {movie_name}'
return render_template('search.html',movies = searched_movies)
@main.route('/movie/review/new/<int:id>', methods = ['GET','POST'])
@login_required
def new_review(id):
form = ReviewForm()
movie = get_movie(id)
if form.validate_on_submit():
title = form.title.data
review = form.review.data
new_review = Review(movie.id,title,movie.poster,review)
new_review.save_review()
return redirect(url_for('.movie',id=movie.id))
title = f'{movie.title} review'
return render_template('new_review.html',title = title, review_form = form, movie = movie)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
| [
"vincentmuya13@gmail.com"
] | vincentmuya13@gmail.com |
452f4c80e4947afd07246f7965349ef536026b55 | 5b3d8f56f4d18dc8809f9f5aa7d2a7089cdbf489 | /TablesRedo/RParagraphs.py | 69dc4358f20ceaff1920f26f5bb299ac9d843406 | [] | no_license | heyliljill/edpsych-cloned | 89ba1a827ed66651b7387b25bc2c188ff344e8d1 | ba02e4789e390bb6488b11608b994ee5678a4b30 | refs/heads/master | 2020-07-26T00:51:41.004018 | 2019-09-14T17:26:45 | 2019-09-14T17:26:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | # 5. (Table 7) Mean number of paragraphs represented in reading, per passage (3 paragraphs) (mrimpara,mriipara...4 total)
# a. 3 paragraphs (=3), 2 paragraphs (=2), 1 paragraph (=1), topic only (=4), topic related (=5)
# b. Break up into interest, difficulty, gender
# c. By overall, WDI - Reading subsample, LWDI - Reading subsample
f = open('manova-paragraphs','w+')
def main():
GLMtext = """
GLM mrimpara mriipara mrbmpara mrbipara BY sex
/WSFACTOR=interest 2 Polynomial diff 2 Polynomial
/METHOD=SSTYPE(3) :
/EMMEANS=TABLES(sex) COMPARE ADJ(LSD)
/EMMEANS=TABLES(interest) COMPARE ADJ(LSD)
/EMMEANS=TABLES(diff) COMPARE ADJ(LSD)
/EMMEANS=TABLES(sex*interest)
/EMMEANS=TABLES(sex*diff)
/EMMEANS=TABLES(interest*diff)
/EMMEANS=TABLES(sex*interest*diff)
/CRITERIA=ALPHA(.05)
/WSDESIGN=interest diff interest*diff
/DESIGN=sex.
"""
switchtext = "DATASET ACTIVATE main.\n"
f.write("DATASET NAME main.\n")
text1 = """DATASET COPY mainCopy1 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy1.\n
recode mrimpara mriipara mrbmpara mrbipara (1=1) (else = 0)."""
f.write(text1)
f.write(GLMtext)
f.write(switchtext)
text2 = """DATASET COPY mainCopy2 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy2.\n
recode mrimpara mriipara mrbmpara mrbipara (2=1) (else = 0)."""
f.write(text2)
f.write(GLMtext)
f.write(switchtext)
text3 = """DATASET COPY mainCopy3 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy3.\n
recode mrimpara mriipara mrbmpara mrbipara (3=1) (else = 0)."""
f.write(text3)
f.write(GLMtext)
f.write(switchtext)
text4 = """DATASET COPY mainCopy4 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy4.\n
recode mrimpara mriipara mrbmpara mrbipara (4=1) (else = 0)."""
f.write(text4)
f.write(GLMtext)
f.write(switchtext)
text5 = """DATASET COPY mainCopy5 WINDOW=FRONT.\nDATASET ACTIVATE mainCopy5.\n
recode mrimpara mriipara mrbmpara mrbipara (5=1) (else = 0)."""
f.write(text5)
f.write(GLMtext)
f.write(switchtext)
main() | [
"jillyma@gmail.com"
] | jillyma@gmail.com |
afcb238fc31c3171d21a8cf02075b81d5fbe3ba5 | 115d4be6df61f1e555826f49c2fd605ae83107bd | /solutions/217_contains-duplicate.py | 2241ba5c5b67ae9984a66fbc4398eb9e2eefeaf8 | [] | no_license | ipudu/leetcode | 82dd12236b31b5fc48e20b8cccadc2392bce7b52 | 0e4b0b83c8d3fb50b7db1dc0e1bc55942e91d811 | refs/heads/master | 2021-06-07T14:04:04.114618 | 2020-02-27T21:52:54 | 2020-02-27T21:52:54 | 108,054,385 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | """
Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
Example 1:
Input: [1,2,3,1]
Output: true
Example 2:
Input: [1,2,3,4]
Output: false
Example 3:
Input: [1,1,1,3,3,4,3,2,4,2]
Output: true
"""
class Solution:
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
d = {}
for n in nums:
if n in d.keys():
d[n] += 1
else:
d[n] = 1
for key, value in d.items():
if value >= 2:
return True
return False | [
"rocketsboy@gmail.com"
] | rocketsboy@gmail.com |
5ed045ae7d418ee7f741a74b81d0d00af2b6e967 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/4131.py | 82826060d0b03ea8b01335c5cf9ea63f6c7224ac | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | #! /usr/bin/python
import sets
def readInt():
return int(raw_input())
def readInts():
return map(int, raw_input().split())
def main():
t = readInt()
for i in range(t):
op = 'Case #' + str(i+1) + ': '
r1 = readInt()
for i1 in range(4):
if i1 == r1-1:
g1 = set(readInts())
else:
raw_input()
r2 = readInt()
for i1 in range(4):
if i1 == r2-1:
g2 = set(readInts())
else:
raw_input()
c = g1.intersection(g2)
if len(c) == 1:
op = op + str(c.pop())
elif len(c) == 0:
op = op + 'Volunteer cheated!'
else:
op = op + 'Bad magician!'
print op
if __name__ == '__main__':
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
16f8201b5bb7154c572064a4886dac1f421c6458 | 8a4f5627b58aa54fd6a549f90b3c79b6e285c638 | /Python/Fibonacci SubProblems/tiling_problem.py | a6972bca002d507375da62c6b6a8d8a24a3ccb11 | [] | no_license | MrChepe09/Dynamic-Programming-Series | 334f24af4f834f88840bf5222746d2b7452a33ee | d49e5bd7cb329b0b0f1382eb8627ba0427383499 | refs/heads/master | 2022-11-29T09:40:01.065561 | 2020-08-07T05:15:21 | 2020-08-07T05:15:21 | 283,384,811 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | def tiling(n, m):
a = [0 for _ in range(n+1)]
for i in range(n+1):
if(i<m):
a[i] = 1
elif(i==m):
a[i] = 2
else:
a[i] = a[i-1] + a[i-m]
return a[n]
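# Illustrative check (added; not in the original script): for n = 4, m = 2 the
# table fills as a[0] = a[1] = 1, a[2] = 2, a[3] = a[2] + a[1] = 3 and
# a[4] = a[3] + a[2] = 5, i.e. five ways to tile a 1x4 board with 1x1 and 1xm
# tiles -- exactly the Fibonacci-style recurrence a[i] = a[i-1] + a[i-m].
assert tiling(4, 2) == 5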
test = int(input())
for i in range(test):
n = int(input())
m = int(input())
print(tiling(n, m)) | [
"khanujabhupinder09@gmail.com"
] | khanujabhupinder09@gmail.com |
310e48bb177cd293890bd09abeb7ff05b2c2c63c | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/AtlasTest/DatabaseTest/AthenaPoolTest/share/LArCellContReader_jobOptionsReg.py | d8f78625d4d299d402886e24b26a21d14024b1d1 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | ###############################################################
#
# Job options file
#
## @file LArCellContReader_jobOptionsReg.py
##
## @brief For Athena POOL test: read in LArCellContainers via explicit collections
##
## @author RD Schaffer <R.D.Schaffer@cern.ch>
#
#==============================================================
## basic job configuration
import AthenaCommon.AtlasUnixStandardJob
## get a handle to the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
## get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
## get a handle to the ApplicationManager
from AthenaCommon.AppMgr import theApp
#--------------------------------------------------------------
# Load POOL support
#--------------------------------------------------------------
import AthenaPoolCnvSvc.ReadAthenaPool
#--------------------------------------------------------------
# Set flags and load det descr
#--------------------------------------------------------------
# For general flags
doAOD = False
doTrigger = False
DetDescrVersion = "ATLAS-CSC-01-02-00"
include( "RecExCond/RecExCommon_flags.py" )
# Set local flags - only need LAr DetDescr
DetFlags.detdescr.ID_setOff()
DetFlags.detdescr.Tile_setOff()
DetFlags.detdescr.Muon_setOff()
# set up all detector description description
include ("RecExCond/AllDet_detDescr.py")
# the correct tag should be specified
svcMgr.IOVDbSvc.GlobalTag="OFLCOND-CSC-00-01-00"
#--------------------------------------------------------------
# Define the output Db parameters (the default value are shown)
#--------------------------------------------------------------
#svcMgr.EventSelector.CollectionType = "ExplicitROOT"
svcMgr.EventSelector.InputCollections = [ "NewEventCollection" ]
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
theApp.EvtMax = 20
#--------------------------------------------------------------
# Application: AthenaPoolTest options
#--------------------------------------------------------------
from AthenaPoolTest.AthenaPoolTestConf import LArCellContFakeReader
topSequence += LArCellContFakeReader( "LArCellContFakeReader" )
from AthenaPoolTest.AthenaPoolTestConf import AthenaPoolTestAttrReader
topSequence += AthenaPoolTestAttrReader( "AthenaPoolTestAttrReader" )
#--------------------------------------------------------------
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
#--------------------------------------------------------------
svcMgr.MessageSvc.OutputLevel = INFO
svcMgr.MessageSvc.debugLimit = 100000
svcMgr.ClassIDSvc.OutputLevel = INFO
svcMgr.AthenaSealSvc.OutputLevel = INFO
svcMgr.MetaDataSvc.OutputLevel = DEBUG
#AthenaPoolTestAttrReader.OutputLevel = DEBUG
LArCellContFakeReader.OutputLevel = DEBUG
#StoreGateSvc = Service( "StoreGateSvc" )
#StoreGateSvc.Dump = TRUE
# No stats printout
include( "AthenaPoolTest/NoStats_jobOptions.py" )
#==============================================================
#
# End of job options file
#
###############################################################
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
fc792d3b088a28cbf5de771a55b42ee3a71883f2 | 409c4d0dce72de987dff7c76857499fba8f8b7a0 | /fastset.py | 4d0505c494e2d9af7fc369ae1897fd10bb9681e0 | [] | no_license | crystaleone/test | b4fece7fbc4e8ddd6186ea13245c62970c6d7038 | 4af3964bf6a657e888c7850f07a031440ba29e7a | refs/heads/master | 2021-01-18T19:17:36.924170 | 2017-09-19T03:37:01 | 2017-09-19T03:37:01 | 86,895,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | import set
class Set(set.Set):
def __init__(self, value = []):
self.data = {}
self.concat(value)
def intersect(self, other):
res = {}
for x in other:
if x in self.data:
res[x] = None
return Set(res.keys())
def union(self, other):
res = {}
for x in other:
res[x] = None
for x in self.data.keys():
res[x] = None
return Set(res.keys())
def concat(self, value):
for x in value: self.data[x] = None
def __getitem__(self, ix):
return list(self.data.keys())[ix]
def __repr__(self):
return '<Set:%r>' % list(self.data.keys())
| [
"403868144@qq.com"
] | 403868144@qq.com |
91eab4fa8185c49b8477407a722f1c6715895fb2 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/memre/armv7l/obsolete/corp2/x11/util/xorg-util/actions.py | 5173c2317879124ca4f74274e77256d2c546affd | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import crosstools as autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
WorkDir = "."
Skip = ("patches", "pisiBuildState", ".")
def setup():
for package in shelltools.ls("."):
if package.startswith(Skip):
continue
shelltools.cd(package)
if package.startswith("xorg-cf-files"):
pisitools.dosed("host.def", "_PARDUS_CC_", "%(CC)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_CXX_", "%(CXX)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_AS_", "%(AS)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_LD_", "%(LD)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_CFLAGS_", "%(CFLAGS)s" % autotools.environment)
pisitools.dosed("host.def", "_PARDUS_LDFLAGS_", "%(LDFLAGS)s" % autotools.environment)
autotools.configure("--with-config-dir=/usr/share/X11/config")
shelltools.cd("../")
def build():
for package in shelltools.ls("."):
if package.startswith(Skip):
continue
shelltools.cd(package)
autotools.make()
shelltools.cd("../")
def install():
for package in shelltools.ls("."):
if package.startswith(Skip):
continue
shelltools.cd(package)
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
shelltools.cd("../")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
1376cb7097bd78ec34020abe5909b6c7788177ca | c609730a43596a2d3303f072fc97d9cf681fac7b | /cagey/autohome_newcar/autohome_newcar/pipelines.py | c3d57bcd32af5c7814ca4370dea1aa74f9349e84 | [] | no_license | sinnettluo/ChenProject | 5403311c0c7b78c484145e16d692abff00d2a110 | 0e33ecf1683afb22f1deb4bd54294c41aed8a46b | refs/heads/master | 2023-03-22T23:48:08.430178 | 2020-09-02T15:05:02 | 2020-09-02T15:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import logging
import pymongo
from scrapy.utils.project import get_project_settings
from scrapy.exceptions import DropItem
settings = get_project_settings()
class AutohomeNewcarPipeline(object):
def __init__(self):
self.mongocounts = 0
self.connection = pymongo.MongoClient(
settings['MONGODB_SERVER'],
settings['MONGODB_PORT']
)
db = self.connection[settings['MONGODB_DB_SAVE']]
self.collection = db[settings['MONGODB_WRITE_COLLECTION']]
def process_item(self, item, spider):
logging.log(msg="Car added to MongoDB database!", level=logging.INFO)
self.mongocounts += 1
logging.log(msg=f"scrapy {self.mongocounts} items", level=logging.INFO)
if len(item["carinfo"]) == 0:
raise DropItem(f"Unqualified data! --> {item['url']}")
else:
self.collection.insert(item)
return item
def close_spider(self, spider):
self.connection.close()
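# Illustrative settings (added): the keys read above would live in the
# project's settings.py, for example
#
#   MONGODB_SERVER = 'localhost'          # assumed host
#   MONGODB_PORT = 27017
#   MONGODB_DB_SAVE = 'autohome'          # assumed database name
#   MONGODB_WRITE_COLLECTION = 'newcar'   # assumed collection name
#
# Only the key names come from this file; the concrete values are assumptions.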
| [
"1316446041@qq.com"
] | 1316446041@qq.com |
862086ef086ece1194fb916bde5ba9f0315ac214 | ac64fda7f1bfc92f7897efd60b8f3f0aeb22b4d7 | /syntactic_mutations/udacity/mutants/mutant51.py | a96c3a128cbcd12bf06722bf26693aee24cb014f | [] | no_license | dlfaults/mutation_operators_evaluation | ea7f33459ba7bcf7d70092d9db8b40f9b338d516 | 7d1ff30e901931a46bf8908e9bb05cae3daa5f0f | refs/heads/master | 2020-12-27T15:45:07.262012 | 2020-02-03T12:22:01 | 2020-02-03T12:22:01 | 237,955,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten
from batch_generator import Generator
from utils import INPUT_SHAPE, batch_generator, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS
from keras import backend as K
from PIL import Image
import numpy as np
def build_model(args):
'''
Modified NVIDIA model
'''
pass
model.add(Lambda((lambda x: ((x / 127.5) - 1.0)), input_shape=INPUT_SHAPE))
model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))
model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))
model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='elu'))
model.add(Conv2D(64, (3, 3), activation='elu'))
model.add(Dropout(args.keep_prob))
model.add(Flatten())
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))
model.add(Dense(1))
return model
def train_model(x_train, x_valid, y_train, y_valid, model_name, args):
'''
Train the model
'''
model = build_model(args)
model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))
train_generator = Generator(x_train, y_train, True, args)
validation_generator = Generator(x_valid, y_valid, False, args)
model.fit_generator(train_generator, validation_data=\
validation_generator, epochs=\
args.nb_epoch, use_multiprocessing=\
False, max_queue_size=\
10, workers=\
4)
model.save(model_name) | [
"gunel71@gmail.com"
] | gunel71@gmail.com |
3e4c15f5894ce5582ec1c8f2b54085c0fbfeb742 | 06cf972369c30da9d98b296bcbc26a826aa98126 | /aloisioimoveis/locations/tests/serializers/test_serializer_neighborhood.py | 35cc0545c519585355f8815f4d1e162ca82666f7 | [] | no_license | thiagorossener/aloisioimoveis | 2597422af6ac058ed3b8aa6e58f0f8913488a7fe | f9d974440f9a8cc875da8a1d4a5c885429563c1b | refs/heads/master | 2021-06-16T23:02:11.193518 | 2021-02-01T14:17:10 | 2021-02-01T14:17:10 | 94,144,023 | 18 | 17 | null | 2021-06-10T20:35:48 | 2017-06-12T21:55:18 | JavaScript | UTF-8 | Python | false | false | 654 | py | from django.test import TestCase
from model_mommy import mommy
from aloisioimoveis.locations.models import City, Neighborhood
from aloisioimoveis.locations.serializers import NeighborhoodSerializer
class NeighborhoodSerializerTest(TestCase):
def test_serializer(self):
"""Neighborhood serialization should return dict with id and name"""
        city = mommy.make(City, name='Taubaté')
        neighborhood = mommy.make(Neighborhood, name='Independência', city=city)
serializer = NeighborhoodSerializer(neighborhood)
        self.assertDictEqual({'id': 1, 'name': 'Independência'},
serializer.data)
| [
"thiago.rossener@gmail.com"
] | thiago.rossener@gmail.com |
79957b5682bbec421f84acc58f582cf4bee98906 | b1c7a768f38e2e987a112da6170f49503b9db05f | /accounts/migrations/0001_initial.py | 16cd2e320b91ce057984484170e3cb744ebb5223 | [] | no_license | Niladrykar/bracketerp | 8b7491aa319f60ec3dcb5077258d75b0394db374 | ca4ee60c2254c6c132a38ce52410059cc6b19cae | refs/heads/master | 2022-12-11T04:23:07.504966 | 2019-03-18T06:58:13 | 2019-03-18T06:58:13 | 176,218,029 | 1 | 0 | null | 2022-12-08T03:01:46 | 2019-03-18T06:27:37 | JavaScript | UTF-8 | Python | false | false | 981 | py | # Generated by Django 2.0.5 on 2018-09-07 10:46
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
bases=('auth.user', models.Model),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"niladry.kar85@gmail.com"
] | niladry.kar85@gmail.com |
025dd83f0936e55ee882cf696aa5f658a0d79663 | fd19962d7c1f37e8bdabf7946c48516f640e2ff3 | /product/management/commands/mycommand.py | bf55951d9ca2bb8991c5b28074bbba8a7932a5d5 | [] | no_license | amurakho/django_ready_to_delete | fe71bb727ad20ef134d3752568a043614acb4c64 | 0fed1890ce556bac301278e444426619dd0f2903 | refs/heads/master | 2022-12-05T12:01:02.412789 | 2020-08-20T12:30:31 | 2020-08-20T12:30:31 | 289,000,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Creates groups'
def handle(self, *args, **options):
print('Done')
| [
"amurakho@gmail.com"
] | amurakho@gmail.com |
a6f56459317ed81b634e5e6f5ac92dd207e7ed70 | f84540a209490c4d3ee7583c4668fe1c8b1c230e | /Graph/TopologicalSort/CourseScheduleII.py | b261982bba05af554e26c9924d34c7ae784eb183 | [] | no_license | TimMKChang/AlgorithmSampleCode | 9e08a3a88f24b9645ca70f834970650d400fd259 | d5bcdce147bd6c3f05648962ca2096f79e4f003f | refs/heads/master | 2023-07-19T15:28:27.722181 | 2021-09-09T07:02:22 | 2021-09-09T07:02:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | from typing import List
from collections import defaultdict
class TopologicalSort: # by DFS (recursive)
def __init__(self):
self.ans = None
self.index = None
def dfs(self, node, edges, visited, curVisited):
for v in edges[node]:
if v in curVisited:
return False
if v not in visited:
visited.add(v)
curVisited.add(v)
if not self.dfs(v, edges, visited, curVisited):
return False
curVisited.remove(v)
self.ans[self.index] = node
self.index -= 1
return True
def sort(self, N, edges):
self.ans = [None for _ in range(N)]
self.index = N - 1
visited = set()
curVisited = set()
for n in range(N):
if n not in visited:
visited.add(n)
curVisited.add(n)
if not self.dfs(n, edges, visited, curVisited):
return []
curVisited.remove(n)
return self.ans
class Solution:
    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
edges = defaultdict(list)
for v, u in prerequisites:
edges[u].append(v)
return TopologicalSort().sort(numCourses, edges)
s = Solution()
numCourses = 4
prerequisites = [[1,0],[2,0],[3,1],[3,2]]
print(s.findOrder(numCourses, prerequisites))
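# Added note: for this graph (0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3) the DFS above
# emits [0, 2, 1, 3]; [0, 1, 2, 3] is an equally valid topological order.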
"gn01168178@yahoo.com.tw"
] | gn01168178@yahoo.com.tw |
8706de911986f56f365524ecf0b45299673202ac | f13c586b82224c07f28f7bb7d9dd503e64eb5cb2 | /tests/drawer/test_drawer_utils.py | dbb661b0f0863c546d53e12356b0e139287236e5 | [
"Apache-2.0",
"MPL-1.1"
] | permissive | therooler/pennylane | 095f104e40254be2ed3050bc7be9ea9d2ee11ebd | fde1f24bd784d6ee2af5c980c2d5010b4c2bbe54 | refs/heads/master | 2023-04-29T13:32:43.115108 | 2023-04-18T09:41:42 | 2023-04-18T09:41:42 | 202,356,685 | 0 | 0 | Apache-2.0 | 2019-08-14T13:30:39 | 2019-08-14T13:30:38 | null | UTF-8 | Python | false | false | 5,547 | py | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the pennylane.drawer.utils` module.
"""
import pytest
import pennylane as qml
from pennylane.drawer.utils import default_wire_map, convert_wire_order, unwrap_controls
from pennylane.wires import Wires
class TestDefaultWireMap:
"""Tests ``_default_wire_map`` helper function."""
def test_empty(self):
"""Test creating an empty wire map"""
wire_map = default_wire_map([])
assert wire_map == {}
def test_simple(self):
"""Test creating a wire map with wires that do not have successive ordering"""
ops = [qml.PauliX(0), qml.PauliX(2), qml.PauliX(1)]
wire_map = default_wire_map(ops)
assert wire_map == {0: 0, 2: 1, 1: 2}
def test_string_wires(self):
"""Test wire map works with string labelled wires."""
ops = [qml.PauliY("a"), qml.CNOT(wires=("b", "c"))]
wire_map = default_wire_map(ops)
assert wire_map == {"a": 0, "b": 1, "c": 2}
class TestConvertWireOrder:
"""Tests the ``convert_wire_order`` utility function."""
def test_no_wire_order(self):
"""Test that a wire map is produced if no wire order is passed."""
ops = [qml.PauliX(0), qml.PauliX(2), qml.PauliX(1)]
wire_map = convert_wire_order(ops)
assert wire_map == {0: 0, 2: 1, 1: 2}
def test_wire_order_ints(self):
"""Tests wire map produced when initial wires are integers."""
ops = [qml.PauliX(0), qml.PauliX(2), qml.PauliX(1)]
wire_order = [2, 1, 0]
wire_map = convert_wire_order(ops, wire_order)
assert wire_map == {2: 0, 1: 1, 0: 2}
def test_wire_order_str(self):
"""Test wire map produced when initial wires are strings."""
ops = [qml.CNOT(wires=("a", "b")), qml.PauliX("c")]
wire_order = ("c", "b", "a")
wire_map = convert_wire_order(ops, wire_order)
assert wire_map == {"c": 0, "b": 1, "a": 2}
def test_show_all_wires_false(self):
"""Test when `show_all_wires` is set to `False` only used wires are in the map."""
ops = [qml.PauliX("a"), qml.PauliY("c")]
wire_order = ["a", "b", "c", "d"]
wire_map = convert_wire_order(ops, wire_order, show_all_wires=False)
assert wire_map == {"a": 0, "c": 1}
def test_show_all_wires_true(self):
"""Test when `show_all_wires` is set to `True` everything in ``wire_order`` is included."""
ops = [qml.PauliX("a"), qml.PauliY("c")]
wire_order = ["a", "b", "c", "d"]
wire_map = convert_wire_order(ops, wire_order, show_all_wires=True)
assert wire_map == {"a": 0, "b": 1, "c": 2, "d": 3}
class TestUnwrapControls:
"""Tests the ``unwrap_controls`` utility function."""
# pylint:disable=too-few-public-methods
@pytest.mark.parametrize(
"op,expected_control_wires,expected_control_values",
[
(qml.PauliX(wires="a"), Wires([]), None),
(qml.CNOT(wires=["a", "b"]), Wires("a"), None),
(qml.ctrl(qml.PauliX(wires="b"), control="a"), Wires("a"), [True]),
(
qml.ctrl(qml.PauliX(wires="b"), control=["a", "c", "d"]),
Wires(["a", "c", "d"]),
[True, True, True],
),
(
qml.ctrl(qml.PauliZ(wires="c"), control=["a", "d"], control_values=[True, False]),
Wires(["a", "d"]),
[True, False],
),
(
qml.ctrl(
qml.CRX(0.3, wires=["c", "e"]),
control=["a", "b", "d"],
control_values=[True, False, False],
),
Wires(["a", "b", "d", "c"]),
[True, False, False, True],
),
(
qml.ctrl(qml.CNOT(wires=["c", "d"]), control=["a", "b"]),
Wires(["a", "b", "c"]),
[True, True, True],
),
(
qml.ctrl(qml.ctrl(qml.CNOT(wires=["c", "d"]), control=["a", "b"]), control=["e"]),
Wires(["e", "a", "b", "c"]),
[True, True, True, True],
),
(
qml.ctrl(
qml.ctrl(
qml.CNOT(wires=["c", "d"]), control=["a", "b"], control_values=[False, True]
),
control=["e"],
control_values=[False],
),
Wires(["e", "a", "b", "c"]),
[False, False, True, True],
),
],
)
def test_multi_defined_control_values(
self, op, expected_control_wires, expected_control_values
):
"""Test a multi-controlled single-qubit operation with defined control values."""
control_wires, control_values = unwrap_controls(op)
assert control_wires == expected_control_wires
assert control_values == expected_control_values
| [
"noreply@github.com"
] | therooler.noreply@github.com |
1b40eb68b83d8a1ec91107ae2cbc6b3056e9faa8 | ce083128fa87ca86c65059893aa8882d088461f5 | /python/flask-mail-labs/.venv/lib/python2.7/site-packages/simplekv/_compat.py | e6366d3e29f008720a7016d76636ecbd2a50fdef | [] | no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 824 | py | """Helpers for python 2/3 compatibility"""
import sys
PY2 = sys.version_info[0] == 2
if not PY2:
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
if not PY2:
from urllib.parse import quote as url_quote, unquote as url_unquote
else:
from urllib import quote as url_quote
from urllib import unquote as url_unquote
if not PY2:
from urllib.parse import urlparse
else:
from urlparse import urlparse
if not PY2:
imap = map
else:
from itertools import imap
if not PY2:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
if not PY2:
import pickle
else:
try:
import cPickle as pickle
except ImportError:
import pickle
xrange = range if not PY2 else xrange
| [
"marcosptf@yahoo.com.br"
] | marcosptf@yahoo.com.br |
9728ac841e892564c6b2a480d4bea9f443d747fc | 42652e0f4025eed896fbe91667bd495523fd4c3b | /app/config/__init__.py | 6c813f40f8bab2bce4dae58abbd9b637d3b88743 | [] | no_license | g10guang/whoyoungblog | 47ef0a8ae1e75e422618fd59f4666287a09c4ec2 | a3fd93bd7591700f492ae8806f7f1f2c32643b27 | refs/heads/master | 2022-12-15T14:44:45.476103 | 2017-11-23T11:55:41 | 2017-11-23T11:55:41 | 106,388,722 | 3 | 0 | null | 2022-12-08T00:39:06 | 2017-10-10T08:19:56 | Python | UTF-8 | Python | false | false | 745 | py | #!/usr/bin/env python3
# coding=utf-8
# author: Xiguang Liu<g10guang@foxmail.com>
# 2017-09-10 18:11
import os
def load_config():
"""
    Load the configuration file based on the BLOG_MODE environment variable.
:return:
"""
MODE = os.environ.get('BLOG_MODE')
try:
if MODE == 'PRODUCT':
            # In production, redirect stdout and stderr to a file
import sys
f = open('std.txt', 'w')
sys.stderr = f
sys.stdout = f
from app.config.product import ProductConfig
return ProductConfig
else:
from app.config.develop import DevelopConfig
return DevelopConfig
except ImportError:
from app.config.default import Config
return Config
| [
"g10guang@gmail.com"
] | g10guang@gmail.com |
484c2ced2ec5392403047eef0cd58b15067fc246 | 2d6557110f59d1c28bc83d58e60bd8de8e64d693 | /formatloader.py | 9cec2d59dace465a76001e164fcec1ddf444695a | [
"MIT"
] | permissive | restful-open-annotation/oa-adapter | 8067df0227e3820ef84c493f518c80c463c2e431 | d40f935cdef380675e1efa051ad2e1f4b8c8ae7a | refs/heads/master | 2021-01-23T22:39:05.060992 | 2015-03-18T14:19:26 | 2015-03-18T14:19:26 | 31,967,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | #!/usr/bin/env python
"""File format support module loader.
The import and export formats supported by the adapter are determined
at runtime based on the format support "modules" found in the formats/
directory.
"""
import sys
import os
# Directory containing format modules.
FORMAT_DIRECTORY = 'formats'
# Attributes that every module should have.
# format_name: string giving the short name of the format.
# mimetypes: list of MIME types that should be associated with the format.
# from_jsonld: function rendering JSON-LD to string in the format.
# to_jsonld: function parsing string in the format to JSON-LD.
REQUIRED_ATTRIBUTES = [
'format_name',
'mimetypes',
'from_jsonld',
'to_jsonld',
]
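# Illustrative sketch (added; not part of this repository): a module placed in
# formats/ with a name ending in `_format.py`, e.g. json_format.py, would pass
# _is_valid() if it exposed the four attributes above. The argument lists below
# are placeholders -- the real format modules define their own signatures.
#
#   import json
#
#   format_name = 'json'
#   mimetypes = ['application/json']
#
#   def from_jsonld(data):
#       return json.dumps(data)
#
#   def to_jsonld(text):
#       return json.loads(text)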
def _is_valid(m, err=None):
"""Returns if the given module has all required attributes."""
if err is None:
err = sys.stderr
for a in REQUIRED_ATTRIBUTES:
try:
getattr(m, a)
except AttributeError, e:
print >> err, 'Module %s is not valid: %s' % (m.__name__, e)
return False
return True
def _is_format_module(fn):
if fn == '__init__.py':
return False
return fn.endswith('_format.py')
def _load_format_module(dir, mn):
if mn.endswith('.py'):
mn = mn[:-3]
try:
mod = __import__(dir, fromlist=[mn])
except:
raise
return getattr(mod, mn)
def load(dir=FORMAT_DIRECTORY):
"""Load format processing modules."""
# Load everything matching the naming conventions.
modules = []
for fn in (f for f in os.listdir(dir) if _is_format_module(f)):
module = _load_format_module(dir, fn)
if module is None:
continue
modules.append(module)
# Filter to exclude modules that don't have the required attributes.
valid = []
seen = set()
for module in (m for m in modules if _is_valid(m)):
if module.format_name in seen:
print >> sys.stderr, 'Duplicate format %s' % module.format_name
else:
valid.append(module)
seen.add(module)
return valid
| [
"sampo.pyysalo@gmail.com"
] | sampo.pyysalo@gmail.com |
65dbc73ef2e946e62d80b18d9cbf68805e58637a | abc422f58ad053bcbb6653ba15b66e46d220a199 | /scripts/performance/sanity.py | 330359f2b4452e9d5401bc95c45d30a88e4ebebb | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | tungstenfabric/tf-test | d3efff59bca931b614d0008260b2c0881d1fc009 | 4b9eca7eb182e5530223131ecab09d3bdf366407 | refs/heads/master | 2023-02-26T19:14:34.345423 | 2023-01-11T08:45:18 | 2023-01-11T10:37:25 | 265,231,958 | 8 | 22 | null | 2023-02-08T00:53:29 | 2020-05-19T11:46:12 | Python | UTF-8 | Python | false | false | 3,973 | py | import os
import fixtures
import testtools
from common.connections import ContrailConnections
from tcutils.wrappers import preposttest_wrapper
from common.contrail_test_init import ContrailTestInit
from performance.verify import PerformanceTest
class PerformanceSanity(testtools.TestCase, PerformanceTest):
def setUp(self):
super(PerformanceSanity, self).setUp()
if 'TEST_CONFIG_FILE' in os.environ:
self.input_file = os.environ.get('TEST_CONFIG_FILE')
else:
self.input_file = 'params.ini'
self.inputs = ContrailTestInit(self.input_file)
self.connections = ContrailConnections(self.inputs)
self.agent_inspect = self.connections.agent_inspect
self.quantum_h = self.connections.quantum_h
self.nova_h = self.connections.nova_h
self.vnc_lib = self.connections.vnc_lib
self.logger = self.inputs.logger
self.analytics_obj = self.connections.analytics_obj
def cleanUp(self):
self.logger.info("Cleaning up")
super(PerformanceSanity, self).cleanUp()
@preposttest_wrapper
def test_performance_netperf_within_vn_TCP_STREAM(self):
"""Check the throughput between the VM's within the same VN for TCP_STREAM"""
return self.test_check_netperf_within_vn(duration=60)
@preposttest_wrapper
def test_performance_netperf_within_vn_TCP_STREAM_with_MPLSoGRE(self):
"""Check the throughput between the VM's within the same VN for TCP_STREAM using MPLSoGRE"""
return self.test_check_netperf_within_vn(encap='MPLSoGRE', duration=60)
@preposttest_wrapper
def test_performance_netperf_within_vn_TCP_RR(self):
"""TCP Request/Response test between the VM's within the same VN"""
return self.test_check_netperf_within_vn(test_name='TCP_RR')
@preposttest_wrapper
def test_performance_netperf_within_vn_with_UDP_STREAM(self):
"""Check the throughput between the VM's within the same VN for UDP_STREAM"""
return self.test_check_netperf_within_vn(test_name='UDP_STREAM', duration=60)
@preposttest_wrapper
def test_performance_netperf_within_vn_UDP_STREAM_with_MPLSoGRE(self):
"""Check the throughput between the VM's within the same VN for UDP_STREAM using MPLSoGRE"""
return self.test_check_netperf_within_vn(encap='MPLSoGRE', duration=60)
@preposttest_wrapper
def test_performance_netperf_within_vn_UDP_RR(self):
"""UDP Request/Response test between the VM's within the same VN"""
return self.test_check_netperf_within_vn(test_name='UDP_RR')
@preposttest_wrapper
def test_performance_netperf_in_diff_vn(self):
"""Check the throughput between the VM's different VN"""
return self.test_check_netperf_within_vn(no_of_vn=2)
@preposttest_wrapper
def test_performance_ping_latency_within_vn(self):
"""Check the ping latency between the VM's within the same VN"""
return self.test_ping_latency()
@preposttest_wrapper
def test_performance_ping_latency_within_vn_icmp_flood(self):
"""Check the ping latency between the VM's within the same VN"""
return self.test_ping_latency(no_of_pkt=20)
@preposttest_wrapper
def test_flow_setup_within_vn_1000_flows(self):
"""Check the flow setup rate between the VM's within the same VN"""
return self.test_check_flow_setup_within_vn(no_of_flows=1000, dst_port_min=1000, dst_port_max=2001,
src_port_min=10000, src_port_max=10000)
@preposttest_wrapper
def test_flow_setup_within_vn_20000_flows(self):
"""Check the flow setup rate between the VM's within the same VN"""
return self.test_check_flow_setup_within_vn(no_of_flows=20000, dst_port_min=1000, dst_port_max=21000,
src_port_min=10000, src_port_max=10000)
if __name__ == '__main__':
unittest.main()
| [
"andrey-mp@yandex.ru"
] | andrey-mp@yandex.ru |
489a11f287a6771c194f8f5dd5b9cd086e85815e | c676bf5e77ba43639faa6f17646245f9d55d8687 | /tests/ut/python/optimizer/test_debug_location.py | 80793f37a130c8579a93c052e7e05c095ab1897f | [
"Apache-2.0",
"BSD-3-Clause-Open-MPI",
"MPL-2.0-no-copyleft-exception",
"LGPL-2.1-only",
"BSD-3-Clause",
"MPL-2.0",
"MPL-1.0",
"Libpng",
"AGPL-3.0-only",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"MIT",
"IJG",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"GPL-2.0-only",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | zhengnengjin/mindspore | 1e2644e311f54a8bd17010180198a46499e9c88f | 544b859bb5f46611882749088b44c5aebae0fba1 | refs/heads/master | 2022-05-13T05:34:21.658335 | 2020-04-28T06:39:53 | 2020-04-28T06:39:53 | 259,522,589 | 2 | 0 | Apache-2.0 | 2020-04-28T03:35:33 | 2020-04-28T03:35:33 | null | UTF-8 | Python | false | false | 5,973 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import pytest
from mindspore import context
from mindspore import Tensor, Parameter
from mindspore.nn.wrap.cell_wrapper import WithLossCell
from mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager
from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell
from mindspore.ops import operations as P
from mindspore.nn.optim import Momentum
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
from mindspore.train import Model
from ....dataset_mock import MindData
from mindspore.nn.optim import Lamb
from mindspore.ops._utils import _get_broadcast_shape
from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, prim_attr_register
from mindspore.ops._grad.grad_base import bprop_getters
from mindspore.ops._grad.grad_math_ops import binop_grad_common
context.set_context(mode=context.GRAPH_MODE)
class MockNeg(PrimitiveWithInfer):
@prim_attr_register
def __init__(self):
"""init MockNeg"""
self.init_prim_io_names(inputs=['x'], outputs=['y'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
raise TypeError("InferError")
return input_x
class MockSub(PrimitiveWithInfer):
@prim_attr_register
def __init__(self):
"""init MockSub"""
self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
def infer_shape(self, x_shape, y_shape):
return _get_broadcast_shape(x_shape, y_shape)
def infer_dtype(self, x_dtype, y_dtype):
return x_dtype
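# Added note: registering the getter below makes automatic differentiation use
# this bprop whenever MockSub appears in a graph; because it builds the gradient
# of the second input with MockNeg, whose infer_dtype deliberately raises,
# constructing the backward graph is what triggers the failure exercised in
# test_compile_grad_error.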
@bprop_getters.register(MockSub)
def get_bprop_mock_sub(self):
"""Grad definition for `MockSub` operation."""
neg_func = MockNeg()
def bprop(x, y, out, dout):
return binop_grad_common(x, y, dout, neg_func(dout))
return bprop
class Net(nn.Cell):
def __init__(self, in_features, out_features):
super(Net, self).__init__()
self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight")
self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias")
self.matmul = P.MatMul()
self.add = P.TensorAdd()
def construct(self, input):
output = self.add(self.matmul(input, self.weight), self.bias)
return output
class NetFP16(nn.Cell):
def __init__(self, in_features, out_features):
super(NetFP16, self).__init__()
self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight")
self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias")
self.matmul = P.MatMul()
self.add = P.TensorAdd()
self.cast = P.Cast()
def construct(self, input):
output = self.cast(self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
self.cast(self.bias, mstype.float16)), mstype.float32)
return output
def get_axis(x):
shape = F.shape(x)
length = F.tuple_len(shape)
perm = F.make_range(0, length)
return perm
class MSELoss(nn.Cell):
def __init__(self):
super(MSELoss, self).__init__()
self.reduce_sum = P.ReduceSum()
self.square = P.Square()
self.reduce_mean = P.ReduceMean()
self.sub = MockSub()
def construct(self, data, label):
diff = self.sub(data, label)
return self.reduce_mean(self.square(diff), get_axis(diff))
class NegCell(nn.Cell):
def __init__(self):
super(NegCell, self).__init__()
self.neg = MockNeg()
def construct(self, x):
return self.neg(x)
class Net3(nn.Cell):
def __init__(self):
super().__init__()
self.tuple = (NegCell(), nn.ReLU())
def construct(self, x):
for op in self.tuple:
x = op(x)
return x
def test_op_forward_infererror():
input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
input_me = Tensor(input_np)
net = Net3()
with pytest.raises(TypeError) as e:
net(input_me)
class SequenceNet(nn.Cell):
def __init__(self):
super().__init__()
self.seq = nn.SequentialCell([nn.AvgPool2d(3, 1), nn.ReLU(), nn.Flatten()])
def construct(self, x):
x = self.seq(x) + bbb
return x
def test_sequential_resolve_error():
input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
input_me = Tensor(input_np)
net = SequenceNet()
with pytest.raises(RuntimeError) as e:
net(input_me)
def test_compile_grad_error():
inputs = Tensor(np.ones([16, 16]).astype(np.float32))
label = Tensor(np.zeros([16, 16]).astype(np.float32))
lr = Tensor(np.ones([1], np.float32) * 0.1)
net = NetFP16(16, 16)
loss = MSELoss()
optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)
net_with_loss = WithLossCell(net, loss)
scale_manager = DynamicLossScaleManager()
update_cell = scale_manager.get_update_cell()
train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell = update_cell)
train_network.set_train()
with pytest.raises(TypeError) as e:
train_network(inputs, label)
print (e)
| [
"leon.wanghui@huawei.com"
] | leon.wanghui@huawei.com |
dbe7062912a362af2c02a691b4f36f395063b5d0 | b45d66c2c009d74b4925f07d0d9e779c99ffbf28 | /tests/integration_tests/retail_tests/implementation/retail_client_controller_test_collection.py | 332c22c2cefad9e1f013767370ee271605807543 | [] | no_license | erezrubinstein/aa | d96c0e39762fe7aaeeadebbd51c80b5e58576565 | a3f59ba59519183257ed9a731e8a1516a4c54b48 | refs/heads/master | 2021-03-12T23:44:56.319721 | 2016-09-18T23:01:17 | 2016-09-18T23:01:17 | 22,665,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,726 | py | from __future__ import division
from tests.integration_tests.framework.svc_test_collection import ServiceTestCollection
from retail.v010.data_access.controllers.client_controller import ClientController
from retail.v010.data_access.controllers.user_controller import UserController
from retail.v010.data_access.retail_data_helper import RetailDataHelper
class RetailClientControllerTestCollection(ServiceTestCollection):
test_user_start = 456
test_client_start = 456
test_user_counter = 0
test_client_counter = 0
def initialize(self):
self.user_controller = UserController()
self.client_controller = ClientController()
self.retail_data_helper = RetailDataHelper(self.config, self.user_controller)
self.retail_data_helper.add_default_data()
def setUp(self):
self.__get_default_users()
def tearDown(self):
pass
@classmethod
def increment_test_user_counter(cls):
cls.test_user_counter += 1
@classmethod
def increment_test_client_counter(cls):
cls.test_client_counter += 1
##------------------------------------ Tests --------------------------------------##
def test_create_client(self):
client = self.__create_test_client()
correct_dict = {
'user_emails': [],
'description': u'company set out to take over the world',
'contact_email': u'taquinas@nexusri.com',
'contact_name': u'Thomas Aquinas',
'contact_phone': u'555-123-1234'
}
self.test_case.assertDictContainsSubset(correct_dict, client)
def test_get_client(self):
client = self.user_controller.Client.get('Signal Data')
self.test_case.assertDictContainsSubset(self.client_signal, client.serialize())
def test_find_client(self):
client = self.user_controller.Client.find(name='Signal Data')
self.test_case.assertDictContainsSubset(self.client_signal, client.serialize())
def test_find_clients(self):
client = self.__create_test_client()
clients = self.user_controller.Client.find_all(name=client["name"])
self.test_case.assertEqual(len(clients), 1)
def test_update_client(self):
client = self.__create_test_client(serialize=False)
update_dict = {
'name': 'Arnold Schwarzenegger',
'description': "Oh, you think you're bad, huh? You're a ******* choir boy compared to me! A CHOIR BOY!",
'contact_name': 'Jericho Cane',
'contact_email': 'jcane@nexusri.com',
'contact_phone': '555-9922342342342343242313'
}
self.client_controller.update_client('test@nexusri.com', client["name"], update_dict)
updated_client = self.client_controller.Client.get(update_dict['name'])
self.test_case.assertDictContainsSubset(update_dict, updated_client.serialize())
def test_delete_client(self):
# create blue shift client
client = self.__create_test_client()
        # create a user attached to the client so we can check what happens to its users when the client is deleted
ali_g = self.__create_test_user(client_name=client["name"])
        # delete the client, then verify that both the client and its user are gone
self.client_controller.delete_client(self.user_admin['email'], client["name"])
client = self.client_controller.Client.get(client["name"])
self.test_case.assertIsNone(client)
ali_g = self.client_controller.User.get(ali_g["email"])
self.test_case.assertIsNone(ali_g)
##------------------------------------ Private helpers --------------------------------------##
def __get_default_users(self):
self.user_admin = self.user_controller.User.get("admin@nexusri.com", serialize=True)
self.client_signal = self.user_controller.Client.get("Signal Data", serialize=True)
self.role_user = self.user_controller.Role.get('user', serialize=True)
self.role_client_support = self.user_controller.Role.get('client_support', serialize=True)
def __create_test_user(self, client_name, actor_email='test@nexusri.com', serialize=True):
password = 'yoyoyoyo%s' % (self.test_user_counter + self.test_user_start)
user_dict = {
'name': "test_user_%s" % (self.test_user_counter + self.test_user_start),
'email': "test_email_%s@nexusri.com" % (self.test_user_counter + self.test_user_start),
'password': password,
'active': True,
'client': client_name,
'retail_access': True,
'retailer_access': False,
'roles': ['user']
}
user = self.user_controller.create_user(actor_email, user_dict, serialize=False)
user.update(active=True, password=user_dict["password"])
updated_user = self.user_controller.User.get(user.email, serialize=False)
self.increment_test_user_counter()
# Return unhashed password separately, because it's not returned in user object
return updated_user.serialize() if updated_user and serialize else updated_user
def __create_test_client(self, actor_email='test@nexusri.com', serialize=True):
client_dict = {
'name': 'test_client_%s' % (self.test_client_counter + self.test_client_start),
'description': 'company set out to take over the world',
'contact_name': 'Thomas Aquinas',
'contact_email': 'taquinas@nexusri.com',
'contact_phone': '555-123-1234'
}
client = self.client_controller.create_client(actor_email, client_dict, serialize=serialize)
self.increment_test_client_counter()
return client
| [
"erezrubinstein@hotmail.com"
] | erezrubinstein@hotmail.com |
91d65a5caf20739fb4868145dd7e221f99f2e082 | 0eb599c3bbfa6e5b31516913b88cc9db3a1311ce | /ARC/arc097c.py | 33bff188d24ca2f5e1af4718c9b4e6b11b22207b | [] | no_license | Linus-MK/AtCoder | 5b84dc88c2d2773d0f97ed18265d303290da7879 | a587e89a9e0c2ab4d36b09176bcc95e901e14326 | refs/heads/master | 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | s = input()
k = int(input())
substring_list = []
for i in range(1, 5+1):
for start in range(len(s) - i + 1):
substring_list.append(s[start:start+i])
# print(substring_list)
substring_list = sorted(set(substring_list))
print(substring_list[k-1])
# https://stackoverflow.com/questions/2931672/what-is-the-cleanest-way-to-do-a-sort-plus-uniq-on-a-python-list
# When you need "sort + unique", converting to a set once and then sorting works well
| [
"13600386+Linus-MK@users.noreply.github.com"
] | 13600386+Linus-MK@users.noreply.github.com |
8f3ea69126b695c4a7334a63b869e3810b969865 | fcd64a87118a8c1e060449d8fd5b02034ac3dea7 | /test/test_payments_search_body.py | c4e5d43d4fe46a88ccc618f278f0274b2083b92f | [] | no_license | carlosgalvez-tiendeo/python-paycomet_client | 2b68e4e1f7cfbab81d50357513f79753cf8c2f0e | 71f1fe29495ce67e37aaed4ecc9acf5994de011a | refs/heads/master | 2023-08-03T02:27:50.857164 | 2021-06-16T13:04:46 | 2021-06-16T13:04:46 | 377,492,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # coding: utf-8
"""
PAYCOMET REST API
PAYCOMET API REST for customers. # noqa: E501
OpenAPI spec version: 2.28.0
Contact: tecnico@paycomet.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import paycomet_client
from paycomet_client.models.payments_search_body import PaymentsSearchBody # noqa: E501
from paycomet_client.rest import ApiException
class TestPaymentsSearchBody(unittest.TestCase):
"""PaymentsSearchBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPaymentsSearchBody(self):
"""Test PaymentsSearchBody"""
# FIXME: construct object with mandatory attributes with example values
# model = paycomet_client.models.payments_search_body.PaymentsSearchBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"carlos.galvez@tiendeo.com"
] | carlos.galvez@tiendeo.com |
d774292b3bb342a201bdfb191b2b2bb3c62edcb1 | f216fec073bcb94d34fadf3b149f6ad8e0541198 | /scorestatistics_ui.py | d815d8c4a97c198a142806d8765979e797604d6d | [] | no_license | liuyuhang791034063/ScoreSystem | 44a6742a72a34e1673c46f3b00e2cdfd6553979b | ecb53a6dc2ae490ddc1028aa67b99a187fe366d9 | refs/heads/master | 2020-03-24T20:43:21.746962 | 2018-09-20T09:49:58 | 2018-09-20T09:49:58 | 142,993,767 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'scorestatistics.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Score(object):
def setupUi(self, Score):
Score.setObjectName("Score")
Score.resize(537, 412)
self.tableWidget = QtWidgets.QTableWidget(Score)
self.tableWidget.setGeometry(QtCore.QRect(50, 110, 421, 241))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.label = QtWidgets.QLabel(Score)
self.label.setGeometry(QtCore.QRect(40, 50, 461, 31))
font = QtGui.QFont()
        font.setFamily("微软雅黑")
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.retranslateUi(Score)
QtCore.QMetaObject.connectSlotsByName(Score)
def retranslateUi(self, Score):
_translate = QtCore.QCoreApplication.translate
        Score.setWindowTitle(_translate("Score", "成绩统计"))
self.label.setText(_translate("Score", "TextLabel"))
| [
"liuyuhang791034063@qq.com"
] | liuyuhang791034063@qq.com |
d5f632db1dc076d3677f639263dbe4cc575ad8da | a62fdd0beb6c47cc704c1192b68b0bcfcd024304 | /Python/I/07-CURSORS/1/form.py | 12e820ffa975daf039e565eaa2572d6436bb302e | [] | no_license | a6461/Qt-PyQt | da1895b4faccda80b8079ecdca79f1ea525daa0a | 404bd7fbbc432ebeaa1a486fc8e005d47aed9cfd | refs/heads/master | 2020-03-14T22:16:48.714825 | 2018-06-12T20:45:58 | 2018-06-12T20:45:58 | 131,817,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | from ui_form import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class Form(Ui_Form, QWidget):
names = []
cursors = []
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
self.setFixedSize(self.size())
self.names = self.enumToStr(Qt, Qt.CursorShape)
self.cursors = [QCursor(Qt.CursorShape(i))
for i in range(len(self.names))]
self.pushButton.setProperty(
'tag', self.names.index('ArrowCursor'))
def enumToStr(self, namespace, enum):
names = {}
for value in dir(namespace):
key = getattr(namespace, value)
if isinstance(key, enum):
names[key] = value
names = [value for (key, value) in sorted(names.items())]
return names
def on_pushButton_mousePressed(self, event):
k = self.pushButton.property('tag')
c = len(self.names)
if event.buttons() == Qt.LeftButton:
k = (k + 1) % c
elif event.buttons() == Qt.RightButton:
k = (k - 1 + c) % c
self.pushButton.setText(self.names[k])
self.pushButton.setCursor(self.cursors[k])
self.pushButton.setProperty('tag', k)
| [
"a6461@yandex.ru"
] | a6461@yandex.ru |
f18e9d3496ede63fe418afc7bf811b6426910845 | 74dff48428867fd99ab780a96357619285cadf73 | /finch-collector-master/finchcollector/settings.py | 93fd1a44be8f4df50c9d949892b975b5e8579703 | [] | no_license | HermanSidhu96/finchcollector | 28ed65455dc139a23e27eeddd9939fa58d6c69ea | 561751bdbe77537cf341bbbb7d180ac327428e83 | refs/heads/master | 2022-12-15T10:00:03.121582 | 2020-09-16T02:50:18 | 2020-09-16T02:50:18 | 295,905,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,087 | py | """
Django settings for finchcollector project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wn^p5^p@1=1$t2l0aywg2&ay-&-mwjqwtjm)83w@b^ojs!_%)('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'main_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'finchcollector.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'finchcollector.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'finchcollector',
'USER': 'postgres',
'PASSWORD' : ''
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"you@example.com"
] | you@example.com |
88ce8bab5ece0b9764f01300ba6eab12ebc66fd0 | e8db7be2278994660a1cb1cde3ac479d802b5b05 | /my_datasets/zsre_with_description.py | e6687188be4bc399c3ea178667e0d23a9d40a016 | [
"MIT"
] | permissive | INK-USC/hypter | 4cbf8929bc49d01e648197381c94c22fea233b95 | 732551e1e717b66ad26ba538593ed184957ecdea | refs/heads/main | 2023-03-28T00:27:53.887999 | 2021-04-02T04:37:52 | 2021-04-02T04:37:52 | 318,406,884 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,781 | py | import os
import json
import re
import string
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from .utils import MyQADataset, MyDataLoader
from .zsre import ZSREData
from .zsre_relations import ZSRE_RELATIONS
class ZSREWithDescriptionData(ZSREData):
def load_dataset(self, tokenizer, do_return=False):
self.tokenizer = tokenizer
postfix = 'Withdescription-' + tokenizer.__class__.__name__.replace("zer", "zed")
preprocessed_path = os.path.join(
"/".join(self.data_path.split("/")[:-1]),
self.data_path.split("/")[-1].replace(".json", "-{}.json".format(postfix)))
if self.load and os.path.exists(preprocessed_path):
# load preprocessed input
self.logger.info("Loading pre-tokenized data from {}".format(preprocessed_path))
with open(preprocessed_path, "r") as f:
input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, \
metadata = json.load(f)
else:
print("Start tokenizing ... {} instances".format(len(self.data)))
questions = [add_description(d["input"]) for d in self.data]
if self.data_type != "test":
answers = [[item["answer"] for item in d["output"]] for d in self.data]
else:
answers = [['TEST_NO_ANSWER'] for d in self.data]
answers, metadata = self.flatten(answers)
if self.args.do_lowercase:
questions = [question.lower() for question in questions]
answers = [answer.lower() for answer in answers]
if self.args.append_another_bos:
questions = ["<s> "+question for question in questions]
answers = ["<s> " +answer for answer in answers]
print(questions[:10])
print(answers[:10])
print("Tokenizing Input ...")
question_input = tokenizer.batch_encode_plus(questions,
pad_to_max_length=True,
max_length=self.args.max_input_length)
print("Tokenizing Output ...")
answer_input = tokenizer.batch_encode_plus(answers,
pad_to_max_length=True)
input_ids, attention_mask = question_input["input_ids"], question_input["attention_mask"]
decoder_input_ids, decoder_attention_mask = answer_input["input_ids"], answer_input["attention_mask"]
if self.load:
preprocessed_data = [input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
metadata]
with open(preprocessed_path, "w") as f:
json.dump([input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
metadata], f)
self.dataset = MyQADataset(input_ids, attention_mask,
decoder_input_ids, decoder_attention_mask,
in_metadata=None, out_metadata=metadata,
is_training=self.is_training)
self.logger.info("Loaded {} examples from {} data".format(len(self.dataset), self.data_type))
if do_return:
return self.dataset
def add_description(input_str):
split_idx = input_str.index('[SEP]')
rel_name = input_str[split_idx+6:]
description = ZSRE_RELATIONS[rel_name]["description"]
return "{} [SEP] description: {}".format(input_str, description)
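# A rough illustration of what add_description produces; the relation name below is only an
# assumed example, and the actual description text is looked up in ZSRE_RELATIONS:
#   add_description("Albert Einstein [SEP] place of birth")
#   -> "Albert Einstein [SEP] place of birth [SEP] description: <description of 'place of birth'>"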
def get_accuracy(prediction, groundtruth):
if type(groundtruth)==list:
if len(groundtruth)==0:
return 0
return np.max([int(prediction==gt) for gt in groundtruth])
    return int(prediction == groundtruth)
def get_exact_match(prediction, groundtruth):
if type(groundtruth)==list:
if len(groundtruth)==0:
return 0
return np.max([get_exact_match(prediction, gt) for gt in groundtruth])
return (normalize_answer(prediction) == normalize_answer(groundtruth))
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
| [
"qinyuany@usc.edu"
] | qinyuany@usc.edu |
5d86d8330729cfa6f01aaf6860470f7b01e20f0b | 2bd3b981412f81ff6d5085394aad71258bed3c1b | /hw3/scripts/validate_svm_fusion.py | fdd4ac3584a4ca43f66baa2ed4b2acc84beaf37a | [] | no_license | JayC1208/11-775-Large-Scale-Multimedia-Analysis | e4d8e685cca437a6431d2f531e25d80bb1ceeeb9 | ca88fa2c0701477e3fdb424374ba6cb1b9126c43 | refs/heads/master | 2023-08-30T21:19:09.110017 | 2017-03-16T02:52:38 | 2017-03-16T02:52:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,947 | py | from sklearn.metrics import roc_curve
from sklearn.calibration import CalibratedClassifierCV
from sklearn import preprocessing
import commands
import numpy as np
import os
from sklearn.svm.classes import SVC
from sklearn.svm import LinearSVC
import cPickle
import sys
import pickle
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
import itertools
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from brew.base import Ensemble, EnsembleClassifier
from brew.stacking.stacker import EnsembleStack, EnsembleStackClassifier
from brew.combination.combiner import Combiner
def import_imtraj_txt(file_path):
imtraj_line = (open(file_path,"r").readlines()[0]).strip()
fields = imtraj_line.split(' ')
field_tuples = [x.split(':') for x in fields]
# The sparse vector position is 1 based
num_fields = [ (int(x[0])-1, np.float(x[1])) for x in field_tuples]
new_vect = np.zeros(32768)
for field_id, field_val in num_fields:
new_vect[field_id] = field_val
return new_vect
if __name__ == '__main__':
if len(sys.argv) != 6:
print "Usage: {0} model_file feat_dir event_name round_num output_file".format(sys.argv[0])
print "output_file path to save the prediction score"
print "event_name P001/2/3"
print "round_num 0,1,2?"
print "feat_dir -- dir of feature files"
print "model_file reads from the svm model train_file_P00X_Xround_imtraj/SIFT/CNN"
exit(1)
model_file = sys.argv[1]
feat_dir = sys.argv[2]
event_name=sys.argv[3]
round_num = int(sys.argv[4])
output_file = sys.argv[5]
pipe_lrSVC=pickle.load(open(model_file+'.pickle','rb'))
test_list="list/"+event_name+"_validation_"+str(round_num)
X=np.asarray([])
count=0
for line in open(test_list,"r"):
count=count+1
# if count%100==0:
# print count
audio_name=line.split(" ")[0]
label=line.split(" ")[1].split("\n")[0]
if "imtraj" in feat_dir:
feat_vec=import_imtraj_txt(feat_dir+audio_name+".spbof")
else:
feat_vec=np.genfromtxt(feat_dir+audio_name,delimiter=";")
if len(X)==0:
X=[feat_vec]
else:
X=np.append(X,[feat_vec],axis=0)
Y=pipe_lrSVC.predict_proba(preprocessing.scale(X))
groundtruth_label="list/"+event_name+"_validation_label_"+str(round_num)
Y_truth=[]
for line in open(groundtruth_label,"r"):
Y_truth+=[int(line.strip())]
fclassification_write=open(output_file.replace("pred/","classification/"),"w")
Y_discrete=pipe_lrSVC.predict(preprocessing.scale(X))
#print Y_discrete
#Y_discrete=[1 if y[1]>y[0] else 0 for y in Y_discrete]
for i in range(len(Y_discrete)):
fclassification_write.write(str(Y_discrete[i])+"\n")
fclassification_write.close()
fwrite=open(output_file,"w")
for i in range(len(Y)):
fwrite.write(str(Y[i][1])+"\n")
fwrite.close()
ap_output=commands.getstatusoutput("ap "+groundtruth_label+" "+output_file)
print model_file.split(".")[1]+" 3 FOLD ROUND "+str(round_num)+" CROSS VALIDATION RESULT MAP: "+ap_output[1].split(": ")[1]
print model_file.split(".")[1]+" 3 FOLD ROUND "+str(round_num)+" CROSS VALIDATION RESULT CLASS ACCURACY: "+str(accuracy_score(Y_truth,Y_discrete))
print model_file.split(".")[1]+" 3 FOLD ROUND "+str(round_num)+" CROSS VALIDATION RESULT TRUE POSITIVE RATE: "+str(recall_score(Y_truth,Y_discrete))
CM=confusion_matrix(Y_truth,Y_discrete)
# print str(CM[1][1]*1.0/(CM[1][1]+CM[1][0]))
print model_file.split(".")[1]+" 3 FOLD ROUND "+str(round_num)+" CROSS VALIDATION RESULT TRUE NEGATIVE RATE: "+str(CM[0][0]*1.0/(CM[0][0]+CM[0][1]))
| [
"xeniaqian94@gmail.com"
] | xeniaqian94@gmail.com |
9e5ee3e62bcf6a844290cd40365ca82a1e9a8db3 | ba6d11be23574d98210a6ea8df02cbc8afae325e | /tokenizers/javalex/javasmartlex.py | 36664977def81a3d4d1d13a1ef928550377cb5f4 | [
"MIT"
] | permissive | sayon/ignoreme | 921596af5645731fa66a7ec31c11992deecd28e2 | d3d40c2eb3b36d1ea209abb9a710effba3d921c3 | refs/heads/master | 2020-11-26T15:27:41.567436 | 2014-11-07T14:59:06 | 2014-11-07T14:59:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,336 | py | __author__ = 'novokonst'
import ply.lex as lex
JAVA_KEYWORDS = [
'abstract'
, 'assert'
, 'boolean'
, 'break'
, 'byte'
, 'case'
, 'catch'
, 'char'
, 'class'
, 'const'
, 'continue'
, 'default'
, 'do'
, 'double'
, 'else'
, 'enum'
, 'extends'
, 'final'
, 'finally'
, 'float'
, 'for'
, 'goto'
, 'if'
, 'implements'
, 'import'
, 'instanceof'
, 'int'
, 'interface'
, 'long'
, 'native'
, 'new'
, 'package'
, 'private'
, 'protected'
, 'public'
, 'return'
, 'short'
, 'static'
, 'strictfp'
, 'super'
, 'switch'
, 'synchronized'
, 'this'
, 'throw'
, 'throws'
, 'transient'
, 'try'
, 'void'
, 'volatile'
, 'while'
]
class JavaTokenizer:
MY_KEYWORDS = JAVA_KEYWORDS
RESERVED = {kw: kw for kw in MY_KEYWORDS}
tokens = RESERVED.values() + [
'ID'
, 'STRING_LITERAL'
, 'NUMBER'
, 'ANNOTATION'
, 'COMMENT'
, 'LINE_COMMENT'
, 'MULTI_COMMENT_LEFT'
, 'MULTI_COMMENT_RIGHT'
]
def check_comment(fn):
def wrapped(self, t):
if self.nested_comment:
t.type = 'COMMENT'
return t
else:
return fn(self, t)
wrapped.__doc__ = fn.__doc__
return wrapped
@check_comment
def t_ID(self, t):
t.type = self.__class__.RESERVED.get(t.value, 'ID')
return t
@check_comment
def t_STRING_LITERAL(self, t):
return t
@check_comment
def t_NUMBER(self, t):
return t
@check_comment
def t_ANNOTATION(self, t):
return t
def t_LINE_COMMENT(self, t):
t.type = 'COMMENT'
return t
def t_MULTI_COMMENT_LEFT(self, t):
self.nested_comment += 1
t.type = 'COMMENT'
return t
def t_MULTI_COMMENT_RIGHT(self, t):
self.nested_comment -= 1
t.type = 'COMMENT'
return t
t_ignore = ' \t'
def t_error(self, t):
# self.skipped.append(t.value)
t.lexer.skip(1)
def __init__(self, **kwargs):
        self.t_ID.__func__.__doc__ = r'[a-zA-Z_][a-zA-Z0-9_]*'
self.t_STRING_LITERAL.__func__.__doc__ = r'\'.*\''
self.t_NUMBER.__func__.__doc__ = r'\d+'
        self.t_ANNOTATION.__func__.__doc__ = r'@[a-zA-Z_][a-zA-Z0-9_]*'
self.t_LINE_COMMENT.__func__.__doc__ = r'//.*'
self.t_MULTI_COMMENT_LEFT.__func__.__doc__ = r'/\*.*'
self.t_MULTI_COMMENT_RIGHT.__func__.__doc__ = r'.*\*/'
self.skipped = []
self.nested_comment = 0
self.lexer = lex.lex(module=self, **kwargs)
def refresh(self):
self.skipped = []
self.nested_comment = 0
def tokenize(self, data):
self.lexer.input(data)
self.out_token_dict = {}
while True:
tok = self.lexer.token()
if not tok: break
self.out_token_dict[tok.type] = self.out_token_dict.get(tok.type, [])
self.out_token_dict[tok.type].append(tok)
return self.out_token_dict
def keywords_ex_stats(self, extra_type_list=[]):
keys = JavaTokenizer.MY_KEYWORDS + extra_type_list
return {k: self.out_token_dict.get(k, []) for k in keys} | [
"novokrest013@gmail.com"
] | novokrest013@gmail.com |
7ff4e3512240a49138a5b303e8e9525d6c638e6d | d47067156da51bfed44ae4e465f9ac9831c4138a | /app.py | 2ca5a247d1c78747c6151593d111101b0d8f4b31 | [] | no_license | sylvaus/datawebserver | 264398a1324fafef78980c7c0f85f4c5f4ae37b0 | 13cdb31302b7c0a7ff1e9cb7a3c0709caeaf166e | refs/heads/master | 2020-04-28T00:59:16.070904 | 2019-03-10T21:16:45 | 2019-03-10T21:16:45 | 174,837,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | import atexit
import configparser
import socket
from multiprocessing import Queue
from threading import Thread
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
from data_server.data_dispatcher import DataDispatcher
from data_server.data_server import DataServer, ThreadedTCPRequestHandler
from data_server.rolling_data_base import RollingDataBase
config = configparser.ConfigParser()
config.read("app.ini")
app = Flask(__name__)
app.config['SECRET_KEY'] = config.get("server.graphs", "secret_key")
socket_io = SocketIO(app)
# Configuring and loading database
database = RollingDataBase(config.get("database", "db_folder"),
auto_save_s=config.getint("database", "auto_save_s"))
database.load()
# Configuring DataServer
if config.getboolean("server.data", "auto_host"):
data_server_host = socket.gethostbyname(socket.gethostname())
else:
    data_server_host = config.get("server.data", "host")
data_server_port = config.getint("server.data", "port")
data_queue = Queue()
data_server = DataServer((data_server_host, data_server_port),
ThreadedTCPRequestHandler,
data_queue=data_queue)
def dispatch_update(data):
name, x_y = data
socket_io.emit("data_update", [name, x_y])
database.add(name, x_y)
data_dispatcher = DataDispatcher(data_queue, dispatch_update)
@socket_io.on('request_initial_values')
def give_initial_params():
emit("initial_values", database.get_all())
@app.route('/')
def main_page():
return render_template('index.html')
@atexit.register
def closing_resources():
print("Closing resources")
data_server.shutdown()
data_dispatcher.stop()
database.stop_auto_save()
database.save()
if __name__ == '__main__':
data_server_thread = Thread(target=data_server.serve_forever)
data_server_thread.start()
data_dispatcher_thread = Thread(target=data_dispatcher.run)
data_dispatcher_thread.start()
if config.getboolean("server.graphs", "auto_host"):
graph_server_host = socket.gethostbyname(socket.gethostname())
else:
        graph_server_host = config.get("server.graphs", "host")
graph_server_port = config.getint("server.graphs", "port")
socket_io.run(app, host=graph_server_host, port=graph_server_port)
| [
"pierreyves.breches74@gmail.com"
] | pierreyves.breches74@gmail.com |
f8bcf1d6c11905d75cef6915a64cabd14eac0127 | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/goodwe/__init__.py | b5872ed3deaa396f83867e451c4d61a51cafba2e | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 2,370 | py | """The Goodwe inverter component."""
from goodwe import InverterError, connect
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.entity import DeviceInfo
from .const import (
CONF_MODEL_FAMILY,
DOMAIN,
KEY_COORDINATOR,
KEY_DEVICE_INFO,
KEY_INVERTER,
PLATFORMS,
)
from .coordinator import GoodweUpdateCoordinator
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up the Goodwe components from a config entry."""
hass.data.setdefault(DOMAIN, {})
host = entry.data[CONF_HOST]
model_family = entry.data[CONF_MODEL_FAMILY]
# Connect to Goodwe inverter
try:
inverter = await connect(
host=host,
family=model_family,
retries=10,
)
except InverterError as err:
raise ConfigEntryNotReady from err
device_info = DeviceInfo(
configuration_url="https://www.semsportal.com",
identifiers={(DOMAIN, inverter.serial_number)},
name=entry.title,
manufacturer="GoodWe",
model=inverter.model_name,
sw_version=f"{inverter.firmware} / {inverter.arm_firmware}",
)
# Create update coordinator
coordinator = GoodweUpdateCoordinator(hass, entry, inverter)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = {
KEY_INVERTER: inverter,
KEY_COORDINATOR: coordinator,
KEY_DEVICE_INFO: device_info,
}
entry.async_on_unload(entry.add_update_listener(update_listener))
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
| [
"noreply@github.com"
] | konnected-io.noreply@github.com |
2f2e5e0161230e936733e0a3bf0fbc147785f021 | a2f63e108409adabc88aaa4e5eee9603d4085f3d | /vea-plaza1/farmacia/apps/compras/forms.py | fa43c3492e6f904b4fe4feb009e9ff87dff88592 | [] | no_license | armandohuarcaya/Sistema-Union | 098569606a8259d76a42f2508f59dbf33ddb2e31 | 9f093e8952008fffcbb86ecf1e49f7c86184557e | refs/heads/master | 2021-08-22T23:37:12.758059 | 2017-12-01T12:53:38 | 2017-12-01T12:53:38 | 112,769,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | from .models import * # Change as necessary
from django.forms import ModelForm
from django import forms
from django.utils.text import capfirst
class TodoListForm(ModelForm):
class Meta:
model = Cabecera
exclude =('trabajador',)
widgets = {
'codigo': forms.TextInput(attrs={'class': 'form-control'}),
'distribuidor': forms.Select(attrs={'class': 'form-control'}),
'laboratorio': forms.Select(attrs={'class': 'form-control'}),
}
class TodoItemForm(forms.ModelForm):
class Meta:
model = DetalleCompra
exclude = ('list',)
widgets = {
'medicamento': forms.Select(attrs={'class': 'form-control'}),
'cantidad': forms.NumberInput(attrs={'class': 'form-control'}),
}
# def __init__(self, *args, **kwargs):
# super(TodoItemForm, self).__init__(*args, **kwargs)
# self.fields['medicamento'] = forms.CharField(
# label=capfirst(u'Producto')
# )
class RangoForm (forms.Form):
fecha_i = forms.DateField(widget = forms.TextInput(attrs={'class':'form-control', 'id':'Fecha_i', 'data-date-format':'dd/mm/yyyy'}))
fecha_f = forms.DateField(widget = forms.TextInput(attrs={'class':'form-control', 'id':'Fecha_f', 'data-date-format':'dd/mm/yyyy'}))
| [
"armando_huarcaya@upeu.edu.pe"
] | armando_huarcaya@upeu.edu.pe |
8bd1377691cf724779af709f24241b528b2a1b8a | aef69557d8960205a780e61b7c2dfbb1d7733449 | /Code/Theo/labs/peaksandvalleys.py | cb64dea931ac33118b64d0a46eff3440cbc5c589 | [] | no_license | sbtries/class_pandaaaa | 579d6be89a511bdc36b0ce8c95545b9b704a734a | bbf9c419a00879118a55c2c19e5b46b08af806bc | refs/heads/master | 2023-07-18T14:18:25.881333 | 2021-09-02T22:48:29 | 2021-09-02T22:48:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | def main():
data = [1 , 2 , 3 ,4 , 5 , 6 , 7 , 6 , 5 , 4 , 5 , 6 , 7 , 8 , 9 , 8 , 7 , 6 , 7 , 8 , 9]
peak_indices = peaks(data)
valley_indices = valleys(data)
print(valley_indices)
print(peak_indices)
print(peaks_and_valleys(peak_indices,valley_indices))
visualization(data)
exit()
def peaks(data):
peak_indices = []
i = 1
n = len(data)
while i < n-1:
if data[i-1] < data[i] and data[i+1] < data[i]:
peak_indices.append(i)
i += 1
return peak_indices
def valleys(data):
valley_indices = []
i = 1
n = len(data)
while i < n-1:
if data[i-1] > data[i] and data[i+1] > data[i]:
valley_indices.append(i)
i += 1
return valley_indices
def peaks_and_valleys(peaks,valleys):
p_v = []
p_v.extend(peaks)
p_v.extend(valleys)
p_v.sort()
return p_v
def visualization(data):
line = []
i = 0
j = 0
max = 0
for val in data:
if val > max:
max = val
n = len(data)
for i in range(max):
for j in range(n):
if data[j] >= max-i:
line.append('X')
else:
line.append(' ')
print(line)
line.clear()
main() | [
"trowlett@gmail.com"
] | trowlett@gmail.com |
67066240d43e45422d72fc3cfb0f6f4f40c47a7f | 612592a63c9c7b46f4b84f6b2ea6a18187e4838e | /TWOSC_autotest/TWOSC_autotest_web/PO/plan/fahuodan/fahuodan_editPlan.py | fa45dfb9161e946dbd0c086f78e57ef5d0c8b0a8 | [] | no_license | xiaoxiangLiu/test_gitbase | 5c624175b58897d3314c178924fd79fc48925a96 | da1b9c82b8501f838892a3a5089392311603309e | refs/heads/master | 2021-05-08T14:56:34.612042 | 2018-03-08T16:32:39 | 2018-03-08T16:32:39 | 120,100,809 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,898 | py | __author__ = '38720'
# coding=utf-8
from base_page.base import Page
from selenium.webdriver.common.by import By
class EditPlan(Page):
    '''Page object for editing the delivery-order (fahuodan) info.'''
    # Ship-out (warehouse exit) time input
chuku_time_loc = (By.CSS_SELECTOR, '#wareHouseTimeStr')
    # Month-switch button in the ship-out time picker
month_time_loc = (By.CSS_SELECTOR, 'body > div:nth-child(11) > div.datetimepicker-days > table > thead > tr:nth-child(1) > th.switch')
    # "January" option in the ship-out time picker
january_time_loc = (By.CSS_SELECTOR, 'body > div:nth-child(11) > div.datetimepicker-months > table > tbody > tr > td > span:nth-child(1)')
    # Day cell in the ship-out time picker
day_time_loc = (By.CSS_SELECTOR, 'body > div:nth-child(11) > div.datetimepicker-days > table > tbody > tr:nth-child(2) > td:nth-child(1)')
    # 13:00 hour cell in the ship-out time picker
hour_time_loc = (By.CSS_SELECTOR, 'body > div:nth-child(11) > div.datetimepicker-hours > table > tbody > tr > td > span:nth-child(14)')
    # Arrival time input
daohuo_time_loc = (By.CSS_SELECTOR, '#arriveTimeStr')
    # Month-switch button in the arrival time picker
month_time_daohuo_loc = (By.XPATH, '/html/body/div[8]/div[3]/table/thead/tr[1]/th[2]')
    # "January" option in the arrival time picker
january_time_daohuo_loc = (By.XPATH, '/html/body/div[8]/div[4]/table/tbody/tr/td/span[1]')
    # Day-12 cell in the arrival time picker
day_time_daohuo_loc = (By.CSS_SELECTOR, 'body > div:nth-child(12) > div.datetimepicker-days > table > tbody > tr:nth-child(3) > td:nth-child(5)')
    # 10:00 hour cell in the arrival time picker
hour_time_daohuo_loc = (By.XPATH, '/html/body/div[8]/div[2]/table/tbody/tr/td/span[11]')
    # Delivery-order name label
fahuodan_lable_loc = (By.CSS_SELECTOR, '#editInfoForm > div > div:nth-child(1) > label')
    # Submit button
submit_loc = (By.CSS_SELECTOR, '#GT_CommonModal > div > div > div.box-footer > div > button.btn.btn-primary')
    # Remove the readonly attribute from the ship-out time widget
def remove_time_js(self):
        js = "$('input[id=wareHouseTimeStr]').removeAttr('readonly')"
return self.script(js)
    # Enter the ship-out time
def send_chuku_time(self):
        js = "$('input[id=wareHouseTimeStr]').removeAttr('readonly')"
self.script(js)
return self.find_element(*self.chuku_time_loc).send_keys('2016-10-10-22:00')
    # Remove readonly from the arrival time field and enter the time
def send_daohuo_time(self):
        js = "$('input[id=arriveTimeStr]').removeAttr('readonly')"
self.script(js)
return self.find_element(*self.daohuo_time_loc).send_keys('2018-10-10-22:00')
    # Click the delivery-order name label
def click_lable(self):
return self.find_element(*self.fahuodan_lable_loc).click()
    # Click submit
def click_submit(self):
return self.find_element(*self.submit_loc).click()
    # Click the ship-out time input
def click_chuku_time(self):
return self.find_element(*self.chuku_time_loc).click()
    # Click the month selector
def click_month_time(self):
return self.find_element(*self.month_time_loc).click()
    # Click "January"
def click_january_time(self):
return self.find_element(*self.january_time_loc).click()
    # Click the day cell
def click_day_time(self):
return self.find_element(*self.day_time_loc).click()
    # Click 13:00
def click_hour_time(self):
return self.find_element(*self.hour_time_loc).click()
    # Click the arrival time input
def click_daohuo_time(self):
return self.find_element(*self.daohuo_time_loc).click()
    # Click the month selector of the arrival time picker
def click_month_daohuo(self):
return self.find_element(*self.month_time_daohuo_loc).click()
    # Click "January" in the arrival time picker
def click_january_daohuo(self):
return self.find_element(*self.january_time_daohuo_loc).click()
    # Click day 12
def click_day_daohuo(self):
return self.find_element(*self.day_time_daohuo_loc).click()
    # Click 10:00
def click_hour_daohuo(self):
return self.find_element(*self.hour_time_daohuo_loc).click()
| [
"38720034@qq.com"
] | 38720034@qq.com |
2999639695e22cf860ff0c331939e7a44e1660da | fcdfe976c9ed60b18def889692a17dc18a8dd6d7 | /ros/py_ros/baxter/sonar1.py | 05674e6f2009f101a7a4af39626441ee58c5c8ee | [] | no_license | akihikoy/ay_test | 4907470889c9bda11cdc84e8231ef3156fda8bd7 | a24dfb720960bfedb94be3b4d147e37616e7f39a | refs/heads/master | 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | #!/usr/bin/python
#\file sonar1.py
#\brief Baxter: getting data from sonor sensor
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Oct.29, 2015
import roslib
import rospy
import sensor_msgs.msg
import baxter_interface
import time
import math
def IsPointInFront(points, max_angle, max_dist):
for p in points:
angle= math.atan2(p.y,p.x)
dist= math.sqrt(p.x*p.x+p.y*p.y)
#print (abs(angle),dist),
if abs(angle)<max_angle and dist<max_dist:
return True
#print ''
return False
def CallBack(msg):
#print '----------------'
#print msg
if IsPointInFront(msg.points,30.0/180.0*math.pi,1.1): print 'Found a near point!',msg.header.seq
if __name__=='__main__':
rospy.init_node('baxter_test')
sub_msg= rospy.Subscriber('/robot/sonar/head_sonar/state', sensor_msgs.msg.PointCloud, CallBack)
rospy.spin()
#rospy.signal_shutdown('Done.')
| [
"info@akihikoy.net"
] | info@akihikoy.net |
8debe55a0166fa775d3843b6dc1d86d2907efc04 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /aqA6KSHRCwfE44Q9m_5.py | 99fb26451b467fde54932142a65767af8a9f7fe5 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """
October 22nd is CAPS LOCK DAY. Apart from that day, every sentence should be
lowercase, so write a function to **normalize** a sentence.
Create a function that takes a string. If the string is **all uppercase
characters** , convert it to **lowercase** and add an **exclamation mark** at
the end.
### Examples
normalize("CAPS LOCK DAY IS OVER") โ "Caps lock day is over!"
normalize("Today is not caps lock day.") โ "Today is not caps lock day."
normalize("Let us stay calm, no need to panic.") โ "Let us stay calm, no need to panic."
### Notes
Each string is a sentence and should start with an uppercase character.
"""
def normalize(txt):
if all(all(i.isupper() for i in x) for x in txt.split(' ')):
return (txt.lower()).capitalize() + '!'
return txt
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a8d4b395303242fdc048b7b4a0099f1f74d70414 | 12a0b2172cb480d08406c1302652b078615e5747 | /plut/table.py | ab41b0dce88a7cd8f0f97277f552101289ac7843 | [] | no_license | kuntzer/plut | 60bb59f21e5952935e471916954b6ab48d98794b | 2924f36dc3b1c8ca1449cd666ca6c1577b03a77a | refs/heads/master | 2021-01-19T09:58:19.432997 | 2017-04-20T15:16:33 | 2017-04-20T15:16:33 | 87,803,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,325 | py | """
Helpers for astropy.table arrays
"""
import numpy as np
import astropy.table
import datetime
import copy
import logging
logger = logging.getLogger(__name__)
def info(cat, txt=True):
"""
Returns a new table "describing" the content of the table cat.
"""
colnames = cat.colnames
dtypes = [cat[colname].dtype for colname in colnames]
ndims = [cat[colname].ndim for colname in colnames]
shapes = [cat[colname].shape for colname in colnames]
infotable = astropy.table.Table([colnames, dtypes, ndims, shapes], names=("colname", "dtype", "ndim", "shape"))
infotable.sort("colname")
infotable.meta = cat.meta
if txt:
lines = infotable.pformat(max_lines=-1, max_width=-1)
lines.append("")
lines.append("Number of rows: {}".format(len(cat)))
lines.append("Number of columns: {}".format(len(cat.colnames)))
lines.append("Metadata: {}".format(str(infotable.meta.items())))
return "\n".join(lines)
else:
return infotable
class Selector:
"""
Provides a simple way of getting "configurable" sub-selections of rows from a table.
"""
def __init__(self, name, criteria):
"""
:param name: a short string describing this selector (like "star", "low_snr", ...)
:param criteria: a list of tuples describing the criteria. Each of these tuples starts
with a string giving its type, followed by some arguments.
Illustration of the available criteria (all limits are inclusive):
- ``("in", "tru_rad", 0.5, 0.6)`` : ``"tru_rad"`` is between 0.5 and 0.6 ("in" stands for *interval*) and *not* masked
- ``("max", "snr", 10.0)`` : ``"snr"`` is below 10.0 and *not* masked
- ``("min", "adamom_flux", 10.0)`` : ``"adamom_flux"`` is above 10.0 and *not* masked
- ``("inlist", "subfield", (1, 2, 3))`` : ``subfield`` is among the elements in the tuple or list (1,2,3) and *not* masked.
- ``("is", "Flag", 2)`` : ``"Flag"`` is exactly 2 and *not* masked
- ``("nomask", "pre_g1")`` : ``"pre_g1"`` is not masked
- ``("mask", "snr")`` : ``"snr"`` is masked
"""
self.name = name
self.criteria = criteria
def __str__(self):
"""
A string describing the selector
"""
return "'%s' %s" % (self.name, repr(self.criteria))
def combine(self, *others):
"""
Returns a new selector obtained by merging the current one with one or more others.
:param others: provide one or several other selectors as arguments.
.. note:: This does **not** modify the current selector in place! It returns a new one!
"""
combiname = "&".join([self.name] + [other.name for other in others])
combicriteria = self.criteria
for other in others:
combicriteria.extend(other.criteria)
return Selector(combiname, combicriteria)
def select(self, cat):
"""
Returns a copy of cat with those rows that satisfy all criteria.
:param cat: an astropy table
"""
if len(self.criteria) is 0:
logger.warning("Selector %s has no criteria!" % (self.name))
return copy.deepcopy(cat)
passmasks = []
for crit in self.criteria:
if cat[crit[1]].ndim != 1:
logger.warning("Selecting with multidimensional column ('{}', shape={})... hopefully you know what you are doing.".format(crit[1], cat[crit[1]].shape))
if crit[0] == "in":
if len(crit) != 4: raise RuntimeError("Expected 4 elements in criterion %s" % (str(crit)))
passmask = np.logical_and(cat[crit[1]] >= crit[2], cat[crit[1]] <= crit[3])
if np.ma.is_masked(passmask):
passmask = passmask.filled(fill_value=False)
# Note about the "filled": if crit[2] or crit[3] englobe the values "underneath" the mask,
# some masked crit[1] will result in a masked "passmask"!
# But we implicitly want to reject masked values here, hence the filled.
elif crit[0] == "max":
if len(crit) != 3: raise RuntimeError("Expected 3 elements in criterion %s" % (str(crit)))
passmask = (cat[crit[1]] <= crit[2])
if np.ma.is_masked(passmask):
passmask = passmask.filled(fill_value=False)
elif crit[0] == "min":
if len(crit) != 3: raise RuntimeError("Expected 3 elements in criterion %s" % (str(crit)))
passmask = (cat[crit[1]] >= crit[2])
if np.ma.is_masked(passmask):
passmask = passmask.filled(fill_value=False)
elif crit[0] == "inlist":
if len(crit) != 3: raise RuntimeError("Expected 3 elements in criterion %s" % (str(crit)))
passmask = np.in1d(np.asarray(cat[crit[1]]), crit[2]) # This ignores any mask
if np.ma.is_masked(passmask): # As the mask is ignored by in1d, this is probably worthless and will never happen
passmask = passmask.filled(fill_value=False)
# So we need to deal with masked elements manually:
if hasattr(cat[crit[1]], "mask"): # i.e., if this column is masked:
passmask = np.logical_and(passmask, np.logical_not(cat[crit[1]].mask))
elif crit[0] == "is":
if len(crit) != 3: raise RuntimeError("Expected 3 elements in criterion %s" % (str(crit)))
passmask = (cat[crit[1]] == crit[2])
if np.ma.is_masked(passmask):
passmask = passmask.filled(fill_value=False)
elif (crit[0] == "nomask") or (crit[0] == "mask"):
if len(crit) != 2: raise RuntimeError("Expected 2 elements in criterion %s" % (str(crit)))
if hasattr(cat[crit[1]], "mask"): # i.e., if this column is masked:
if crit[0] == "nomask":
passmask = np.logical_not(cat[crit[1]].mask)
else:
passmask = cat[crit[1]].mask
else:
logger.warning("Criterion %s is facing an unmasked column!" % (str(crit)))
passmask = np.ones(len(cat), dtype=bool)
else:
raise RuntimeError("Unknown criterion %s" % (crit))
logger.debug("Criterion %s of '%s' selects %i/%i rows (%.2f %%)" %
(crit, self.name, np.sum(passmask), len(cat), 100.0 * float(np.sum(passmask))/float(len(cat))))
assert len(passmask) == len(cat)
passmasks.append(passmask) # "True" means "pass" == "keep this"
# Combining the passmasks:
passmasks = np.logical_not(np.column_stack(passmasks)) # "True" means "reject"
combimask = np.logical_not(np.sum(passmasks, axis=1).astype(bool)) # ... and "True" means "keep this" again.
logger.info("Selector '%s' selects %i/%i rows (%.2f %%)" %
(self.name, np.sum(combimask), len(cat), 100.0 * float(np.sum(combimask))/float(len(cat))))
return cat[combimask]
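# A minimal usage sketch of the Selector criteria documented above; the demo table and the
# column names ("snr", "tru_rad") are illustrative assumptions, not part of this module's API.
if __name__ == "__main__":
    demo = astropy.table.Table({"snr": [5.0, 12.0, 20.0], "tru_rad": [0.55, 0.58, 0.9]})
    star_sel = Selector("star", [("min", "snr", 10.0), ("in", "tru_rad", 0.5, 0.6)])
    # Only the second row passes both criteria, so one row is kept.
    print(len(star_sel.select(demo)))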
| [
"thibault.kuntzer@epfl.ch"
] | thibault.kuntzer@epfl.ch |
f2c61f6ea2a8a16d27c3a861620bfe4d898bc0d0 | 55b84573458f72618c9d285fd449e8328b75af4f | /test.py | 642d7e9940b463fad5b34d1e231bd1568e731442 | [
"MIT"
] | permissive | obs145628/py-linear-regression | 2984749d8b2295a044fff92904ae7f3daec98b07 | 8dafcfa0c1b889177afa61629540cacf8dcc0080 | refs/heads/master | 2021-04-27T17:37:10.715036 | 2018-02-21T11:00:54 | 2018-02-21T11:00:54 | 122,325,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import numpy as np
import dataset_boston
import lineareg
X, y = dataset_boston.load_boston()
lineareg.train(X, y, 1000, 1e-8)
| [
"obs145628@gmail.com"
] | obs145628@gmail.com |
12afd11a82d7eedeaa2bf128ce1a1df056ad69bb | 88023c9a62994e91291c67088156a2894cc26e9e | /tests/test_core.py | 7a80ecc593d53ee3f44a38022cb4ff9fe2622c00 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | toros-astro/corral | 41e9d0224d734c4268bf5161d472b3c0375842f0 | 75474b38ff366330d33644461a902d07374a5bbc | refs/heads/master | 2023-06-10T15:56:12.264725 | 2018-09-03T17:59:41 | 2018-09-03T17:59:41 | 44,282,921 | 6 | 5 | BSD-3-Clause | 2023-03-24T12:03:17 | 2015-10-14T23:56:40 | Python | UTF-8 | Python | false | false | 2,715 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martรญn
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# DOCS
# =============================================================================
"""All core functions functionalities tests"""
# =============================================================================
# IMPORTS
# =============================================================================
from corral import core, VERSION
import mock
from .base import BaseTest
# =============================================================================
# BASE CLASS
# =============================================================================
class TestCore(BaseTest):
def test_get_version(self):
actual = core.get_version()
expected = VERSION
self.assertEqual(actual, expected)
def test_setup_environment(self):
with mock.patch("corral.db.setup") as setup:
with mock.patch("corral.db.load_models_module") as load_mm:
core.setup_environment()
self.assertTrue(setup.called)
self.assertTrue(load_mm.called)
| [
"jbc.develop@gmail.com"
] | jbc.develop@gmail.com |
095fea23bcf8497a6557e3979f106a29b0294879 | 53c4ec58760768fc9073793cf17cd8c55978c3af | /annotator/uniformer/mmcv/runner/utils.py | c5befb8e56ece50b5fecfd007b26f8a29124c0bd | [
"Apache-2.0"
] | permissive | HighCWu/ControlLoRA | 0b6cab829134ed8377f22800b0e1d648ddf573b0 | 3b8481950867f61b2cf072b1f156d84f3363ac20 | refs/heads/main | 2023-08-05T08:51:25.864774 | 2023-02-28T13:06:24 | 2023-02-28T13:06:24 | 603,359,062 | 421 | 20 | Apache-2.0 | 2023-08-02T02:14:40 | 2023-02-18T09:12:15 | Python | UTF-8 | Python | false | false | 2,936 | py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import random
import sys
import time
import warnings
from getpass import getuser
from socket import gethostname
import numpy as np
import torch
import annotator.uniformer.mmcv as mmcv
def get_host_info():
"""Get hostname and username.
Return empty string if exception raised, e.g. ``getpass.getuser()`` will
lead to error in docker container
"""
host = ''
try:
host = f'{getuser()}@{gethostname()}'
except Exception as e:
warnings.warn(f'Host or user not found: {str(e)}')
finally:
return host
def get_time_str():
return time.strftime('%Y%m%d_%H%M%S', time.localtime())
def obj_from_dict(info, parent=None, default_args=None):
"""Initialize an object from dict.
The dict must contain the key "type", which indicates the object type, it
can be either a string or type, such as "list" or ``list``. Remaining
fields are treated as the arguments for constructing the object.
Args:
info (dict): Object types and arguments.
parent (:class:`module`): Module which may containing expected object
classes.
default_args (dict, optional): Default arguments for initializing the
object.
Returns:
any type: Object built from the dict.
"""
assert isinstance(info, dict) and 'type' in info
assert isinstance(default_args, dict) or default_args is None
args = info.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
if parent is not None:
obj_type = getattr(parent, obj_type)
else:
obj_type = sys.modules[obj_type]
elif not isinstance(obj_type, type):
raise TypeError('type must be a str or valid type, but '
f'got {type(obj_type)}')
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
return obj_type(**args)
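# A minimal usage sketch of obj_from_dict; the torch.nn target is only an illustrative
# assumption (any namespace exposing the named type works the same way):
#
#   import torch.nn as nn
#   linear = obj_from_dict(dict(type='Linear', in_features=8, out_features=2), parent=nn)
#   # equivalent to nn.Linear(in_features=8, out_features=2)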
def set_random_seed(seed, deterministic=False, use_rank_shift=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
        use_rank_shift (bool): Whether to add rank number to the random seed to
have different random seed in different threads. Default: False.
"""
if use_rank_shift:
rank, _ = mmcv.runner.get_dist_info()
seed += rank
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| [
"highcwu@163.com"
] | highcwu@163.com |
b0eab014e58477ee27d3b7011ca54aee830dd3b6 | a2777caa6247ab826c4902cc5a14bacfd3507215 | /eggify/migrations/0045_auto_20190608_0634.py | 436444884d5ad10093458da13388b39dd1036e00 | [] | no_license | ad-egg/eggify | 2c7aa96f1588aefa94236f3a39693c5b9a1f931d | a7d1cbc319ca52fc9e14f574cadebfdad5bad3e3 | refs/heads/master | 2022-05-05T22:54:45.942426 | 2020-10-21T20:03:32 | 2020-10-21T20:03:32 | 190,267,814 | 3 | 6 | null | 2022-04-22T21:25:19 | 2019-06-04T19:38:53 | Python | UTF-8 | Python | false | false | 968 | py | # Generated by Django 2.2.2 on 2019-06-08 06:34
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('eggify', '0044_merge_20190608_0634'),
]
operations = [
migrations.AlterField(
model_name='eggnt',
name='id',
field=models.CharField(default='ce821d7f-ad2d-4bb7-9399-d73887475548', editable=False, max_length=50, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='eggnt',
name='updated_at',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 8, 6, 34, 39, 904850, tzinfo=utc), editable=False, verbose_name='updated at'),
),
migrations.AlterField(
model_name='eggnt',
name='words',
field=models.TextField(help_text='Enter text to be eggified!'),
),
]
| [
"539@holbertonschool.com"
] | 539@holbertonschool.com |
286984d3003df45e062240f9a4fd9db5bedd3497 | 89260668655a46278e8f22a5807b1f640fd1490c | /mySite/records/get_person.py | 6c0e2075d500dfb86e34d2cbb2b5d26de09a12f3 | [] | no_license | happychallenge/mySite | 926136859c5b49b7fd8baff09b26f375b425ab30 | ddbda42d5d3b9c380a594d81a27875b4ad10358b | refs/heads/master | 2020-12-30T13:28:31.018611 | 2017-11-08T02:52:18 | 2017-11-08T02:52:18 | 91,218,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | import re
import json
import requests
from bs4 import BeautifulSoup
def get_person_info(person):
search_url = 'http://people.search.naver.com/search.naver' # Search URL
params = { 'where':'nexearch' , 'query': person } # Search Parameters
html = requests.get(search_url, params=params).text # BeautifulSoup
pattern = re.compile('(?<=oAPIResponse :)(.*?)(?=, sAPIURL)', re.DOTALL)
matches = pattern.search(html)
    if matches is None:
return None
data = matches[0]
result = json.loads(data) # Json Data Load from Javascript
listPerson = result['data']['result']['itemList'] # Get person info
    result = {}  # initialize the return value
for index, item in enumerate(listPerson):
sub = {}
sub['id'] = item['object_id'] # ID Read
sub['name'] = item['m_name'] # name
sub['birth_year'] = item['birth_year'] # Birth Year
job = []
for jobs in item['job_info']:
job.append(jobs['job_name'])
sub['job'] = job
result[index] = sub
if index == 1:
break
return result
if __name__ == '__main__':
print(get_person_info('์ต๊ฒฝํ')) | [
"happychallenge@outlook.com"
] | happychallenge@outlook.com |
e89bf4569b09f51daffc997e16d3331e722abc6d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_truism.py | d1c7cd25dfb6033280337472ff04169465e52272 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py |
#calss header
class _TRUISM():
def __init__(self,):
self.name = "TRUISM"
self.definitions = [u'a statement that is so obviously true that it is almost not worth saying: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7773438bf26b8b815417e140e5cc64fe0116bb7a | 5f0089466266e5a118b8257a49623b095f972737 | /leetcode/offerIsComing/ๅจๆ่งๅ/ๅๆ Offer 48. ๆ้ฟไธๅซ้ๅคๅญ็ฌฆ็ๅญๅญ็ฌฆไธฒ .py | 62b7b1a158f640bc6440bb1ed463dced37400b0d | [
"Apache-2.0"
] | permissive | wenhaoliang/leetcode | 983526484b67ee58656c8b0fd68483de82112a78 | b19233521c4e9a08cba6267eda743935e1fb06a6 | refs/heads/main | 2023-07-19T04:31:00.214433 | 2021-09-04T10:54:17 | 2021-09-04T10:54:17 | 401,963,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,092 | py | """
Find the longest substring that contains no repeated characters and
return the length of that substring.

Example 1:
Input: "abcabcbb"
Output: 3
Explanation: the longest substring without repeating characters is "abc",
so its length is 3.

Example 2:
Input: "bbbbb"
Output: 1
Explanation: the longest substring without repeating characters is "b",
so its length is 1.

Example 3:
Input: "pwwkew"
Output: 3
Explanation: the longest substring without repeating characters is "wke",
so its length is 3.
Note that the answer must be a substring: "pwke" is a subsequence, not a substring.

Link: https://leetcode-cn.com/problems/zui-chang-bu-han-zhong-fu-zi-fu-de-zi-zi-fu-chuan-lcof
"""
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
"""
็ถๆๅฎไน๏ผdp[j] ่กจ็คบๅฐjไธบๆญขๆ้ฟ็ๅญๅญ็ฌฆไธฒ
่ฝฌ็งปๆน็จ๏ผๅบๅฎๆ่พน็j๏ผๅๅทฆๆฅๆพไธs[j]็ธ็ญ็s[i]๏ผ
1ใi < 0, ไธบๅๅทฆๆฅๆพไธๅฐไธs[j]็ธ็ญ็ๅญ็ฌฆ๏ผ ๆญคๆถd[j] = dp[j-1] + 1
2ใdp[j-1] < j - i, j-i่ฟ้่กจ็คบๆ่พน็ๅฐๅทฆ่พน็ไน้ด็ๅญ็ฌฆไธฒ้ฟๅบฆ๏ผ
่ฅๅฐไบๅ่กจ็คบๅทฆ่พน็้ฃไธช็ธ็ญ็ๅญ็ฌฆๅฏน็ฎๅๆ้ฟๅญไธฒ้ฟๅบฆๆ ๅฝฑๅ๏ผ
ๅๅฏๅฐๆ่พน็็่ฟไธชๅญ็ฌฆๅ ไธๅป๏ผๅณd[j] = dp[j-1] + 1
3ใdp[j-1] >= j - i, ๅณๅทฆ่พน็็้ฃไธชๅญ็ฌฆๅจ็ฎๅ็ๆ้ฟๅญๅญ็ฌฆ้้ข๏ผ
ๆไปฅ่ฆ้ๆฐ่ฎก็ฎๆ้ฟๅญๅญ็ฌฆไธฒ้ฟๅบฆ๏ผๅณdp[j] = j - i
ๅๅง็ถๆ: dp[0] = 0
่ฟๅๅผ: ่ฎพ็ฝฎไธไธชresๆฅ่ฎฐๅฝๆๅคงๅผ
"""
length = len(s)
dp = [0] * length
res = 0
for j in range(length):
i = j - 1
while i >= 0 and s[i] != s[j]:
                i -= 1 # linear scan to find i
if dp[j - 1] < j - i:
dp[j] = dp[j - 1] + 1
else:
dp[j] = j - i
res = max(res, dp[j])
return res
class Solution1:
def lengthOfLongestSubstring(self, s: str) -> int:
"""
็ถๆๅฎไน: dp[j] ่กจ็คบๅฐ jไธบๆญขๆ้ฟ็ๆ ้ๅคๅญ็ฌฆๅญไธฒ ้ฟๅบฆ
่ฝฌ็งปๆน็จ๏ผ้ๅฏนdp[j-1] >? j-i ๆฅๅ็ฑป่ฎจ่ฎบ
s[i] ไธบ ้ๅๅพๅบ๏ผไธs[j]็ธๅ็ๅญ็ฌฆ๏ผ่ฅi<0ๅ่กจ็คบๆฒกๆๅญ็ฌฆไธs[j]ๅน้
j-i ่กจ็คบไธs[j]ๅน้
็ๅญ็ฌฆs[i]็้ฟๅบฆ
1ใ dp[j-1] >= j-i => dp[j] = j-i
่กจ็คบdp[j-1]็้ฟๅบฆ๏ผๆไธs[j]็ธๅน้
็[i]็้ฟๅบฆๅ
ๅซ่ฟๅปไบ๏ผๆไปฅ่ฆๆดๆฐ้ฟๅบฆไธบj-iๅผๅง่ฎกๆฐ
2ใdp[j-1] < j-i => dp[j] = dp[j-1] + 1
่กจ็คบdp[j-1]็้ฟๅบฆๅฐไบไธs[j]็ธๅน้
็[i]็้ฟๅบฆ๏ผ้ฃไนๅฐฑๆๅฝๅๅ
็ด ็้ฟๅบฆๅ ไธ
ๅๅง็ถๆ๏ผdp[0] = 0
่ฟๅ็ปๆ๏ผ ่ฎพ็ฝฎไธไธชresๆฅ่ฎฐๅฝๆๅคงๅผ
"""
n = len(s)
dp = [0] * n
res = 0
for j in range(n):
i = j - 1
while i >= 0 and s[i] != s[j]:
i -= 1
if dp[j-1] < j - i:
dp[j] = dp[j - 1] + 1
else:
dp[j] = j - i
res = max(res, dp[j])
return res
if __name__ == "__main__":
A = Solution()
n = "abcabcbb"
print(A.lengthOfLongestSubstring(n))
A = Solution1()
n = "abcabcbb"
print(A.lengthOfLongestSubstring(n))
"""
ไธพไธชไพๅญ๏ผๆฏๅฆโabcdbaaโ๏ผ็ดขๅผไป0ๅผๅงใ
ๆไปฌๅฎนๆๅพๅฐ๏ผๅฝ j = 4ๆถ๏ผไปฅs[4]็ปๅฐพๅญ็ฌฆไธฒsub[4] = โcdbโ็ ้ฟๅบฆdp[4] =3ใ
ๆฅไธๆฅๆไปฌ็ j +1็ๆ
ๅตใๆ นๆฎๅฎไน๏ผsub[4]ๅญ็ฌฆไธฒไธญ็ๅญ็ฌฆ่ฏๅฎไธ้ๅค๏ผ
ๆไปฅๅฝ j = 5ๆถ๏ผ่ฟๆถ่ท็ฆปๅญ็ฌฆs[5]็ๅทฆไพง็้ๅคๅญ็ฌฆa็็ดขๅผ i = 0๏ผ
ไนๅฐฑๆฏ่ฏดs[ 0 ]ๅจๅญๅญ็ฌฆไธฒsub[ 4 ]ไนๅคไบ๏ผ
ไปฅs[5]็ปๅฐพ็ๅญ็ฌฆไธฒ่ช็ถๅจsub[4]็ๅบ็กไธๅ ไธๅญ็ฌฆs[5]ๅฐฑๆๆไบๆฐ็ๆ้ฟ็ไธ้ๅค็ๅญไธฒsub[5]๏ผ
้ฟๅบฆdp[5] = dp[4] + 1; ๆฅไธๆฅๆไปฌ็ปง็ปญ็ j =6็ๆ
ๅต๏ผ
่ฟๆถs[6]็ๅทฆไพง้ๅคๅญ็ฌฆa็็ดขๅผ i = 5๏ผ่ฏฅ้ๅคๅญ็ฌฆๅจsub[ 5 ]ไธญใ
ๆฐ็ๆ้ฟไธ้ๅค็ๅญไธฒsub[6]ๅทฆ่พน็ไปฅ i ็ปๅฐพ๏ผ้ฟๅบฆdp[6] = j - i = 1ใ
"""
| [
"641541452@qq.com"
] | 641541452@qq.com |
e344a5b4330e8d47e8503070ac4ef83d4fca5268 | 7246faf9a222269ce2612613f58dc5ff19091f10 | /baekjoon/1000~2999/1371_๊ฐ์ฅ ๋ง์ ๊ธ์.py | 0254885023ff1b56daf37d238d999f47f0496e46 | [] | no_license | gusdn3477/Algorithm_Study | 87a2eb72a8488d9263a86db70dadc7944434d41d | 3fefe1dcb40122157845ffc542f41cb097711cc8 | refs/heads/main | 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | dic = {}
while True:
try:
a = input()
for i in a:
if i == ' ':
continue
if i not in dic:
dic[i] = 1
else:
dic[i] += 1
except:
break
ar = list(dic.items())
ar = sorted(ar, key=lambda x: (-x[1], x[0]))
M = ar[0][1]
for i in range(len(ar)):
if ar[i][1] == M:
print(ar[i][0], end='') | [
"gusdn3477@naver.com"
] | gusdn3477@naver.com |
6ad57d1eb19e11eb751d93b7746d619a3a66884e | 88b4b883c1a262b5f9ca2c97bf1835d6d73d9f0b | /src/api/python/hce/dc_db/FieldRecalculator.py | 17b54851ece91f315cf2d8b5be50cf8186b57a42 | [] | no_license | hce-project/hce-bundle | 2f93dc219d717b9983c4bb534884e4a4b95e9b7b | 856a6df2acccd67d7af640ed09f05b2c99895f2e | refs/heads/master | 2021-09-07T22:55:20.964266 | 2018-03-02T12:00:42 | 2018-03-02T12:00:42 | 104,993,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,498 | py | '''
@package: dc
@author scorp
@link: http://hierarchical-cluster-engine.com/
@copyright: Copyright © 2013-2014 IOIX Ukraine
@license: http://hierarchical-cluster-engine.com/license/
@since: 0.1
'''
import dc_db.Constants as Constants
import dc_db.FieldRecalculatorDefaultCriterions as DefCriterions
import dc.EventObjects
import app.Utils as Utils # pylint: disable=F0401
logger = Utils.MPLogger().getLogger()
# #FieldRecalculator class makes come common processing of databse fields recalculation (using in Task classes)
class FieldRecalculator(object):
def __init__(self):
pass
# #commonSiteRecalculate - common recalculate method
#
# @param queryCallback function for queries execution
# @param additionCause additional SQL cause
# @param fieldName - processing field name (of `sites` tables)
# @param siteId - site id
def commonSiteRecalculate(self, defaultCritName, fieldName, siteId, queryCallback):
UPDATE_SQL_TEMPLATE = "UPDATE `sites` SET `%s`=(SELECT COUNT(*) FROM dc_urls.%s %s) WHERE `id` = '%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(defaultCritName, siteId, queryCallback)
query = UPDATE_SQL_TEMPLATE % (fieldName, tableName, criterionsString, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# #siteResourcesRecalculate - recalculate sites.Resources field
#
def siteResourcesRecalculate(self, siteId, queryCallback):
# self.commonSiteRecalculate(queryCallback, "State>3 AND Crawled>0", "Resources", siteId)
# self.commonSiteRecalculate("Crawled>0 AND Size>0", "Resources", siteId, queryCallback)
self.commonSiteRecalculate(DefCriterions.CRIT_RESOURCES, "Resources", siteId, queryCallback)
# #siteContentsRecalculate - recalculate sites.Contents field
#
def siteContentsRecalculate(self, siteId, queryCallback):
# self.commonSiteRecalculate(queryCallback, "State=7 AND Crawled>0 AND Processed>0", "Contents", siteId)
self.commonSiteRecalculate(DefCriterions.CRIT_CONTENTS, "Contents", siteId, queryCallback)
# updateCollectedURLs updates sites.CollectedURLs field
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateCollectedURLs(self, siteId, queryCallback):
QUERY_TEMPLATE = "UPDATE `sites` SET `CollectedURLs`=(SELECT count(*) FROM dc_urls.%s %s) WHERE `Id`='%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(DefCriterions.CRIT_CLURLS, siteId, queryCallback)
query = QUERY_TEMPLATE % (tableName, criterionsString, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# updateNewURLs updates sites.newURLs field
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateNewURLs(self, siteId, queryCallback):
QUERY_TEMPLATE = "UPDATE `sites` SET `NewURLs`=(SELECT count(*) FROM dc_urls.%s %s) WHERE `Id`='%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(DefCriterions.CRIT_NURLS, siteId, queryCallback)
query = QUERY_TEMPLATE % (tableName, criterionsString, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# updateErrors updates sites.Errors field
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateErrors(self, siteId, queryCallback):
QUERY_TEMPLATE = "UPDATE `sites` SET `Errors`=(SELECT count(*) FROM dc_urls.%s %s) WHERE `Id`='%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(DefCriterions.CRIT_ERRORS, siteId, queryCallback)
query = QUERY_TEMPLATE % (tableName, criterionsString, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# updateDeletedURLs updates sites.deletedURLs field
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateDeletedURLs(self, siteId, queryCallback):
QUERY_TEMPLATE_SELECT = "SELECT count(*) FROM %s %s"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
criterionsString = DefCriterions.getDefaultCriterions(DefCriterions.CRIT_DURLS, siteId, queryCallback)
query = QUERY_TEMPLATE_SELECT % (tableName, criterionsString)
res = queryCallback(query, Constants.FOURTH_DB_ID, Constants.EXEC_INDEX, True)
if res is not None and len(res) > 0 and len(res[0]) > 0:
count = res[0][0]
QUERY_TEMPLATE_UPDATE = "UPDATE `sites` SET `DeletedURLs`=%s WHERE `Id`='%s'"
query = QUERY_TEMPLATE_UPDATE % (str(count), siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
# commonRecalc method makes all recalculations
#
# @param siteId - siteId
# @param queryCallback - callback sql function
# @param recalcType - full or partial recalculating
def commonRecalc(self, siteId, queryCallback, recalcType=dc.EventObjects.FieldRecalculatorObj.FULL_RECALC):
self.siteResourcesRecalculate(siteId, queryCallback)
self.siteContentsRecalculate(siteId, queryCallback)
if recalcType == dc.EventObjects.FieldRecalculatorObj.FULL_RECALC:
self.updateCollectedURLs(siteId, queryCallback)
self.updateNewURLs(siteId, queryCallback)
self.updateDeletedURLs(siteId, queryCallback)
self.updateSiteCleanupFields(siteId, queryCallback)
# updateSiteCleanupFields recalculates some site's fields in SiteCleanUpTask operation
#
# @param siteId - siteId
# @param queryCallback - callback sql function
def updateSiteCleanupFields(self, siteId, queryCallback):
QUERY_TEMPLATE = "UPDATE `sites` SET `Size`=%s, `Errors`=%s, `ErrorMask`=%s, `AVGSpeed`=%s WHERE `Id`='%s'"
tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
localSize = "`Size`"
localErrors = "`Errors`"
localErrorMask = "`ErrorMask`"
localSpeed = "`AVGSpeed`"
TMP_QUERY_TEMPLATE = "SELECT SUM(`Size`) FROM %s WHERE " + DefCriterions.CRIT_CRAWLED_THIS_NODE
query = TMP_QUERY_TEMPLATE % tableName
res = queryCallback(query, Constants.SECONDARY_DB_ID)
if res is not None and len(res) > 0 and res[0] is not None and len(res[0]) > 0 and res[0][0] is not None:
localSize = str(res[0][0])
TMP_QUERY_TEMPLATE = "SELECT COUNT(*) FROM %s WHERE `errorMask` > 0 AND " + DefCriterions.CRIT_CRAWLED_THIS_NODE
query = TMP_QUERY_TEMPLATE % tableName
res = queryCallback(query, Constants.SECONDARY_DB_ID)
if res is not None and len(res) > 0 and res[0] is not None and len(res[0]) > 0 and res[0][0] is not None:
localErrors = str(res[0][0])
TMP_QUERY_TEMPLATE = "SELECT BIT_OR(`errorMask`) FROM %s WHERE " + DefCriterions.CRIT_CRAWLED_THIS_NODE
query = TMP_QUERY_TEMPLATE % tableName
res = queryCallback(query, Constants.SECONDARY_DB_ID)
if res is not None and len(res) > 0 and res[0] is not None and len(res[0]) > 0 and res[0][0] is not None:
localErrorMask = str(res[0][0])
TMP_QUERY_TEMPLATE = "SELECT AVG(`size`/`crawlingTime`*1000) FROM %s WHERE `crawlingTime` > 0 AND " + \
DefCriterions.CRIT_CRAWLED_THIS_NODE
query = TMP_QUERY_TEMPLATE % tableName
res = queryCallback(query, Constants.SECONDARY_DB_ID)
if res is not None and len(res) > 0 and res[0] is not None and len(res[0]) > 0 and res[0][0] is not None:
localSpeed = str(res[0][0])
query = QUERY_TEMPLATE % (localSize, localErrors, localErrorMask, localSpeed, siteId)
queryCallback(query, Constants.PRIMARY_DB_ID)
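# Usage sketch (illustrative only; site_id and query_callback stand for the caller's real
# site identifier and SQL-execution callback used throughout this package):
# recalculator = FieldRecalculator()
# recalculator.commonRecalc(site_id, query_callback)  # full recalculation by default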
| [
"bgv@bgv-d9"
] | bgv@bgv-d9 |
d4c7f258b88d04c56e29c54bb385cb510d252186 | 7f6b06334e6556ac91a19d410149129217070d5e | /cuda/cython/naive_add/setup.py | 22ef269ec790486bdb8db5710a18aeb9b8ce4ced | [] | no_license | saullocastro/programming | a402e5b7c34c80f0ce22e8a29ce7975b263f19c3 | 499938a566348649218dc3c0ec594a4babe4f1a4 | refs/heads/master | 2021-01-20T10:42:50.840178 | 2020-08-26T07:56:35 | 2020-08-26T07:56:35 | 21,904,820 | 11 | 2 | null | 2020-08-26T07:56:36 | 2014-07-16T14:47:32 | Python | UTF-8 | Python | false | false | 625 | py | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension('my_cuda',
sources=['my_cuda.pyx'],
libraries=['cuda_blockadd', 'cuda_threadadd',
'cuda_btadd', 'cuda_longadd'],
language='c',
extra_compile_args=['/openmp',
'/O2', '/favor:INTEL64', '/fp:fast'],
extra_link_args=[],
)]
setup(name = 'my_cuda',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules)
| [
"saullogiovani@gmail.com"
] | saullogiovani@gmail.com |
0ca3723e3a18f9b95623bfa22b6bc0ea65396537 | deafd775f238b2836f77b9140f4d6e14a3f3c06d | /python/ABC/ABC097/ABC097_B.py | 33adf33d1f0b2d4a6326a31c043fcede83728e3d | [] | no_license | knakajima3027/Atcoder | ab8e2bf912173b7523fddbb11b38abd7e296762e | 64cb32fcc4b99501f2f84496e5535e1e7b14c467 | refs/heads/master | 2021-06-22T03:58:03.777001 | 2020-12-19T11:23:49 | 2020-12-19T11:23:49 | 135,173,223 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | X = int(input())
max1 = 1
max2 = 0
for b in range(1, X+1):
for q in range(2, X+1):
if b**q > X:
break
max1 = b**q
if max1 > max2:
max2 = max1
print(max2)
| [
"kamihate1012@gmail.com"
] | kamihate1012@gmail.com |
96c79e41e1b372df9a1a4032bb02e97a2ae2b108 | 1959b8a6fb4bd61a6f87cd3affe39f2e3bdbf962 | /{{cookiecutter.repo_name}}/data_prep/wham/scripts/constants.py | cca06053bada8df02ed0092e6deb6aa1bafb6822 | [
"CC-BY-NC-4.0",
"MIT"
] | permissive | pseeth/cookiecutter-nussl | a85d61d00b2352d105a8f68aa66aab6ae670894b | fab73e7b1f3d393409360d31662600d6fe434c37 | refs/heads/master | 2023-01-05T00:35:17.528898 | 2020-02-27T17:21:51 | 2020-02-27T17:21:51 | 222,519,043 | 6 | 1 | MIT | 2022-12-26T21:00:36 | 2019-11-18T18:42:37 | Jupyter Notebook | UTF-8 | Python | false | false | 179 | py | NUM_BANDS = 4
SNR_THRESH = -6.
PRE_NOISE_SECONDS = 2.0
SAMPLERATE = 16000
MAX_SAMPLE_AMP = 0.95
MIN_SNR_DB = -3.
MAX_SNR_DB = 6.
PRE_NOISE_SAMPLES = PRE_NOISE_SECONDS * SAMPLERATE | [
"prem@u.northwestern.edu"
] | prem@u.northwestern.edu |
7616992a58654839cafd96f3f3cecb93b25ed1eb | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/raw/EGL/NV/coverage_sample.py | 0bfd3b6a9c0e69a1160f97d03cc07bb73773a2a0 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# Code generation uses this
# End users want this...
from OpenGL.raw.EGL import _errors
_EXTENSION_NAME = 'EGL_NV_coverage_sample'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.EGL, 'EGL_NV_coverage_sample', error_checker=_errors._error_checker)
EGL_COVERAGE_BUFFERS_NV = _C('EGL_COVERAGE_BUFFERS_NV', 0x30E0)
EGL_COVERAGE_SAMPLES_NV = _C('EGL_COVERAGE_SAMPLES_NV', 0x30E1)
| [
"rudnik49@gmail.com"
] | rudnik49@gmail.com |
68a134c008ed3034618e4f6e7ed24250bea0cd2b | 7d85c42e99e8009f63eade5aa54979abbbe4c350 | /game/lib/coginvasion/suit/DroppableCollectableJellybeans.py | 39de5dba276a764c3073b5510817ddd5fdc69b91 | [] | no_license | ToontownServerArchive/Cog-Invasion-Online-Alpha | 19c0454da87e47f864c0a5cb8c6835bca6923f0e | 40498d115ed716f1dec12cf40144015c806cc21f | refs/heads/master | 2023-03-25T08:49:40.878384 | 2016-07-05T07:09:36 | 2016-07-05T07:09:36 | 348,172,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | """
Filename: DroppableCollectableJellybeans.py
Created by: blach (22Mar15)
"""
from direct.directnotify.DirectNotifyGlobal import directNotify
from DroppableCollectableObject import DroppableCollectableObject
class DroppableCollectableJellybeans(DroppableCollectableObject):
notify = directNotify.newCategory("DroppableCollectableJellybeans")
def __init__(self):
DroppableCollectableObject.__init__(self)
def unload(self):
self.collectSfx = None
DroppableCollectableObject.unload(self)
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
edae3b83d112182e5fd0828eeaf173b4a0a018e3 | 35b3e64d760b7091859a40f17c24c00681f116fa | /pythonไธค้ขๅไธ้ข/kerasๅท็งฏ็ฅ็ป็ฝ็ป2.py | 34c1b01a6fd1a853dca5236f12701dc8676872b0 | [] | no_license | weiyinfu/TwoEatOne | 8b9d27a28ba8469fc96fb9790cec04b30a19e21f | d87dfbbab550fc8bd0da3fec076034494da23bdc | refs/heads/master | 2022-12-27T18:44:58.028285 | 2020-10-13T14:18:49 | 2020-10-13T14:18:49 | 79,469,916 | 1 | 0 | null | 2020-10-13T14:18:50 | 2017-01-19T16:04:02 | Python | UTF-8 | Python | false | false | 1,080 | py | import keras
from keras.layers import *
x_input = Input((4, 4, 1))
x_flat = Flatten()(x_input)
"""
็ป่ฎกๆฏไธชๆฃๅญ็ๆดปๅจ่ฝๅ,ไฝฟ็จ2*2ๅท็งฏๆ ธ
ๅ ไธบ้่ฆ็ป่ฎกๆๅผ,ๆไปฅๅคๆฅๅ ๅฑ
"""
free_space = Conv2D(2, (2, 2), padding='SAME', activation='sigmoid')(x_input)
free_space = Conv2D(2, (2, 2), padding='VALID')(free_space)
"""
ๅๅญ่ง,2*3ๅ3*2็ๅท็งฏๆ ธ
"""
eat1 = Conv2D(2, (2, 3), padding='VALID', activation='sigmoid')(x_input)
eat2 = Conv2D(2, (3, 2), padding='VALID', activation='sigmoid')(x_input)
m = Concatenate()([Flatten()(i) for i in (eat1, eat2, free_space)] + [x_flat])
"""
ๆๅresnet
"""
m = Dense(300, activation='relu')(Concatenate()([m, x_flat]))
m = Dense(16, activation='relu')(Concatenate()([m, x_flat]))
logits = Dense(3, activation='softmax')(m)
m = keras.models.Model(inputs=x_input, outputs=logits)
m.compile(keras.optimizers.RMSprop(0.01), "categorical_crossentropy", metrics=['accuracy'])
import data
x, y = data.get_data(True, True)
x = x.reshape(-1, 4, 4, 1)
batch_size = 120
m.fit(x, y, batch_size=batch_size, epochs=1000)
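# Note (assumption, not stated in the original): with a 3-way softmax output and
# categorical_crossentropy, data.get_data(True, True) is expected to return one-hot
# labels y of shape (N, 3) alongside the 4x4 board inputs x.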
| [
"wei.yinfu@qq.com"
] | wei.yinfu@qq.com |
4b208ef5abfe9ce4582acf2547252620f895832c | 675cdd4d9d2d5b6f8e1383d1e60c9f758322981f | /pipeline/0x00-pandas/14-visualize.py | 3a39452384e56b070eccd726adcc09dee9cb1d2a | [] | no_license | AndresSern/holbertonschool-machine_learning-1 | 5c4a8db28438d818b6b37725ff95681c4757fd9f | 7dafc37d306fcf2ea0f5af5bd97dfd78d388100c | refs/heads/main | 2023-07-11T04:47:01.565852 | 2021-08-03T04:22:38 | 2021-08-03T04:22:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | #!/usr/bin/env python3
from datetime import date
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from_file = __import__('2-from_file').from_file
df = from_file('coinbaseUSD_1-min_data_2014-12-01_to_2019-01-09.csv', ',')
df = df.drop('Weighted_Price', axis=1)
df = df.rename(columns={'Timestamp': 'Date'})
df['Date'] = pd.to_datetime(df['Date'], unit='s')
df = df[df['Date'] >= '2017-01-01']
df = df.set_index('Date')
df['Close'].fillna(method='ffill', inplace=True)
df["High"].fillna(method="ffill", inplace=True)
df["Low"].fillna(method="ffill", inplace=True)
df["Open"].fillna(method="ffill", inplace=True)
df.fillna(method='ffill')
df['Volume_(BTC)'].fillna(value=0, inplace=True)
df['Volume_(Currency)'].fillna(value=0, inplace=True)
high = df['High'].groupby(pd.Grouper(freq='D')).max()
low = df['Low'].groupby(pd.Grouper(freq='D')).min()
open = df['Open'].groupby(pd.Grouper(freq='D')).mean()
close = df['Close'].groupby(pd.Grouper(freq='D')).mean()
volume_btc = df['Volume_(BTC)'].groupby(pd.Grouper(freq='D')).sum()
volume_currency = df['Volume_(Currency)'].groupby(pd.Grouper(freq='D')).sum()
plt.plot(open)
plt.plot(high)
plt.plot(low)
plt.plot(close)
plt.plot(volume_btc)
plt.plot(volume_currency)
plt.legend()
plt.xlabel('Date')
# plt.ylim([0, 3000000])
# plt.yticks(np.arange(0, 3000000, 500000))
plt.show()
| [
"bouzouitina.hamdi@gmail.com"
] | bouzouitina.hamdi@gmail.com |
f24bb3459c66b3776bb45d37e75802752e515c55 | 17f527d6936397270183a35d7097e0a99de16cb5 | /utokyo_ist_pastexam/2011_w/6.py | 4829c80cda541037aa901a1d74f2f2bf80003ab2 | [] | no_license | ryosuke071111/algorithms | e942f043d08c7c7e2c926ed332ee2b8c44bdf0c5 | 867764450cc0f2a709fa2f743d9a0d95001e9296 | refs/heads/master | 2020-05-14T17:14:39.314064 | 2019-04-17T12:58:12 | 2019-04-17T12:58:12 | 181,888,623 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from uti import *
f1=open('s3.txt')
f1=f1.read().strip('\n')
ls=[]
while len(f1)>0:
ls.append(f1[:1000])
f1=f1[1000:]
# compress
strings=""
dics=[]
for i in ls:
string,dic=compression(i)
strings+=string
# dics.append(dic)
print(strings)
# decompress
print()
ans=decompression(string)
print(ans)
| [
"ryosuke0711993@gmail.com"
] | ryosuke0711993@gmail.com |
6e31a4079ade55066b95c9d5ff511edb6da190a9 | 3e9766f25777f7695247a45dd730ae60fd0a6c73 | /main.py | 168d4d1412a91d65894e4f0180c9cb890d2b537f | [] | no_license | ivanmarkov97/RaseTransportManager | 69de4599e3195b1945c8595a99a687e235e0891c | d8275d75e94efdbba76a63e557d33eff49b7ce4e | refs/heads/master | 2020-03-20T02:44:11.515512 | 2018-06-12T20:17:11 | 2018-06-12T20:17:11 | 137,122,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | import tkinter as tk
from service import *
from DBhelper import DBhelper
def click_event():
global my_rases
global all_rases
global yscroll
global db_helper
print("click")
db_helper = DBhelper()
prev_all_rases = db_helper.get("rase")
print(time_away.get())
print(city_from.get(city_from.curselection()))
print(city_to.get(city_to.curselection()))
print(company.get())
print(plane.get())
t_away = time_away.get()
c_from = city_from.get(city_from.curselection())
c_to = city_to.get(city_to.curselection())
com = company.get()
pl = plane.get()
main(t_away, c_from, c_to, com, pl)
all_rases = tk.Listbox(root, width=90, height=10)
all_rases.grid(row=6, column=1)
yscroll = tk.Scrollbar(command=all_rases.yview, orient=tk.VERTICAL)
yscroll.grid(row=6, column=2, sticky='ns')
all_rases.configure(yscrollcommand=yscroll.set)
db_helper = DBhelper()
my_rases = db_helper.get("rase")
print(my_rases)
i = 0
while i < len(my_rases) - 1:
item_rase = []
for it in my_rases[i]:
item_rase.append(copy.deepcopy(str(it)))
if prev_all_rases[i][1] != my_rases[i][1]:
all_rases.insert(tk.END, str(item_rase))
all_rases.itemconfig(i, {'bg': 'yellow'})
else:
all_rases.insert(tk.END, str(item_rase))
i += 1
item_rase = []
for it in my_rases[i]:
item_rase.append(copy.deepcopy(str(it)))
all_rases.insert(tk.END, str(item_rase))
all_rases.itemconfig(i, {'bg': 'light green'})
lines = len(my_rases)
all_rases.yview_scroll(lines, 'units')
root = tk.Tk()
tk.Label(root, text="Departure time", relief=tk.RIDGE, anchor='s', width=12).grid(row=0)
tk.Label(root, text="Departure point", relief=tk.RIDGE, anchor='s', width=12).grid(row=1)
tk.Label(root, text="Arrival point", relief=tk.RIDGE, anchor='s', width=12).grid(row=2)
tk.Label(root, text="Company", relief=tk.RIDGE, anchor='s', width=12).grid(row=3)
tk.Label(root, text="Plane", relief=tk.RIDGE, anchor='s', width=12).grid(row=4)
tk.Label(root, text="Flight", relief=tk.RIDGE, anchor='s', width=12).grid(row=6)
time_away = tk.Entry(root, width=50)
time_away.insert(0, '2018-05-05 12:30:00')
time_away.grid(row=0, column=1)
city_from = tk.Listbox(root, width=50, height=2, exportselection=0)
city_from.grid(row=1, column=1)
company = tk.Entry(root, width=50)
company.grid(row=3, column=1)
plane = tk.Entry(root, width=50)
plane.grid(row=4, column=1)
b1 = tk.Button(root, text='Add', command=click_event)
b1.grid(row=5, column=1)
for item in ["St.Petersberg, Pulkovo", "Moscow, Sheremetyevo"]:
city_from.insert(tk.END, item)
city_to = tk.Listbox(root, width=50, height=2, exportselection=0)
city_to.grid(row=2, column=1)
for item in ["St.Petersberg, Pulkovo", "Moscow, Sheremetyevo"]:
city_to.insert(tk.END, item)
all_rases = tk.Listbox(root, width=90, height=10)
all_rases.grid(row=6, column=1)
yscroll = tk.Scrollbar(command=all_rases.yview, orient=tk.VERTICAL)
yscroll.grid(row=6, column=2, sticky='ns')
all_rases.configure(yscrollcommand=yscroll.set)
db_helper = DBhelper()
my_rases = db_helper.get("rase")
print(my_rases)
for rase in my_rases:
item_rase = []
for it in rase:
item_rase.append(copy.deepcopy(str(it)))
all_rases.insert(tk.END, str(item_rase))
lines = len(my_rases)
all_rases.yview_scroll(lines, 'units')
root.geometry("650x350")
root.resizable(width=False, height=False)
root.mainloop()
| [
"ivanmarkov1997@gmail.com"
] | ivanmarkov1997@gmail.com |
f00209d2c8adb18f187d9a07b18295d08eb5a998 | 84b0c9adeeba03e8dadf20346aae61e5a343d8c6 | /tf_agents/bandits/networks/global_and_arm_feature_network_test.py | f4c51f215542a38f20c67391e6012069a7d7b8ba | [
"Apache-2.0"
] | permissive | trinity77/agents | fa8e0a31898f4cc5178d8108c86ede95d2f36aa3 | baf18f275294e902f462404d21168ca4697e2f6f | refs/heads/master | 2022-10-19T05:14:01.172058 | 2020-06-03T19:50:57 | 2020-06-03T19:50:57 | 269,180,393 | 0 | 0 | Apache-2.0 | 2020-06-03T19:50:58 | 2020-06-03T19:50:02 | null | UTF-8 | Python | false | false | 5,364 | py | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.networks.global_and_arm_feature_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.networks import global_and_arm_feature_network as gafn
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.specs import tensor_spec
from tf_agents.utils import test_utils
parameters = parameterized.named_parameters(
{
'testcase_name': 'batch2feat4act3',
'batch_size': 2,
'feature_dim': 4,
'num_actions': 3
}, {
'testcase_name': 'batch1feat7act9',
'batch_size': 1,
'feature_dim': 7,
'num_actions': 9
})
class GlobalAndArmFeatureNetworkTest(parameterized.TestCase,
test_utils.TestCase):
@parameters
def testCreateFeedForwardCommonTowerNetwork(self, batch_size, feature_dim,
num_actions):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, feature_dim, num_actions)
net = gafn.create_feed_forward_common_tower_network(obs_spec, (4, 3, 2),
(6, 5, 4), (7, 6, 5))
input_nest = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=(batch_size,))
output, _ = net(input_nest)
self.evaluate(tf.compat.v1.global_variables_initializer())
output = self.evaluate(output)
self.assertAllEqual(output.shape, (batch_size, num_actions))
@parameters
def testCreateFeedForwardDotProductNetwork(self, batch_size, feature_dim,
num_actions):
obs_spec = bandit_spec_utils.create_per_arm_observation_spec(
7, feature_dim, num_actions)
net = gafn.create_feed_forward_dot_product_network(obs_spec, (4, 3, 4),
(6, 5, 4))
input_nest = tensor_spec.sample_spec_nest(
obs_spec, outer_dims=(batch_size,))
output, _ = net(input_nest)
self.evaluate(tf.compat.v1.global_variables_initializer())
output = self.evaluate(output)
self.assertAllEqual(output.shape, (batch_size, num_actions))
def testCreateFeedForwardCommonTowerNetworkWithFeatureColumns(
self, batch_size=2, feature_dim=4, num_actions=3):
obs_spec = {
'global': {
'dense':
tensor_spec.TensorSpec(shape=(feature_dim,), dtype=tf.float32),
'composer':
tensor_spec.TensorSpec((), tf.string)
},
'per_arm': {
'name': tensor_spec.TensorSpec((num_actions,), tf.string),
'fruit': tensor_spec.TensorSpec((num_actions,), tf.string)
}
}
columns_dense = tf.feature_column.numeric_column(
'dense', shape=(feature_dim,))
columns_composer = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'composer', ['wolfgang', 'amadeus', 'mozart']))
columns_name = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'name', ['bob', 'george', 'wanda']))
columns_fruit = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
'fruit', ['banana', 'kiwi', 'pear']))
net = gafn.create_feed_forward_common_tower_network(
observation_spec=obs_spec,
global_layers=(4, 3, 2),
arm_layers=(6, 5, 4),
common_layers=(7, 6, 5),
global_preprocessing_combiner=tf.compat.v2.keras.layers.DenseFeatures(
[columns_dense, columns_composer]),
arm_preprocessing_combiner=tf.compat.v2.keras.layers.DenseFeatures(
[columns_name, columns_fruit]))
input_nest = {
'global': {
'dense': tf.constant(np.random.rand(batch_size, feature_dim)),
'composer': tf.constant(['wolfgang', 'mozart'])
},
'per_arm': {
'name':
tf.constant([[['george'], ['george'], ['george']],
[['bob'], ['bob'], ['bob']]]),
'fruit':
tf.constant([[['banana'], ['banana'], ['banana']],
[['kiwi'], ['kiwi'], ['kiwi']]])
}
}
output, _ = net(input_nest)
self.evaluate([
tf.compat.v1.global_variables_initializer(),
tf.compat.v1.tables_initializer()
])
output = self.evaluate(output)
self.assertAllEqual(output.shape, (batch_size, num_actions))
if __name__ == '__main__':
tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f0016ddc6d958b94d872ad1f44a38ef76afa0387 | c7511b81624a556978550341fa7a885022ab45e9 | /tree/balanced-BT.py | 546737be9291e23df2da610d6df391f4738da25a | [] | no_license | sandipan898/ds-algo-python-set-2 | 372de5e01aeda1edf7289cd784195480ca0a3696 | 53859d71d980dc08de8bd51acc049537082df0c9 | refs/heads/main | 2023-07-06T11:26:34.819144 | 2021-08-09T19:26:16 | 2021-08-09T19:26:16 | 378,245,185 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def find_height(root):
if root is None:
return 0
return max(find_height(root.left), find_height(root.right)) + 1
def is_balanced_tree(root):
if root is None:
return True
left_height = find_height(root.left)
right_height = find_height(root.right)
print("Node {} have Balance Factor of {}".format(root.data, left_height - right_height))
if abs(left_height - right_height) > 1:
return False
return (is_balanced_tree(root.left) and is_balanced_tree(root.right))
if __name__ == '__main__':
root = Node(2)
root.left = Node(3)
root.right = Node(4)
root.left.left = Node(5)
root.left.right = Node(6)
root.right.left = Node(7)
root.right.left.left = Node(8)
root.right.left.right = Node(9)
'''
2
/ \
3 4
/ \ /
5 6 7
/ \
8 9
'''
if is_balanced_tree(root):
print("This is a Balanced Binary Tree!")
else:
print("This is Not a Balanced Binary Tree!")
| [
"sandipan.das898@gmail.com"
] | sandipan.das898@gmail.com |
c818fe7898cfed2b1e0843d46953deed4b626626 | 68002ae9f124d808395b51944b616da298068b62 | /Data_Visualization/Plot/Matplotlib/FT748/Ch12/Ch12_4_1.py | 8d7d43c678747a8b7b8ba1d85dcf9604d5aeb10e | [] | no_license | supergravity/PythonLearning- | b4dd51205470f27e1ba92ec19b1fa1c967101545 | eb53b048650a7272819b45943d3dd40fa91d2297 | refs/heads/master | 2023-01-04T23:15:59.945385 | 2020-11-06T13:35:15 | 2020-11-06T13:35:15 | 82,797,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | import pandas as pd
df = pd.read_csv("products.csv", encoding="utf8")
df.columns = ["type", "name", "price"]
ordinals = ["A", "B", "C", "D", "E", "F"]
df.index = ordinals
print(df.head(2))
# get and update a single cell value
print(df.loc[ordinals[0], "price"])
df.loc[ordinals[0], "price"] = 21.6
print(df.iloc[1,2])
df.iloc[1,2] = 46.3
print(df.head(2))
df.head(2).to_html("Ch12_4_1.html") | [
"liamlin@Liams-MacBook-Pro.local"
] | liamlin@Liams-MacBook-Pro.local |
58c61840621f2e94fb6ad4e5358755b34cb76928 | e058d8501ba8fa70c4e7a60b669e92bab1044f03 | /apps/postman/admin.py | 2b4d854053c1d7da4248b86b3e97d79ef0786d34 | [] | no_license | wd5/7-byhands | f1f237f5dc896ce804d91e958793c074ab4f3a14 | d8a29e77f21e2947bed8414dc6ae0144798bd5a3 | refs/heads/master | 2021-01-18T07:19:46.447486 | 2013-01-28T21:37:55 | 2013-01-28T21:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | from django.contrib import admin
from django.conf import settings
from models import *
class SubscriberAdmin(admin.ModelAdmin):
pass
class MailAdmin(admin.ModelAdmin):
pass
class SentLogAdmin(admin.ModelAdmin):
pass
admin.site.register(Subscriber, SubscriberAdmin)
admin.site.register(SentLog , SentLogAdmin)
admin.site.register(Mail , MailAdmin)
| [
"nide@inbox.ru"
] | nide@inbox.ru |
08fe3bebae5f5c5f51f87adafb3f87e9659b1c8b | 38b9706d8aea076c453e83a7ab2becc8bc2e57ed | /Array/39.py | f786f36930ae7d4ad6cddd537a6cc28e85505aa6 | [] | no_license | sathish0706/guvi-codekata | e564e18bc174c94eb22a8ed483a9eb93a85fecab | cf4a508beaccfe46de4b34ffd2ec1e126a9e9807 | refs/heads/main | 2023-03-19T19:18:19.070994 | 2021-01-11T16:18:00 | 2021-01-11T16:18:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | from collections import defaultdict
n = int(input())
m = list(map(int,input().split()))
d = defaultdict(int)
for i in m:
if i in d:
d[i] += 1
else:
d[i] = 1
count = 0
for i in d:
if d[i] == 1:
count = i
print(count) | [
"annamalaipalani11@gmail.com"
] | annamalaipalani11@gmail.com |
530d39a7ad9a99f45a07382ab008cbebb4e6f6e7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_289/ch67_2020_04_27_14_46_16_989502.py | 05a10228396c4bc4074e1e5a2d4230107ae953a5 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | def alunos_impares(lista_nomes):
lista_impares = []
for e in range len(lista_nomes):
if lista_nomes[e] %2 !=0:
lista_impares.append(lista_nomes[e])
return lista_impares | [
"you@example.com"
] | you@example.com |
e53ef3a51a14ccc8104cc867ed554b8adb9aed95 | aad164e4efe1d55cc189c35956bfd435b14a0f52 | /eve-8.21.494548/carbon/client/script/graphics/resourceConstructors/caustics.py | 0b31954b427fc0133d1ad9e77edc5c4412a39c19 | [] | no_license | Pluckyduck/eve | 61cc41fe8fd4dca4fbdcc4761a37bcfeb27ed84f | 9a277707ab1f162c6bd9618faf722c0be3ea93ad | refs/heads/master | 2020-12-28T23:35:29.992875 | 2013-05-06T14:24:33 | 2013-05-06T14:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,938 | py | #Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/carbon/client/script/graphics/resourceConstructors/caustics.py
import blue
import trinity
import bluepy
import re
import decometaclass
class CausticsRenderJob(object):
__cid__ = 'trinity.TriRenderJob'
__metaclass__ = decometaclass.BlueWrappedMetaclass
def Initialize(self, size, speed, amplitude, tiling, texturePath):
def TextureDestroyed():
self.Destroy()
texture = trinity.Tr2RenderTarget(size, size, 1, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)
self.name = 'Caustics'
self.size = size
self.texture = blue.BluePythonWeakRef(texture)
self.texture.callback = TextureDestroyed
self.steps.append(trinity.TriStepPushRenderTarget(texture))
self.steps.append(trinity.TriStepClear((0, 0, 0, 0)))
self.steps.append(trinity.TriStepSetStdRndStates(trinity.RM_FULLSCREEN))
material = trinity.Tr2ShaderMaterial()
material.highLevelShaderName = 'Caustics'
param = trinity.TriTexture2DParameter()
param.name = 'Texture'
param.resourcePath = texturePath
material.parameters['Texture'] = param
param = trinity.Tr2FloatParameter()
param.name = 'Speed'
param.value = speed
material.parameters['Speed'] = param
param = trinity.Tr2FloatParameter()
param.name = 'Amplitude'
param.value = amplitude
material.parameters['Amplitude'] = param
param = trinity.Tr2FloatParameter()
param.name = 'Tiling'
param.value = tiling
material.parameters['Tiling'] = param
material.BindLowLevelShader([])
self.steps.append(trinity.TriStepRenderFullScreenShader(material))
self.steps.append(trinity.TriStepPopRenderTarget())
trinity.renderJobs.recurring.append(self)
return trinity.TriTextureRes(texture)
def Destroy(self):
trinity.renderJobs.recurring.remove(self)
self.texture = None
def DoPrepareResources(self):
for step in self.steps:
if type(step) is trinity.TriStepPushRenderTarget:
if self.texture is not None and self.texture.object is not None:
self.texture.object.SetFromRenderTarget(step.renderTarget)
def Caustics(paramString):
params = {'size': 256,
'speed': 1,
'amplitude': 1,
'tiling': 1,
'texture': 'res:/Texture/Global/caustic.dds'}
expr = re.compile('&?(\\w+)=([^&]*)')
pos = 0
while True:
match = expr.match(paramString, pos)
if match is None:
break
params[match.group(1)] = match.group(2)
pos = match.end()
rj = CausticsRenderJob()
return rj.Initialize(int(params['size']), float(params['speed']), float(params['amplitude']), float(params['tiling']), params['texture'])
blue.resMan.RegisterResourceConstructor('caustics', Caustics) | [
"ferox2552@gmail.com"
] | ferox2552@gmail.com |
a6b2d8ca57fdd5b6c2a3e5d15ae0a7de5aaa87d7 | e67c27642a4b83b3560dc4bba7de7752278caa07 | /example-seaexplorer/process_deploymentRealTime.py | 390d934c8e720fa964bf4ca7e1f927624cb6a240 | [] | no_license | c-burmaster/pyglider | 3749661bfa367642bdd8cb453e8f14428785de46 | 76131e8419c30852150173a9994a88595cef52aa | refs/heads/master | 2020-07-25T14:42:26.752105 | 2020-04-06T20:44:39 | 2020-04-06T20:44:39 | 208,326,724 | 0 | 0 | null | 2019-09-13T18:48:07 | 2019-09-13T18:48:07 | null | UTF-8 | Python | false | false | 1,545 | py | import logging
import os
import pyglider.seaexplorer as seaexplorer
import pyglider.ncprocess as ncprocess
import pyglider.plotting as pgplot
logging.basicConfig(level='INFO')
sourcedir = '~alseamar/Documents/SEA035/000012/000012/C-Csv/*'
rawdir = './realtime_raw/'
rawncdir = './realtime_rawnc/'
deploymentyaml = './deploymentRealtime.yml'
l1tsdir = './L1-timeseries/'
profiledir = './L1-profiles/'
griddir = './L2-gridfiles/'
plottingyaml = './plottingconfig.yml'
## get the data and clean up derived
#os.system('source synctodfo.sh')
if 0:
os.system('rsync -av ' + sourcedir + ' ' + rawdir)
# clean last processing...
os.system('rm ' + rawncdir + '* ' + l1tsdir + '* ' + profiledir + '* ' +
griddir + '* ')
if 1:
# turn *.EBD and *.DBD into *.ebd.nc and *.dbd.nc netcdf files.
seaexplorer.raw_to_rawnc(rawdir, rawncdir, deploymentyaml)
    # merge individual netcdf files into single netcdf files *.ebd.nc and *.dbd.nc
seaexplorer.merge_rawnc(rawncdir, rawncdir, deploymentyaml, kind='sub')
    # Make a level-1 timeseries netcdf file from the raw files...
outname = seaexplorer.raw_to_L1timeseries(rawncdir, l1tsdir, deploymentyaml, kind='sub')
ncprocess.extract_L1timeseries_profiles(outname, profiledir, deploymentyaml)
outname2 = ncprocess.make_L2_gridfiles(outname, griddir, deploymentyaml)
if 1:
# make profile netcdf files for ioos gdac...
# make grid of dataset....
pgplot.timeseries_plots(outname, plottingyaml)
pgplot.grid_plots(outname2, plottingyaml)
| [
"jklymak@gmail.com"
] | jklymak@gmail.com |
217d85c42a5b1d3880ab3cbf52e36a73d6d5e6c9 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayFinancialnetAuthEcsignErrorQueryResponse.py | 1e5c32e52d696f398ce156d029df7850935ce29a | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,115 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.ErrorLog import ErrorLog
class AlipayFinancialnetAuthEcsignErrorQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayFinancialnetAuthEcsignErrorQueryResponse, self).__init__()
self._error_log_list = None
@property
def error_log_list(self):
return self._error_log_list
@error_log_list.setter
def error_log_list(self, value):
if isinstance(value, list):
self._error_log_list = list()
for i in value:
if isinstance(i, ErrorLog):
self._error_log_list.append(i)
else:
self._error_log_list.append(ErrorLog.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayFinancialnetAuthEcsignErrorQueryResponse, self).parse_response_content(response_content)
if 'error_log_list' in response:
self.error_log_list = response['error_log_list']
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
ac9a73f49e91af26961dc6a2016f1f3a3e02557b | 68bad4b3d92872bb5b77b4ee503e588d20511a27 | /python/scripts_inhibition/old_script/effect_beta_conn_index3.py | 71c90589942dfe29b6b59fb06f211f42ddc7e0b9 | [] | no_license | mickelindahl/bgmodel | 647be626a7311a8f08f3dfc897c6dd4466fc0a92 | 78e6f2b73bbcbecd0dba25caf99f835313c914ee | refs/heads/master | 2023-08-29T13:57:04.122115 | 2022-02-11T14:28:23 | 2022-02-11T14:28:23 | 17,148,386 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | '''
Created on Nov 13, 2014
@author: mikael
'''
from scripts_inhibition import effect_conns
from effect_conns import gs_builder_conn
d=kw={'n_rows':8,
'n_cols':2,
'w':int(72/2.54*18),
'h':int(72/2.54*18)/3,
'fontsize':7,
'title_fontsize':7,
'gs_builder':gs_builder_conn}
kwargs={'data_path':('/home/mikael/results/papers/inhibition/network/'
+'milner/simulate_beta_ZZZ_conn_effect_perturb3/'),
'from_diks':0,
'script_name':(__file__.split('/')[-1][0:-3]+'/data'),
'title':'Activation (beta)',
'fontsize_x':7,
'fontsize_y':7,
'conn_fig_title_fontsize':7,
'title_flipped':True,
# 'title_posy':0.2,
'do_plots':['index'],
'top_lables_fontsize':7,
'clim_raw': [[0,5], [0,50], [0,1]],
'kwargs_fig':d,
'oi_min':15.,
'oi_max':25,
'oi_fs':256,
'psd':{'NFFT':128,
'fs':256.,
'noverlap':128/2}}
obj=effect_conns.Main(**kwargs)
obj.do() | [
"mickelindahl@gmail.com"
] | mickelindahl@gmail.com |
41371ec303f8bfca6e951551f8bc57780ee8392d | 540bf8de2145644fa3549e507871d53352201df8 | /Chapter_8/Lopez_TIY8.9.py | 0f9c69bf23451d0fa200b8967c79b820adbb2d29 | [] | no_license | lope512q09/Python | 68d798d4c3135ac187b78db28b73047b2efdbde9 | 7fdb97da36fd3f7145bfb3c8a43518685b5f6d6d | refs/heads/master | 2020-08-23T10:44:07.915389 | 2020-01-22T15:36:23 | 2020-01-22T15:36:23 | 216,598,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | message_list = ["Hi!", "How are you?", "I'm good, and you?", "The same!"]
def show_messages(messages):
for message in messages:
print(f"{message}\n")
show_messages(message_list)
| [
"dillonduff1@frontier.com"
] | dillonduff1@frontier.com |
c5a559474d66a4a49796a3aa35b921528e07527b | dee29293af049cac224f0c94a7eaf439e8766085 | /bluezutils.py | 8c9f7f962272215923940598f7e84d9b713cd26f | [] | no_license | lubusax/2005_dbus | 9721d88c42081138fac1b55814f5fc1bbb9eec09 | 0647a951448fe8bb297ba2988ef35ec7117e389f | refs/heads/master | 2022-12-01T23:33:52.162681 | 2020-08-10T19:41:02 | 2020-08-10T19:41:02 | 263,895,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | import dbus
SERVICE_NAME = "org.bluez"
ADAPTER_INTERFACE = SERVICE_NAME + ".Adapter1"
DEVICE_INTERFACE = SERVICE_NAME + ".Device1"
def get_managed_objects():
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object("org.bluez", "/"),
"org.freedesktop.DBus.ObjectManager")
return manager.GetManagedObjects()
def find_adapter(pattern=None):
return find_adapter_in_objects(get_managed_objects(), pattern)
def find_adapter_in_objects(objects, pattern=None):
bus = dbus.SystemBus()
for path, ifaces in objects.items():
adapter = ifaces.get(ADAPTER_INTERFACE)
if adapter is None:
continue
if not pattern or pattern == adapter["Address"] or \
path.endswith(pattern):
obj = bus.get_object(SERVICE_NAME, path)
#print("BLUEZ :", SERVICE_NAME)
print("PATH_HCI0 :", path)
print("hci0 :", obj)
return dbus.Interface(obj, ADAPTER_INTERFACE)
raise Exception("Bluetooth adapter not found")
def find_device(device_address, adapter_pattern=None):
return find_device_in_objects(get_managed_objects(), device_address,
adapter_pattern)
def find_device_in_objects(objects, device_address, adapter_pattern=None):
bus = dbus.SystemBus()
path_prefix = ""
if adapter_pattern:
adapter = find_adapter_in_objects(objects, adapter_pattern)
path_prefix = adapter.object_path
for path, ifaces in objects.items():
device = ifaces.get(DEVICE_INTERFACE)
if device is None:
continue
if (device["Address"] == device_address and
path.startswith(path_prefix)):
obj = bus.get_object(SERVICE_NAME, path)
return dbus.Interface(obj, DEVICE_INTERFACE)
raise Exception("Bluetooth device not found")
| [
"lu.bu.sax@gmail.com"
] | lu.bu.sax@gmail.com |
3ebb7a68bca91cfa6d41a0fe4b1d036325352a14 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit2215.py | 5f520456f8eb7cadea2dc6cd635e3fff07eb5941 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,852 | py | # qubit number=4
# total number=28
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.x(input_qubit[3]) # number=1
prog.rx(-1.9352210746113125,input_qubit[3]) # number=14
prog.cx(input_qubit[1],input_qubit[2]) # number=22
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[2]) # number=13
prog.rx(0.13823007675795101,input_qubit[2]) # number=24
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.rx(-1.9069467407290044,input_qubit[2]) # number=20
prog.h(input_qubit[3]) # number=21
prog.y(input_qubit[2]) # number=10
prog.h(input_qubit[1]) # number=17
prog.cz(input_qubit[3],input_qubit[1]) # number=18
prog.h(input_qubit[1]) # number=19
prog.y(input_qubit[2]) # number=11
prog.h(input_qubit[0]) # number=25
prog.cz(input_qubit[1],input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=27
prog.cx(input_qubit[1],input_qubit[0]) # number=16
prog.z(input_qubit[3]) # number=23
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2215.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
1e225faa5c2c7bc6323a87b9640d07cc541acb30 | 51fd2216d5182a1b3655e7cb1b197862424054f6 | /insta_api/config/urls.py | 8258104c833242de67443431202090483e1a9431 | [] | no_license | seungjinhan/django_clone_instagram | 3217829fabe893190e7d6cc37b64b7d3c881f441 | f0ea8649d426c167e2ccd2fa3b15e5d3b7867d7b | refs/heads/master | 2022-12-26T05:39:16.320763 | 2020-10-05T13:40:39 | 2020-10-05T13:40:39 | 291,262,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('api/', include('api.urls')),
path('token/', include('djoser.urls.jwt')),
path('admin/', admin.site.urls),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"hanblues@gmail.com"
] | hanblues@gmail.com |
062718780f0e402218a9160bf8cafdd20d5a1da6 | 1749147fb24b13803d3437e0ae94250d67d618bd | /keras/keras33_tensorboard.py | a9043cf01d608a05cfe76b9a3fafa8b1681e955d | [] | no_license | MJK0211/bit_seoul | 65dcccb9336d9565bf9b3bc210b1e9c1c8bd840e | 44d78ce3e03f0a9cf44afafc95879e4e92d27d54 | refs/heads/master | 2023-02-06T00:45:52.999272 | 2020-12-26T07:47:30 | 2020-12-26T07:47:30 | 311,308,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,076 | py | import numpy as np
#1. Data
dataset = np.array(range(1,11))
size = 5
def split_x(seq, size):
aaa = []
for i in range(len(seq) - size + 1):
subset = seq[i : (i+size)]
aaa.append([item for item in subset])
# print(type(aaa))
return np.array(aaa)
datasets = split_x(dataset, size)
x = datasets[:, 0:4] #(96,4)
y = datasets[:, 4] #(96,)
x = np.reshape(x, (x.shape[0], x.shape[1],1)) #(96,4,1)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test, = train_test_split(x, y, train_size=0.9, shuffle=False)
#2. Build the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
model = Sequential()
model.add(LSTM(200, activation='relu', input_shape=(4,1)))
model.add(Dense(180, activation='relu'))
model.add(Dense(150, activation='relu'))
model.add(Dense(110, activation='relu'))
model.add(Dense(60, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))
model.summary()
#3. Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard # TensorBoard callback added
early_stopping = EarlyStopping(monitor='loss', patience=200, mode='min')
to_hist = TensorBoard(log_dir='graph', histogram_freq=0, write_graph=True, write_images=True)
# log files will appear in the graph folder
# cmd - d: - cd Study - cd bit_seoul - cd graph (the final log folder) - tensorboard --logdir=. enter
# TensorBoard 2.3.0 at http://localhost:6006/ (Press CTRL+C to quit)
# open the URL shown above in a browser
# logs can pile up between runs; when inspecting a new run it is better to delete the old logs first
history = model.fit(x_train, y_train, epochs=1000, batch_size=1, validation_split= 0.2, verbose=1, callbacks=[early_stopping, to_hist]) # pass to_hist to callbacks as part of the list
#4. ํ๊ฐ, ์์ธก
loss, mae = model.evaluate(x_test, y_test)
print("loss : ", loss)
print("mae : ", mae)
y_predict = model.predict(x_test)
print("y_predict : \n", y_predict) | [
"kimminjong0211@gmail.com"
] | kimminjong0211@gmail.com |
49582bc92163da9907c82cade99055d72e45a69b | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_chart_format08.py | 058d4a13f4a4112c40ccca93ab04951eaf939c95 | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,440 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_format08.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [46164608, 46176128]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'trendline': {'type': 'linear'},
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
e768c417c324d6053a1501c6620777767efd6bbf | 5483ac2ccf7ac0d951b31dab2a86fe564c397408 | /appos.py | f801bf21a4cacfc9c718c9eab3adaad81a3221a0 | [] | no_license | Ram-Aditya/Music-Recommender | 9d4a7ee8379e98756e2082a95bbb1d5e4318e806 | 1fdeb5ed208be7908f09a1a5576a30218505a979 | refs/heads/master | 2020-05-07T14:20:13.772004 | 2019-10-25T16:12:57 | 2019-10-25T16:12:57 | 180,589,469 | 0 | 2 | null | 2019-10-27T12:58:23 | 2019-04-10T13:37:25 | Python | UTF-8 | Python | false | false | 1,308 | py | appos = {
"ain't":"it is not like",
"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"that's" : "that is",
"there's" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not"
} | [
"ramaditya.danbrown@gmail.com"
] | ramaditya.danbrown@gmail.com |
a56d370607d4b4332ef539db433c38161c4a2bcb | 8972658ca2c64703e8281db89d7a6ac47cbabbf7 | /backend/linkanywhere/apps/base/behaviors.py | d3c96ffeda67dc40478d7c26a088712f96c2161d | [
"MIT"
] | permissive | denisorehovsky/linkanywhere | 15721824719cc8a959cdddb4178cfe754eb4862d | e21d6725fbe0e74a7301e40f9d9bdbac17c68e68 | refs/heads/master | 2022-07-21T16:16:17.412930 | 2017-08-24T06:32:37 | 2017-08-24T06:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext as _
from behaviors.querysets import PublishedQuerySet
from .constants import DRAFT, PUBLISHED, PUBLICATION_STATUS_CHOICES
class Published(models.Model):
publication_status = models.CharField(
_('publication status'),
max_length=1,
choices=PUBLICATION_STATUS_CHOICES,
default=DRAFT
)
publication_date = models.DateTimeField(
_('publication date'), null=True, blank=True
)
objects = PublishedQuerySet.as_manager()
publications = PublishedQuerySet.as_manager()
class Meta:
abstract = True
def save(self, *args, **kwargs):
if self.publication_date is None and self.is_published:
self.publication_date = timezone.now()
super().save(*args, **kwargs)
@property
def is_draft(self):
return self.publication_status == DRAFT
@property
def is_published(self):
return self.publication_status == PUBLISHED
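# Usage sketch (hypothetical model, not part of this app): a concrete model would subclass
# Published, e.g.
# class Article(Published):
#     title = models.CharField(max_length=100)
# Saving an Article whose publication_status is PUBLISHED stamps publication_date on the
# first publish, and the `objects` / `publications` managers expose the PublishedQuerySet.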
| [
"denis.orehovsky@gmail.com"
] | denis.orehovsky@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.