blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
46e5affa680c2833075c70e0956b9914e2f798ce | 8c4cd2a8725f8f04e5b2fee3bc8f67fb7b4af446 | /benchmarks/benchmark_dual_dmp.py | 9369f7b297cea7f2ea2c075f8b349b3127e99a0d | [
"BSD-3-Clause"
] | permissive | dfki-ric/movement_primitives | 37c08aade84f5abc4248b7281e6a1c441cf642a9 | 327b192608747e20e555f59671903854045c4713 | refs/heads/main | 2023-07-20T07:57:52.938054 | 2023-05-22T13:10:47 | 2023-05-22T13:10:47 | 429,049,411 | 97 | 29 | NOASSERTION | 2023-07-18T08:58:30 | 2021-11-17T13:06:49 | Python | UTF-8 | Python | false | false | 881 | py | from functools import partial
import numpy as np
from movement_primitives.dmp import DualCartesianDMP
import timeit

# Start/goal state for a dual-arm Cartesian DMP: two stacked
# (x, y, z, qw, qx, qy, qz) pose blocks, one per end effector.
start_y = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
goal_y = np.array([1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
dt = 0.01
int_dt = 0.001

dmp = DualCartesianDMP(execution_time=1.0, dt=dt, n_weights_per_dim=6, int_dt=int_dt)
dmp.configure(start_y=start_y, goal_y=goal_y)
# Random forcing-term weights so open_loop has non-trivial work to do.
dmp.set_weights(1000 * np.random.randn(*dmp.get_weights().shape))

# Time the Cython step function first, then the pure-Python one,
# printing mean and standard deviation over 10 single-shot runs each.
for label, step_function in (("Cython", "cython"), ("Python", "python")):
    times = timeit.repeat(partial(dmp.open_loop, step_function=step_function),
                          repeat=10, number=1)
    print(label)
    print("Mean: %.5f; Std. dev.: %.5f" % (np.mean(times), np.std(times)))
| [
"afabisch@googlemail.com"
] | afabisch@googlemail.com |
69dcf77fe8c8f4bf7999ba12fd6efc072ff19e6a | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /servicecatalog_read/provisioned-product-output_get.py | 01fe1a4ce4eb011dd07c8fc189e38f995cc52cf0 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: the cookie was previously misspelled "codding", which made the
# declaration a no-op comment; fixed so the encoding is actually declared.
import os
import sys

# Make the repository root importable so the `common` package can be found
# when this script is run directly from its sub-directory.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import read_no_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/servicecatalog/get-provisioned-product-outputs.html
if __name__ == '__main__':
    # Extra options for the wrapped AWS CLI call; left empty because this
    # command is invoked without additional parameters here.
    #
    # setting option use:
    #   ex: add_option_dict["setting_matching_parameter"] = "--owners"
    #   ex: add_option_dict["setting_key"] = "owner_id"
    # single parameter:
    #   ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    add_option_dict = {}
    read_no_parameter("servicecatalog", "get-provisioned-product-outputs", add_option_dict)
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
c934f71b537bf189c6aefbe18b994545c9942e1c | 04401ec5768d909ef95b3021dd39d280cdac7356 | /flags/flags2_common.py | 5a073a5e08ad15b91c298578cf78dbcec4edf52c | [
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | pythonfluente/concorrencia2020 | 82f826fbcfc36c41cda251cbeaf370a6822b3cdf | 62b1f506fd69b77b67a20aedf6b204f543912c82 | refs/heads/master | 2021-04-03T12:32:48.565498 | 2020-12-20T01:03:20 | 2020-12-20T01:03:20 | 248,353,395 | 7 | 7 | BSD-3-Clause | 2020-04-21T14:28:29 | 2020-03-18T22:06:21 | Python | UTF-8 | Python | false | false | 5,097 | py | """Utilities for second set of flag examples.
"""
import os
import time
import sys
import string
import argparse
from collections import namedtuple
from enum import Enum
# (status, data) pair returned for each attempted flag download.
Result = namedtuple('Result', 'status data')

# Outcome classification for a single download attempt.
HTTPStatus = Enum('Status', 'ok not_found error')

# Country codes of the top 20 countries by population (default download set).
POP20_CC = ('CN IN US ID BR PK NG BD RU JP '
            'MX PH VN ET EG DE IR TR CD FR').split()

# Default and ceiling for the number of concurrent requests.
DEFAULT_CONCUR_REQ = 1
MAX_CONCUR_REQ = 1

# Label -> base URL for the servers the examples can download from.
SERVERS = {
    'REMOTE': 'http://flupy.org/data/flags',
    'LOCAL': 'http://localhost:8001/flags',
    'DELAY': 'http://localhost:8002/flags',
    'ERROR': 'http://localhost:8003/flags',
}
DEFAULT_SERVER = 'LOCAL'

# Output directory for saved images and the file listing known codes.
DEST_DIR = 'downloaded/'
COUNTRY_CODES_FILE = 'country_codes.txt'
def save_flag(img, filename):
    """Write the raw image bytes *img* into DEST_DIR under *filename*."""
    with open(os.path.join(DEST_DIR, filename), 'wb') as fp:
        fp.write(img)
def initial_report(cc_list, actual_req, server_label):
    """Print which codes will be fetched, from where, and with how many
    concurrent connections."""
    if len(cc_list) <= 10:
        shown = ', '.join(cc_list)
    else:
        # Long lists are summarized by their first and last code.
        shown = f'from {cc_list[0]} to {cc_list[-1]}'
    print(f'{server_label} site: {SERVERS[server_label]}')
    flag_plural = '' if len(cc_list) == 1 else 's'
    print(f'Searching for {len(cc_list)} flag{flag_plural}: {shown}')
    conn_plural = '' if actual_req == 1 else 's'
    print(f'{actual_req} concurrent connection{conn_plural} will be used.')
def final_report(cc_list, counter, start_time):
    """Print per-status download counts and the total elapsed time.

    *counter* maps HTTPStatus members to occurrence counts; *cc_list* is
    kept in the signature for interface compatibility with the callers.
    """
    elapsed = time.time() - start_time
    print('-' * 20)
    ok = counter[HTTPStatus.ok]
    print(f'{ok} flag{"" if ok == 1 else "s"} downloaded.')
    missing = counter[HTTPStatus.not_found]
    if missing:
        print(f'{missing} not found.')
    errors = counter[HTTPStatus.error]
    if errors:
        print(f'{errors} error{"" if errors == 1 else "s"}.')
    print(f'Elapsed time: {elapsed:.2f}s')
def expand_cc_args(every_cc, all_cc, cc_args, limit):
    """Expand command-line country-code arguments into a sorted list.

    ``every_cc`` yields every AA..ZZ combination; ``all_cc`` reads codes
    from COUNTRY_CODES_FILE; otherwise each argument is either a single
    letter (expanded to XA..XZ) or a two-letter code. The result is
    sorted and truncated to at most *limit* entries.

    Raises ValueError for any argument that is not A-Z or AA-ZZ.
    """
    A_Z = string.ascii_uppercase
    codes = set()
    if every_cc:
        codes = {first + second for first in A_Z for second in A_Z}
    elif all_cc:
        with open(COUNTRY_CODES_FILE) as fp:
            codes = set(fp.read().split())
    else:
        for arg in cc_args:
            cc = arg.upper()
            if len(cc) == 1 and cc in A_Z:
                codes.update(cc + letter for letter in A_Z)
            elif len(cc) == 2 and all(letter in A_Z for letter in cc):
                codes.add(cc)
            else:
                msg = 'each CC argument must be A to Z or AA to ZZ.'
                raise ValueError('*** Usage error: ' + msg)
    return sorted(codes)[:limit]
def process_args(default_concur_req):
    """Parse and validate command-line options.

    Returns a tuple ``(args, cc_list)`` where *cc_list* is the expanded
    list of country codes (falling back to POP20_CC when none is given).
    On invalid usage a message and the usage line are printed and the
    process exits with status 1.
    """
    server_options = ', '.join(sorted(SERVERS))
    parser = argparse.ArgumentParser(
        description='Download flags for country codes. '
                    'Default: top 20 countries by population.')
    parser.add_argument('cc', metavar='CC', nargs='*',
                        help='country code or 1st letter (eg. B for BA...BZ)')
    parser.add_argument('-a', '--all', action='store_true',
                        help='get all available flags (AD to ZW)')
    parser.add_argument('-e', '--every', action='store_true',
                        help='get flags for every possible code (AA...ZZ)')
    parser.add_argument('-l', '--limit', metavar='N', type=int,
                        help='limit to N first codes', default=sys.maxsize)
    parser.add_argument('-m', '--max_req', metavar='CONCURRENT', type=int,
                        default=default_concur_req,
                        help=f'maximum concurrent requests (default={default_concur_req})')
    parser.add_argument('-s', '--server', metavar='LABEL',
                        default=DEFAULT_SERVER,
                        help=f'server to hit: one of {server_options} (default={DEFAULT_SERVER})')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='output detailed progress info')
    args = parser.parse_args()

    def usage_error(message):
        # Shared failure path: report the problem and exit non-zero.
        print(message)
        parser.print_usage()
        sys.exit(1)

    if args.max_req < 1:
        usage_error('*** Usage error: --max_req CONCURRENT must be >= 1')
    if args.limit < 1:
        usage_error('*** Usage error: --limit N must be >= 1')
    args.server = args.server.upper()
    if args.server not in SERVERS:
        usage_error('*** Usage error: --server LABEL must be one of '
                    + server_options)
    try:
        cc_list = expand_cc_args(args.every, args.all, args.cc, args.limit)
    except ValueError as exc:
        usage_error(exc.args[0])
    if not cc_list:
        cc_list = sorted(POP20_CC)
    return args, cc_list
def main(download_many, default_concur_req, max_concur_req):
    """Drive a download run with the given *download_many* implementation.

    Parses the CLI, caps concurrency at the smallest of the user limit,
    the implementation limit, and the number of codes, then verifies that
    every requested code was accounted for before reporting.
    """
    args, cc_list = process_args(default_concur_req)
    actual_req = min(args.max_req, max_concur_req, len(cc_list))
    initial_report(cc_list, actual_req, args.server)
    base_url = SERVERS[args.server]
    started = time.time()
    counter = download_many(cc_list, base_url, args.verbose, actual_req)
    assert sum(counter.values()) == len(cc_list), \
        'some downloads are unaccounted for'
    final_report(cc_list, counter, started)
| [
"luciano@ramalho.org"
] | luciano@ramalho.org |
1dc8801b64037a4f969242a0a4870dc67f6ad153 | e43f0e33bc07f9c8866dd7db88c80576e5d3d9c3 | /tests/parsers_umls_test.py | d55f67cd98b4b5d8af0fb0904f95307780051333 | [] | no_license | bearnd/mt-ingester | ef5fc47d57d4bade7ce76f303425f861f74c00d8 | cc4503b715d862af0ec82fdea666386e9265e56e | refs/heads/master | 2023-03-08T10:54:22.682969 | 2019-09-14T13:45:26 | 2019-09-14T13:45:26 | 338,612,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,301 | py | # coding=utf-8
import os
import unittest
from mt_ingester.parsers import ParserUmlsSat
from mt_ingester.parsers import ParserUmlsConso
from mt_ingester.parsers import ParserUmlsDef
from tests.assets.samples_umls import get_sample_file
from tests.assets.samples_umls import EnumUmlsFileSample
class ParserUmlsSatTest(unittest.TestCase):
    """Unit tests for the `ParserUmlsSat` parser."""

    def setUp(self):
        """Create a sample MRSAT.RRF file and a fresh parser instance."""
        self.file = get_sample_file(umls_file_type=EnumUmlsFileSample.MRSAT)
        self.parser = ParserUmlsSat()

    def tearDown(self):
        """Remove the temporary MRSAT.RRF file created in `setUp`."""
        os.remove(self.file.name)

    def test_parse(self):
        """`parse` should map each CUI in the sample to its DUI."""
        result = self.parser.parse(filename_mrsat_rrf=self.file.name)

        expected = {
            'C0001175': 'D000163',
            'C0006118': 'D001932',
            'C0024537': 'D016780',
            'C0153633': 'D001932',
            'C0750974': 'D001932',
            'C0750979': 'D001932',
            'C1527390': 'D001932',
        }
        self.assertDictEqual(result, expected)
class ParserUmlsConsoTest(unittest.TestCase):
    """Unit tests for the `ParserUmlsConso` parser."""

    def setUp(self):
        """Create sample MRSAT.RRF/MRCONSO.RRF files and a fresh parser."""
        self.file_mrsat = get_sample_file(
            umls_file_type=EnumUmlsFileSample.MRSAT,
        )
        self.file_mrconso = get_sample_file(
            umls_file_type=EnumUmlsFileSample.MRCONSO,
        )
        self.parser = ParserUmlsConso()

    def tearDown(self):
        """Remove the temporary MRSAT.RRF and MRCONSO.RRF files."""
        for sample_file in (self.file_mrsat, self.file_mrconso):
            os.remove(sample_file.name)

    def test_parse(self):
        """`parse` should collect the expected synonyms for each DUI."""
        dui_synonyms = self.parser.parse(
            filename_mrsat_rrf=self.file_mrsat.name,
            filename_mrconso_rrf=self.file_mrconso.name,
        )

        expected = {
            'D000163': [
                'acquired immunodeficiency syndrome',
                'acquired immunodeficiency syndromes',
                'syndromes, acquired immunodeficiency',
            ],
            'D001932': [
                'neoplasm, brain',
                'brain tumors',
                'brain tumors, primary',
                'neoplasms, intracranial',
            ],
            'D016780': [
                'plasmodium vivax malaria',
                'vivax malaria',
            ],
        }

        # Same key set, then same synonym set per key (order-insensitive).
        self.assertListEqual(sorted(dui_synonyms), sorted(expected))
        for dui, synonyms in expected.items():
            self.assertListEqual(sorted(dui_synonyms[dui]), sorted(synonyms))
class ParserUmlsDefTest(unittest.TestCase):
    """Unit tests for the `ParserUmlsDef` parser."""

    def setUp(self):
        """Create sample MRSAT.RRF/MRDEF.RRF files and a fresh parser."""
        self.file_mrsat = get_sample_file(
            umls_file_type=EnumUmlsFileSample.MRSAT,
        )
        self.file_mrdef = get_sample_file(
            umls_file_type=EnumUmlsFileSample.MRDEF,
        )
        self.parser = ParserUmlsDef()

    def tearDown(self):
        """Remove the temporary MRSAT.RRF and MRDEF.RRF files."""
        for sample_file in (self.file_mrsat, self.file_mrdef):
            os.remove(sample_file.name)

    def test_parse(self):
        """`parse` should map each DUI to its per-source definitions."""
        dui_definitions = self.parser.parse(
            filename_mrsat_rrf=self.file_mrsat.name,
            filename_mrdef_rrf=self.file_mrdef.name,
        )

        expected = {
            'D000163': {
                'CSP': [
                    ('one or more indicator diseases, depending on '
                     'laboratory evidence of HIV infection (CDC); late '
                     'phase of HIV infection characterized by marked '
                     'suppression of immune function resulting in '
                     'opportunistic infections, neoplasms, and other systemic '
                     'symptoms (NIAID).')
                ],
                'NCI_NICHD': [
                    ('A chronic, potentially life threatening condition that '
                     'is caused by human immunodeficiency virus (HIV) '
                     'infection, and is characterized by increased '
                     'susceptibility to opportunistic infections, certain '
                     'cancers and neurologic disorders.')
                ],
            },
            'D001932': {
                'NCI': [
                    ('A benign or malignant neoplasm that arises from or '
                     'metastasizes to the brain.')
                ],
                'NCI_NICHD': [
                    'An abnormal intracranial solid mass or growth.'
                ],
            },
            'D016780': {
                'MSH': [
                    ('Malaria caused by PLASMODIUM VIVAX. This form of '
                     'malaria is less severe than MALARIA, FALCIPARUM, but '
                     'there is a higher probability for relapses to occur. '
                     'Febrile paroxysms often occur every other day.')
                ],
                'NCI': [
                    'Malaria resulting from infection by Plasmodium vivax.'
                ],
            },
        }

        # Same DUIs, same sources per DUI, same definitions per source
        # (all comparisons order-insensitive).
        self.assertListEqual(sorted(dui_definitions), sorted(expected))
        for dui, sources in dui_definitions.items():
            self.assertListEqual(sorted(sources), sorted(expected[dui]))
            for source, definitions in sources.items():
                self.assertListEqual(
                    sorted(definitions),
                    sorted(expected[dui][source]),
                )
| [
"somada141@gmail.com"
] | somada141@gmail.com |
class MyCircularQueue:
    """Fixed-capacity FIFO queue backed by a ring buffer.

    Tracks the head index plus the current element count, so full/empty
    checks are a single comparison against the capacity.
    """

    def __init__(self, k: int):
        self._capacity = k
        self._buffer = [None] * k
        self._head = 0      # index of the oldest element
        self._size = 0      # number of stored elements

    def enQueue(self, value: int) -> bool:
        """Append *value*; return False if the queue is already full."""
        if self._size == self._capacity:
            return False
        tail = (self._head + self._size) % self._capacity
        self._buffer[tail] = value
        self._size += 1
        return True

    def deQueue(self) -> bool:
        """Discard the oldest element; return False if the queue is empty."""
        if self._size == 0:
            return False
        self._buffer[self._head] = None
        self._head = (self._head + 1) % self._capacity
        self._size -= 1
        return True

    def Front(self) -> int:
        """Oldest element, or -1 when empty."""
        return self._buffer[self._head] if self._size else -1

    def Rear(self) -> int:
        """Newest element, or -1 when empty."""
        if self._size == 0:
            return -1
        return self._buffer[(self._head + self._size - 1) % self._capacity]

    def isEmpty(self) -> bool:
        return self._size == 0

    def isFull(self) -> bool:
        return self._size == self._capacity
# Your MyCircularQueue object will be instantiated and called as such:
# obj = MyCircularQueue(k)
# param_1 = obj.enQueue(value)
# param_2 = obj.deQueue()
# param_3 = obj.Front()
| [
"az2lou@uwaterloo.ca"
] | az2lou@uwaterloo.ca |
a3157751b8c0617b6c1de79a6d8a459cd0630e40 | 241b3cef3f7146ca332b45a8e3d4005a9e93d024 | /kestrel/plugins/kestrel_tasks.py | 0f84c537dcba24810ff5e77e0a22b82406883da7 | [
"Apache-2.0"
] | permissive | joelimome/Kestrel | c5ec380ec0395df3213e63cd7389f68551350b62 | 199075569e57d72676512a4eaf64e82c21248460 | refs/heads/master | 2021-01-18T06:44:00.105322 | 2010-10-11T14:35:11 | 2010-10-11T14:35:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,690 | py | # Kestrel: An XMPP-based Job Scheduler
# Author: Lance Stout <lancestout@gmail.com>
#
# Credits: Nathan Fritz <fritzy@netflint.net>
#
# Copyright 2010 Lance Stout
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import threading
import signal
import subprocess
import sleekxmpp
from sleekxmpp.plugins import base
from sleekxmpp.xmlstream import JID
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import MatchXPath
from sleekxmpp.xmlstream.stanzabase import ElementBase, ET
from sleekxmpp.xmlstream.stanzabase import registerStanzaPlugin
from sleekxmpp.stanza.iq import Iq
from kestrel.stanza.task import Task
class kestrel_tasks(base.base_plugin):
def plugin_init(self):
self.description = "Kestrel Worker"
self.capabilities = self.config.get('capabilities', [])
self.xmpp.registerHandler(
Callback('Kestrel Task',
MatchXPath('{%s}iq/{%s}task' % (self.xmpp.default_ns,
Task.namespace)),
self.handle_task))
registerStanzaPlugin(Iq, Task)
self.xmpp.add_event_handler('kestrel_task', self.start_task, threaded=True)
self.xmpp.add_event_handler('kestrel_task_cancel', self.cancel_task, threaded=True)
self.tasks = {}
self.max_tasks = 1
self.lock = threading.Lock()
def post_init(self):
base.base_plugin.post_init(self)
self.xmpp['xep_0030'].add_feature('kestrel:tasks')
self.xmpp['xep_0030'].add_node('kestrel:tasks:capabilities')
caps = self.xmpp['xep_0030'].nodes['kestrel:tasks:capabilities']
for cap in self.capabilities:
caps.addFeature(cap)
def setMaxTasks(self, num):
self.max_tasks = num
def setCapabilities(self, caps):
node = self.xmpp['xep_0030'].nodes['kestrel:tasks:capabilities']
node.setFeatures(caps)
def handle_task(self, iq):
task = iq['kestrel_task']
logging.info("TASK: Received task: %s" % str(iq))
if task['action'] == 'execute' and task['command'] == '':
self._sendError(iq, '406', 'modify', 'not-acceptable')
return
# Todo: Check sender for authorization
events = {'execute': 'kestrel_task',
'cancel': 'kestrel_task_cancel'}
self.xmpp.event(events[task['action']], iq)
def start_task(self, iq):
from_jid = iq['from'].jid
task = iq['kestrel_task']
process_id = (iq['from'].user, iq['from'].resource)
if len(self.tasks) >= self.max_tasks:
self._sendError(iq, '500', 'cancel', 'resource-constraint')
return
if len(self.tasks) == self.max_tasks - 1:
# Send busy status if we will reach the max number of
# tasks when we start this one.
self.xmpp.sendPresence(pshow='dnd', pstatus='Executing Task')
iq.reply()
iq['kestrel_task']['status'] = 'executing'
iq.send()
self.xmpp.event('kestrel_task_started', iq)
command = "%s %s" % (task['command'], process_id[1])
if self._execute(process_id, command):
iq = self.xmpp.Iq()
iq['to'] = from_jid
iq['kestrel_task']['status'] = 'complete'
iq.send()
else:
iq = self.xmpp.Iq()
iq['from'] = from_jid
self._sendError(iq, '500', 'cancel', 'internal-server-error')
with self.lock:
if process_id in self.tasks:
del self.tasks[process_id]
self.xmpp.event('kestrel_task_finished', iq)
if task['cleanup']:
command = "%s %s" % (task['cleanup'], process_id[1])
self._execute(process_id, command, cleanup=True)
self.xmpp.sendPresence(pstatus='Ready for Task')
def cancel_task(self, iq):
process_id = (iq['from'].user, iq['from'].resource)
if self._cancel(process_id):
iq.reply().send()
self.xmpp.event('kestrel_task_cancelled', iq)
else:
self._sendError(iq, '404', 'cancel', 'item-not-found')
def _execute(self, name, command, cleanup=False):
"""Wrapper function to open a subprocess."""
try:
task_process = subprocess.Popen(['sh', '-c', "%s" % command],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid)
if not cleanup:
with self.lock:
self.tasks[name] = task_process
logging.info("TASK: Task started: %s (%s)" % (name, command))
task_process.wait()
logging.info("TASK: Task finished: %s (%s)" % (name, command))
else:
logging.info("TASK: Cleanup started: %s (%s)" % (name, command))
task_process.wait()
logging.info("TASK: Cleanup finished: %s (%s)" % (name, command))
return True
except:
error_type = "cleanup" if cleanup else "task"
logging.info("TASK: Error starting %s: (%s)" % (error_type, command))
return False
def _cancel(self, name):
"""Wrapper function to kill a subprocess."""
if name not in self.tasks:
logging.info("TASK: Tried cancelling task %s, but task not found." % str(name))
return False
task_process = self.tasks[name]
logging.info("TASK: Cancelling task %s" % str(name))
try:
os.killpg(task_process.pid, signal.SIGKILL)
except:
pass
with self.lock:
if name in self.tasks:
del self.tasks[name]
return True
def _sendError(self, iq, code, etype, condition, text=''):
iq.reply().error()
iq['error']['code'] = code
iq['error']['type'] = etype
iq['error']['condition'] = condition
iq['error']['text'] = text
iq.send()
| [
"lancestout@gmail.com"
] | lancestout@gmail.com |
09c98f2636180514e61419c174e02ce50a07fb96 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /oxf7b7vroXvWBJ9Nq_18.py | c91857e0445c9860163566659cc778edc4feb4b3 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py |
import re
def discount(n, txt):
    """Apply the comma-separated discounts in *txt* to the price *n*.

    Tokens ending in "%" are percentage discounts; all others are flat
    amounts. Sorting by the last character puts '%' tokens first, so all
    percentage discounts are applied before any flat deduction. The
    result is rounded to two decimals; trailing zeros are stripped and a
    whole amount comes back as an int.
    """
    if not len(txt):
        return n
    for token in sorted(txt.split(", "), key=lambda t: t[-1]):
        if token[-1] == "%":
            n -= float(token[:-1]) / 100 * n
        else:
            n -= float(token)
    formatted = "{:.2f}".format(n)
    if formatted[-3:] == ".00":
        return int(formatted[:-3])
    return float(re.sub(r"0+$", "", formatted))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7432e39a052baa01c42ba905cda58827685faba5 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptdiagp/porttestsetbt.py | 3ad8df35a3b2c025a8b87153e3ac3134883bd056 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 6,214 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class PortTestSetBt(Mo):
meta = ClassMeta("cobra.model.eqptdiagp.PortTestSetBt")
meta.isAbstract = True
meta.moClassName = "eqptdiagpPortTestSetBt"
meta.moClassName = "eqptdiagpPortTestSetBt"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "On-Demand Diag Policy Set for the Specified Card Type"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.eqptdiagp.TestSet")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.eqptdiagp.TestSetBoot")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.concreteSubClasses.add("cobra.model.eqptdiagp.TsBtLeafP")
meta.concreteSubClasses.add("cobra.model.eqptdiagp.TsBtFabP")
meta.concreteSubClasses.add("cobra.model.eqptdiagp.TsBtExtChHP")
meta.concreteSubClasses.add("cobra.model.eqptdiagp.TsBtExtChFP")
meta.rnPrefixes = [
]
prop = PropMeta("str", "adminSt", "adminSt", 1942, PropCategory.REGULAR)
prop.label = "Administrative State"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "start"
prop._addConstant("start", "start", 1)
prop._addConstant("stop", "stop", 2)
prop._addConstant("suspend", "suspend", 3)
meta.props.add("adminSt", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "freq", "freq", 1943, PropCategory.REGULAR)
prop.label = "Bootup Diag Test Frequency"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "one-shot"
prop._addConstant("every10mins", "every-10-mins", 600000)
prop._addConstant("every12hrs", "every-12-hours", 43200000)
prop._addConstant("every1day", "every-1-day", 86400000)
prop._addConstant("every1hr", "every-1-hour", 3600000)
prop._addConstant("every1week", "every-1-week", 604800000)
prop._addConstant("every2hrs", "every-2-hours", 7200000)
prop._addConstant("every30mins", "every-30-mins", 1800000)
prop._addConstant("every4hrs", "every-4-hours", 14400000)
prop._addConstant("every4weeks", "every-4-weeks", 2419200000)
prop._addConstant("every5mins", "every-5-mins", 300000)
prop._addConstant("every8hrs", "every-8-hours", 28800000)
prop._addConstant("one-shot", "once", 0)
meta.props.add("freq", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
a8b518462a18be00cb11ef084cc3170c03798e9e | 9e19b20d5a63d6e7693ad85eba37c8d1d1507192 | /Python/441_Arranging Coins.py | 68b8ab9baf05220ed1854c4304608a9507247475 | [] | no_license | Eddie02582/Leetcode | eacbfdfa0075c16ee7b3eb297c116fe42e7c8550 | b5c25f976866eefec33b96c638a4c5e127319e74 | refs/heads/master | 2022-10-22T20:51:06.739926 | 2022-10-17T07:43:38 | 2022-10-17T07:43:38 | 189,950,613 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | '''
You have a total of n coins that you want to form in a staircase shape, where every k-th row must have exactly k coins.
Given n, find the total number of full staircase rows that can be formed.
n is a non-negative integer and fits within the range of a 32-bit signed integer.
Example 1:
n = 5
The coins can form the following rows:
?
? ?
? ?
Because the 3rd row is incomplete, we return 2.
Example 2:
n = 8
The coins can form the following rows:
?
? ?
? ? ?
? ?
'''
class Solution(object):
    def arrangeCoins(self, n):
        """Return how many complete staircase rows n coins can form.

        Row k holds k coins, so the answer is the largest k with
        k * (k + 1) / 2 <= n. Solving that quadratic with exact integer
        arithmetic gives k = floor((sqrt(8n + 1) - 1) / 2), replacing the
        original O(sqrt(n)) counting loop with an O(1) computation that
        is exact for the full 32-bit range.
        """
        from math import isqrt  # local import keeps the module surface unchanged
        return (isqrt(8 * n + 1) - 1) // 2
sol = Solution()
assert sol.arrangeCoins(5) == 2
assert sol.arrangeCoins(8) == 3
assert sol.arrangeCoins(1) == 1
| [
"38853252+Eddie02582@users.noreply.github.com"
] | 38853252+Eddie02582@users.noreply.github.com |
8e862dda517f499add536302544e6aafb9fae6e4 | 7ad0808c8e3f77b1de5e9d3148941dc1404d3432 | /modules/core/system/queue_controller.py | e0f42bae7333d641ea4967a8dc9be090500c4148 | [] | no_license | Budabot/Tyrbot | addadfb6d265d371d5bbef1195a41d53736bf5dc | bf04a5180dac129f56b6d2231ab26070d8b6d2cc | refs/heads/master | 2023-08-04T10:44:29.861252 | 2023-07-26T15:02:54 | 2023-07-26T15:02:54 | 120,727,804 | 27 | 38 | null | 2023-08-15T18:43:24 | 2018-02-08T07:40:12 | Python | UTF-8 | Python | false | false | 1,314 | py | from core.command_param_types import Const, Any
from core.decorators import instance, command
@instance()
class QueueController:
    """Moderator commands for inspecting and manipulating the bot's
    outgoing packet queue."""

    def inject(self, registry):
        """Resolve service dependencies from the registry."""
        self.bot = registry.get_instance("bot")
        self.command_alias_service = registry.get_instance("command_alias_service")
        self.command_service = registry.get_instance("command_service")

    def start(self):
        """Register the legacy 'clearqueue' alias for 'queue clear'."""
        self.command_alias_service.add_alias("clearqueue", "queue clear")

    @command(command="queue", params=[Const("clear")], access_level="moderator",
             description="Clear the outgoing message queue")
    def queue_clear_cmd(self, request, _):
        """Drop every queued outgoing packet and report how many were dropped."""
        dropped = len(request.conn.packet_queue)
        request.conn.packet_queue.clear()
        return f"Cleared <highlight>{dropped}</highlight> messages from the outgoing message queue."

    @command(command="massmsg", params=[Any("command")], access_level="moderator",
             description="Force the reply of the specified command to be sent via non-main bots")
    def massmsg_cmd(self, request, command_str):
        """Run *command_str* and route its reply through the mass-message bots."""
        def mass_reply(msg):
            self.bot.send_mass_message(request.sender.char_id, msg, conn=request.conn)

        self.command_service.process_command(
            command_str, request.channel, request.sender.char_id,
            mass_reply, request.conn)
| [
"email1@jkbff.com"
] | email1@jkbff.com |
093e614f902f69b5827e4236c41809ee21e3d461 | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/test/modules/test_global_batch_gather.py | 3653c7d5bb759c2d7bdae6249247d70c87908203 | [
"Apache-2.0"
] | permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 2,335 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.automated_test_util.util import broadcast
def _test_batch_gather(test_case, ndim, placement, sbp):
    """Check distributed flow.batch_gather against a purely local run.

    Builds a random input tensor and random gather indices, runs
    ``flow.batch_gather`` once on a global tensor (with the given
    ``placement``/``sbp``) and once on a local copy of the same data,
    backprops the sum of each output, and asserts the input gradients agree.
    """
    # Dims are multiples of 8 — presumably so every tested SBP split divides
    # the axes evenly; confirm against all_sbp's requirements.
    dims = [random(1, 3).to(int).value() * 8 for _ in range(ndim)]
    x = random_tensor(ndim, *dims, requires_grad=True)
    # Local reference copy of the same values, detached from the dual wrapper.
    local_x = flow.tensor(x.pytorch.detach().cpu().numpy(), requires_grad=True)
    global_x = x.oneflow.to_global(placement=placement, sbp=sbp)
    global_x.retain_grad()
    # Indices mirror a leading prefix of the input's shape; the last index dim
    # is shrunk, and values are drawn below dims[indices_ndim - 1] so every
    # index stays in range for the gathered axis.
    indices_ndim = random(1, ndim + 1).to(int).value()
    indices_dims = [dims[i] for i in range(indices_ndim)]
    indices_dims[-1] = random(1, dims[indices_ndim - 1]).to(int).value()
    indices = np.random.choice(dims[indices_ndim - 1], indices_dims)
    # broadcast() presumably makes all ranks agree on the same random indices.
    indices = broadcast(indices)
    local_indices = flow.tensor(indices)
    # Indices are replicated (broadcast SBP) across every placement dimension.
    global_indices = local_indices.to_global(
        placement=placement, sbp=[flow.sbp.broadcast for _ in range(len(sbp))]
    )
    global_out = flow.batch_gather(global_x, global_indices)
    global_out.sum().backward()
    local_out = flow.batch_gather(local_x, local_indices)
    local_out.sum().backward()
    # Distributed gradients must match the single-device reference.
    test_case.assertTrue(
        np.allclose(
            global_x.grad.detach().cpu().numpy(),
            local_x.grad.detach().cpu().numpy(),
            atol=1e-5,
            rtol=1e-5,
        )
    )
class TestBatchGather(flow.unittest.TestCase):
    """Global-tensor (distributed) test suite for flow.batch_gather."""
    @globaltest
    def test_batch_gather(test_case):
        # Exercise every available placement with every SBP signature up to
        # rank 2 against the local reference computation.
        ndim = 2
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=ndim):
                _test_batch_gather(test_case, ndim, placement, sbp)
if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | Oneflow-Inc.noreply@github.com |
7411091b34332232c549ad28678e225515310812 | 7d90d2ce27c6ee0af74391b09909edbd45fdc2f0 | /renix_py_api/api_gen/ResultView_Autogen.py | 016622ef5e523d60fed961c68f0489c4b254c5cc | [] | no_license | gaoxingyu-hub/54testframework-master-e284 | d7ea0d4a715b65c8652430e963a86b9522a7237a | 57dd2197e7d91b8ad8fb2bd0e3503f10afa08544 | refs/heads/master | 2023-04-30T05:50:41.542402 | 2021-05-28T09:19:37 | 2021-05-28T09:19:37 | 309,922,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,180 | py | """
Auto-generated File
Create Time: 2019-12-27 02:33:25
"""
from .ROMEnum_Autogen import *
from renix_py_api.renix_common_api import *
from renix_py_api import rom_manager
from .ROMObject_Autogen import ROMObject
@rom_manager.rom
class ResultView(ROMObject):
    """ROM proxy for a Renix result view.

    Instantiating sends a create message to the Renix server through the
    ROMObject base class; the property setters forward changes via ``edit``
    so the server-side object stays in sync with this proxy.
    """
    def __init__(self, ViewName=None, DataClassName=None, **kwargs):
        """Create the view; only explicitly supplied fields are sent."""
        self._ViewName = ViewName # Result View Name
        self._DataClassName = DataClassName # Result class name
        properties = kwargs.copy()
        if ViewName is not None:
            properties['ViewName'] = ViewName
        if DataClassName is not None:
            properties['DataClassName'] = DataClassName
        # call base class function, and it will send message to renix server to create a class.
        super(ResultView, self).__init__(**properties)
    def delete(self):
        """
        call to delete itself
        """
        return self._finalize()
    def edit(self, ViewName=None, DataClassName=None, **kwargs):
        """Update local fields and push the changed properties to the server."""
        properties = kwargs.copy()
        if ViewName is not None:
            self._ViewName = ViewName
            properties['ViewName'] = ViewName
        if DataClassName is not None:
            self._DataClassName = DataClassName
            properties['DataClassName'] = DataClassName
        super(ResultView, self).edit(**properties)
    @property
    def ViewName(self):
        """
        get the value of property _ViewName
        """
        # force_auto_sync presumably refreshes the cached value from the
        # server via get() before returning — confirm in ROMObject.
        if self.force_auto_sync:
            self.get('ViewName')
        return self._ViewName
    @property
    def DataClassName(self):
        """
        get the value of property _DataClassName
        """
        if self.force_auto_sync:
            self.get('DataClassName')
        return self._DataClassName
    @ViewName.setter
    def ViewName(self, value):
        # Setter round-trips through edit() so the server sees the change.
        self._ViewName = value
        self.edit(ViewName=value)
    @DataClassName.setter
    def DataClassName(self, value):
        self._DataClassName = value
        self.edit(DataClassName=value)
    # Internal string setters: update the cache only, without an edit()
    # round-trip — presumably used by the framework when deserializing.
    def _set_viewname_with_str(self, value):
        self._ViewName = value
    def _set_dataclassname_with_str(self, value):
        self._DataClassName = value
| [
"gaoxingyu@example.com"
] | gaoxingyu@example.com |
f11af8e0e76f04055392f05a6934c6acb8dbaa2d | 1c29f2609b0cb9a72972fb94e630ed8335b5e4e4 | /scripts/CreateAndCleanupMarkerGraph.py | 8dc351afb0b541ceef2db839c9f6120ead0bba6c | [
"MIT",
"Zlib",
"LicenseRef-scancode-public-domain"
] | permissive | tw7649116/shasta | 4bef2b878bca0adcb003902c619ab3f388b13498 | 8612d46fd3de5ed51d19f05c4d811b5736d516ad | refs/heads/master | 2020-04-28T02:44:06.973416 | 2019-03-06T19:35:49 | 2019-03-06T19:35:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | #!/usr/bin/python3
import shasta
import GetConfig
import sys
# Read the config file.
config = GetConfig.getConfig()
# Initialize the assembler and access what we need.
# (The access* calls attach previously computed assembly data — presumably
# memory-mapped files written by earlier pipeline stages; confirm.)
a = shasta.Assembler()
a.accessReadsReadOnly()
a.accessKmers()
a.accessMarkers()
a.accessAlignmentData()
a.accessReadGraph()
a.accessChimericReadsFlags()
# Create vertices of the marker graph.
# All thresholds come from the [Align] / [MarkerGraph] config sections.
a.createMarkerGraphVertices(
    maxMarkerFrequency = int(config['Align']['maxMarkerFrequency']),
    maxSkip = int(config['Align']['maxSkip']),
    minCoverage = int(config['MarkerGraph']['minCoverage']),
    maxCoverage = int(config['MarkerGraph']['maxCoverage']))
# Create edges of the marker graph.
a.createMarkerGraphEdges()
# Approximate transitive reduction.
a.flagMarkerGraphWeakEdges(
    lowCoverageThreshold = int(config['MarkerGraph']['lowCoverageThreshold']),
    highCoverageThreshold = int(config['MarkerGraph']['highCoverageThreshold']),
    maxDistance = int(config['MarkerGraph']['maxDistance']),
    )
# Prune the strong subgraph of the marker graph.
a.pruneMarkerGraphStrongSubgraph(
    iterationCount = int(config['MarkerGraph']['pruneIterationCount']))
| [
"paoloczi@users.noreply.github.com"
] | paoloczi@users.noreply.github.com |
1c934fe5e03fbd7d00b58d1b16e4a2d2a0a5c435 | aca253ff1a97c96a1a0a9a5802aa623789662bb1 | /p030/dialog_demo.py | e568b7f233a493e3aef167358b74b2a0a0b09abf | [] | no_license | KD-huhu/PyQt5 | a6128a34b93f6e2da7216d5818f66dc9614216bc | 1c33a6549c2fcf663168256553d8c24e25d9a69c | refs/heads/master | 2022-07-03T07:37:29.837547 | 2020-05-17T14:54:39 | 2020-05-17T14:54:39 | 261,768,854 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class QDialogDemo(QMainWindow):
    """Demo window with one button that opens an application-modal QDialog."""
    def __init__(self):
        super(QDialogDemo,self).__init__()
        self.initUI()
    def initUI(self):
        # Basic window setup plus the single trigger button.
        self.setWindowTitle('QDialog案例')
        self.resize(300,200)
        self.button = QPushButton(self)
        self.button.setText('弹出对话框')
        self.button.move(50,50)
        self.button.clicked.connect(self.showDialog)
    def showDialog(self):
        dialog = QDialog() # create the dialog object
        button = QPushButton('确定',dialog) # add a confirm button to the dialog
        button.clicked.connect(dialog.close)
        button.move(50,50)
        dialog.setWindowTitle('对话框') # basic dialog setup works the same as for the main window
        dialog.setWindowModality(Qt.ApplicationModal) # while the dialog is open, the main window's widgets are unusable
        dialog.exec()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    main = QDialogDemo()
    main.show()
    sys.exit(app.exec_())
| [
"noreply@github.com"
] | KD-huhu.noreply@github.com |
2c40f53838408287189e489b19df5fc1ec20aa1a | 8b675ca56bae3a1b622eff991f8786963712d12f | /a301/__init__.py | e85022d2ce213a276ce0bce421a914b1172f3500 | [
"MIT"
] | permissive | KayhanB21/a301_code | b4dd7d8bdb2a4170211965abee707f48da4cbb23 | 4237b4e538bd999f5ac1b20f6b25b4c4e03bb09c | refs/heads/master | 2021-09-23T14:39:14.333193 | 2018-09-24T19:44:50 | 2018-09-24T19:44:50 | 150,385,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | from pathlib import Path
#
# open the VERSION file and read it into a301.__version__
# https://github.com/pypa/setuptools/issues/1316
#
__version_file__ = Path(__file__).parent / Path('VERSION')
#
# If the VERSION file doesn't exist, try to create it with the placeholder
# 'no_version'.  If that write also fails (no write permission, read-only
# install), keep the placeholder version and mark the file as unusable by
# setting __version_file__ to None.
#
if not __version_file__.is_file():
    __version__ = 'no_version'
    try:
        with open(__version_file__, 'w') as f:
            f.write(__version__)
    except OSError:
        # Only I/O failures are expected here.  The previous bare ``except:``
        # also swallowed SystemExit/KeyboardInterrupt and hid real bugs.
        __version_file__ = None
else:
    with open(__version_file__) as f:
        __version__ = f.read().strip()
| [
"paustin@eos.ubc.ca"
] | paustin@eos.ubc.ca |
9545e13460cfa481d9ae30dc5b02b4f93977fc49 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/61-tideGauge.py | e49c8cf3e8885cfd243aea28ac1f410de445d6df | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,454 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
    """Validate a Random Forest surge model for one tide gauge via 10-fold CV.

    For the gauge selected by index ``x`` below this loads the lagged MERRA
    predictors, adds squared/cubed wind terms, standardizes them, merges them
    with the daily-max surge series, reduces the predictors with PCA (95%
    explained variance), runs 10-fold cross-validation of a
    RandomForestRegressor, and appends the gauge's mean correlation/RMSE to a
    CSV in ``dir_out``.
    """
    #defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
    #cd to the lagged predictors directory
    os.chdir(dir_in)
    # Gauge index range for this job — a single gauge here (presumably one
    # array task per gauge on the cluster; confirm with the submit script).
    x = 61
    y = 62
    #empty dataframe for model validation
    df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
                                 'num_95pcs','corrn', 'rmse'])
    #looping through
    for tg in range(x,y):
        os.chdir(dir_in)
        #filter only .csv files
        tgNames = []
        for file in glob.glob("*.csv"):
            tgNames.append(file)
        tg_name = sorted(tgNames)[tg]
        print(tg_name)
        ##########################################
        #check if this tg is already taken care of
        ##########################################
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            print("this tide gauge is already taken care of")
            # NOTE(review): this returns from validateRF entirely, aborting
            # any remaining gauges in the range — only safe while the range
            # covers a single gauge.
            return "file already analyzed!"
        os.chdir(dir_in)
        #load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)
        #add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
        #standardize predictor data
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
        #load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)
        #remove duplicated surge rows
        surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)
        #adjust surge time format to match that of pred
        time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
        time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
        #merge predictors and surge to find common time frame
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)
        #find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)
        #in case pred and surge don't overlap
        if pred_surge.shape[0] == 0:
            print('-'*80)
            # NOTE(review): ``don''t`` is two adjacent string literals, so
            # this actually prints "dont" — harmless, but a typo.
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue
        pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
                                                   pred_surge['date'])), \
                                          columns = ['date'])
        #prepare data for training/testing
        X = pred_surge.iloc[:,1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)
        #apply PCA
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)
        #apply 10 fold cross validation
        # (KFold defaults to shuffle=False, so folds are contiguous time blocks.)
        kf = KFold(n_splits=10, random_state=29)
        metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
        # Splitting on X gives the same row indices as X_pca (same length).
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]
            #train regression model
            rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
                                      min_samples_leaf = 1)
            rf.fit(X_train, y_train)
            #predictions
            predictions = rf.predict(X_test)
            # pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
            #                       pd.DataFrame(np.array(y_test))], \
            #                      axis = 1)
            # pred_obs.columns = ['pred', 'obs']
            # combo = pd.concat([combo, pred_obs], axis = 0)
            #evaluation matrix - check p value
            # Folds with statistically insignificant correlation are skipped
            # and do not enter the averages below.
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                print()
                metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
        #number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1] #number of principal components
        corr = np.mean(metric_corr)
        rmse = np.mean(metric_rmse)
        print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
              np.mean(metric_rmse), '\n')
        #original size and pca size of matrix added
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
                          'num_95pcs','corrn', 'rmse']
        df = pd.concat([df, new_df], axis = 0)
        #save df as cs - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)
#run script
validateRF()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
0de82ef4d599b7fcb1e8c91a95fdb1238c215f5d | a5a4cee972e487512275c34f308251e6cc38c2fa | /examples/Ni__eam__born_exp_fs__postprocessing/Reduced_TSNE_qoi_in_param/configuration/configure_final_plot.py | bb505c0bb91236afab1d0c898c0312a5a119c4d8 | [
"MIT"
] | permissive | eragasa/pypospack | 4f54983b33dcd2dce5b602bc243ea8ef22fee86b | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | refs/heads/master | 2021-06-16T09:24:11.633693 | 2019-12-06T16:54:02 | 2019-12-06T16:54:02 | 99,282,824 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | from collections import OrderedDict
from pypospack.pyposmat.data.pipeline import PyposmatPipeline
# Pipeline configuration: a single 'plot' segment that calls plot_by_cluster
# on the two t-SNE projection axes and writes the figure to a PNG.
pipeline_configuration = OrderedDict()
# define first segment (plotting)
pipeline_configuration[0] = OrderedDict()
pipeline_configuration[0]['segment_type'] = 'plot'
pipeline_configuration[0]['function_calls'] = OrderedDict()
pipeline_configuration[0]['function_calls'][0] = OrderedDict()
pipeline_configuration[0]['function_calls'][0]['function'] = 'plot_by_cluster'
pipeline_configuration[0]['function_calls'][0]['args'] = OrderedDict()
pipeline_configuration[0]['function_calls'][0]['args']['x_axis'] = 'tsne_0'
pipeline_configuration[0]['function_calls'][0]['args']['y_axis'] = 'tsne_1'
pipeline_configuration[0]['function_calls'][0]['args']['filename'] = 'qoi_clusters_in_param_tsne_space.png'
if __name__ == "__main__":
    # Serialize the configuration next to this script as a .in file.
    pipeline = PyposmatPipeline()
    fn = __file__.replace('.py', '.in')
    pipeline.write_configuration(filename=fn,
                                 d=pipeline_configuration)
| [
"seatonullberg@gmail.com"
] | seatonullberg@gmail.com |
a25199bba01db10b42a11ff3f9af31b72b291e1c | ed538eba0bb81713d8353dea5baafd038913d52c | /photos/urls.py | af4fb96be4ef12b78a66dbe5fbb0f4a0609e76be | [] | no_license | Ansagan-Kabdolla/photo_site | 78cf738ff948cbf7d2207bff6166dcbe44679e1e | 19228dc3abeab9cc301962c970b15fcf040e2577 | refs/heads/master | 2022-06-04T20:06:07.798864 | 2020-05-02T20:05:28 | 2020-05-02T20:05:28 | 260,765,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from django.urls import path
from .views import *
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', index),
path('subservices/<int:pk>', subservice_example, name = 'subservices')
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"ansagankabdolla4@gmail.com"
] | ansagankabdolla4@gmail.com |
5820dfd31c14197589ff85b20ee3c09ef20a8d93 | 434d5256fa47c6bec0e5d79917f2d09b52490fa0 | /tests/pypint/plugins_tests/implicit_solvers_tests/__init__.py | 540bd2c49fa5e3540bed00c4ec8613618cf57d42 | [] | no_license | Parallel-in-Time/PyPinT | 2d0a54d21a6b50863c6acef69eb9a86d3bcc7fcf | 90aed34cf43d633e44f56444f6c5d4fa39619663 | refs/heads/master | 2016-08-03T18:58:05.269042 | 2014-06-10T08:27:30 | 2014-06-10T08:32:45 | 19,447,961 | 0 | 2 | null | 2014-06-02T14:26:08 | 2014-05-05T07:39:20 | Python | UTF-8 | Python | false | false | 168 | py | # coding=utf-8
import unittest
class ImplicitSolversTests(unittest.TestSuite):
    """Aggregate suite for the implicit-solver tests.

    No test cases are registered yet, so the suite starts out empty.
    """
    def __init__(self):
        # The base initializer must run: TestSuite.__init__ creates the
        # internal ``_tests`` list.  The previous ``pass`` body skipped it,
        # which made countTestCases() and iteration fail with AttributeError.
        super(ImplicitSolversTests, self).__init__()
if __name__ == "__main__":
    unittest.main()
| [
"t.klatt@fz-juelich.de"
] | t.klatt@fz-juelich.de |
0d7ef97c64cbb51cb2f664c2765cc5b6d54098a1 | 09aee268ce72d282f53fe94f42478e2b3b48127d | /CBVProject_3/manage.py | ba051715cec14ce97e5ac0aec7233658f5f881dd | [] | no_license | keshava519/Django_Projects | c95d0f8c55d4cc946291be6fb058b7298aefe596 | 99584892b9d9ec6b6395a382c684b4d036d07874 | refs/heads/main | 2023-02-23T03:44:32.110742 | 2021-01-27T15:15:13 | 2021-01-27T15:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this project's
    # settings module, then hand the CLI arguments to the command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CBVProject_3.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django imports fine on its own, so the original ImportError had a
        # different cause — re-raise it unchanged.
        raise
    execute_from_command_line(sys.argv)
| [
"keshava.cadcam@gmail.com"
] | keshava.cadcam@gmail.com |
8064c42bc3d64fe11d2c1fd47af31b2a447da64d | 02b73216f3970a981dc4bb8eea67f876edc8797f | /funcs.py | fe84dcb18d1b1a6db59299839394a4877e94586a | [] | no_license | Coul33t/LinkReader | b44eff04a8979af3884e70ccbe165ee9d8e7ae8c | 7396fc8888eec7182783f5cb08e338dbac314637 | refs/heads/master | 2020-04-19T14:49:18.764694 | 2019-02-01T00:10:20 | 2019-02-01T00:10:20 | 168,254,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | import csv
import random as rn
import pdb
from constants import VIDEO_LIST
class LinkReader:
    """Load link records from CSV files and retrieve them by slice or at random.

    Each record is a dict with keys ``link`` (the URL), ``category`` (the CSV
    category column) and ``content_type`` (``'video'`` when the URL contains
    any marker from ``VIDEO_LIST``, else ``'image'``).
    """

    def __init__(self):
        self.links = []  # list of {'link', 'category', 'content_type'} dicts

    def has_links(self):
        """Return the number of loaded links (truthy when any are loaded)."""
        return len(self.links)

    def import_links(self, csv_file):
        """Append the records found in *csv_file* to ``self.links``.

        Each CSV row is expected to carry ``<id>,<url>,<category>`` in its
        first field; malformed rows raise (no validation is attempted).
        """
        with open(csv_file, newline='') as csvfile:
            contents = csv.reader(csvfile, delimiter=' ', quotechar='|')
            for row in contents:
                fields = row[0].split(',')
                url, category = fields[1], fields[2]
                self.links.append({
                    'link': url,
                    'category': category,
                    # A URL counts as video when it contains any marker from
                    # VIDEO_LIST (e.g. video-hosting domains).
                    'content_type': 'video' if any(n in url for n in VIDEO_LIST) else 'image',
                })

    def get_links(self, beg=0, end=50, category=None, link_only=True):
        """Return links[beg:end], optionally filtered by *category*.

        With ``link_only`` (the default) only the URLs are returned,
        otherwise the full record dicts.  (Bug fix: the old code returned
        the literal key name ``'link'`` for every record and returned None
        whenever a category was supplied.)
        """
        records = self.links
        if category:
            records = [r for r in records if r['category'] == category]
        records = records[beg:end]
        if link_only:
            return [r['link'] for r in records]
        return records

    def get_random_link(self, number=1, category=None, link_only=True, content_type=None):
        """Return *number* distinct randomly chosen links, optionally filtered.

        (Bug fix: the old code called ``.items()`` on a list — an
        AttributeError — and then indexed plain strings with ``['link']``,
        so every filtered call crashed.)
        """
        records = self.links
        if category:
            records = [r for r in records if r['category'] == category]
        if content_type:
            records = [r for r in records if r['content_type'] == content_type]
        chosen = rn.sample(records, number)
        if link_only:
            return [r['link'] for r in chosen]
        return chosen
return [sub_list[i] for i in rn_list]
if __name__ == '__main__':
l_r = LinkReader()
l_r.import_links('favorites.csv')
l_r.import_links('likes.csv')
pdb.set_trace() | [
"Coulis1990@gmail.com"
] | Coulis1990@gmail.com |
3480b284dcaed2749f6f58fa86e06e8053cb57ff | 81fe7f2faea91785ee13cb0297ef9228d832be93 | /HackerRank/Contests/RegularExpresso2.py | cb6b298b2549f14cd035b21bfb764ca316fc44b7 | [] | no_license | blegloannec/CodeProblems | 92349c36e1a35cfc1c48206943d9c2686ea526f8 | 77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e | refs/heads/master | 2022-05-16T20:20:40.578760 | 2021-12-30T11:10:25 | 2022-04-22T08:11:07 | 54,330,243 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | #!/usr/bin/env python
import sys
import re
#Regex_Pattern = r'(te|b|a|t|r)$'
#Regex_Pattern = r'^(A{1,3})?(a{1,3}A{1,3})*(a{1,3})?$'
#Regex_Pattern = r'(.)\1\1.{,2}.{20}$|(.)\2\2.{,2}.{15}$|(.)\3\3.{,2}.{10}$|(.)\4\4.{,2}.{5}$|(.)\5\5.{,2}$|(.).{4}\6.{4}\6'
#Regex_Pattern = r'10(11|00)*$'
Regex_Pattern = r'(00|11(10(11)*00)*01)*'
#Regex_Pattern = r'(?=^.{20}$)(?!^.*\n)(?=^.*[a-z])(?=^.*[A-Z].*[A-Z])(?!^(..)*0)(?!^1)(?!^.{3}1)(?!^.{5}1)(?!^.{7}1)(?!^.{8}1)(?!^.{9}1)(?!^.{11}1)(?!^.{13}1)(?!^.{14}1)(?!^.{15}1)(?!^.{17}1)(?!^.{19}1)(?!^2)(?!^.*2$)(?!^.*345)(?!^.*354)(?!^.*435)(?!^.*453)(?!^.*534)(?!^.*543)(?!^(..)*.6)(?!^.*7.*7)(?!^.*8.*8.*8)(?!^.*9.*9.*9.*9)'
#Regex_Pattern = r'(?=^([^ab]*a[^ab]*b([^ab]*b[^ab]*a)*)*[^ab]*$|^([^ab]*b[^ab]*a([^ab]*a[^ab]*b)*)*[^ab]*$)(?=^([^cd]*c[^cd]*d([^cd]*d[^cd]*c)*)*[^cd]*$|^([^cd]*d[^cd]*c([^cd]*c[^cd]*d)*)*[^cd]*$)'
#Regex_Pattern = r'(?=.*P)(?!.*P.*P)(?=^(R(RL|UD|RT(UD|TT)*UL(LR|RL|P)*RD(UD+TT)*TL|RT(UD|TT)*UL(LR|RL|P)*LJD)*L)*$)'
# Python 2 script body: print the active pattern's length, then for every
# stdin line print "true"/"false" depending on whether the pattern matches.
print len(Regex_Pattern)
for l in sys.stdin.readlines():
    # Each line still carries its trailing newline when passed to re.search.
    print str(bool(re.search(Regex_Pattern, l))).lower()
| [
"blg@gmx.com"
] | blg@gmx.com |
a15d8a2d61f046759905ecfabae1a224bd13de50 | 527af27858f7cd937915268ea0dccb9c793bace8 | /systemdlint/systemdlint/conf/knownMandatory.py | 8dec79cd6f0a8f4ee1d98f46115daceb4ffb8000 | [
"BSD-2-Clause"
] | permissive | priv-kweihmann/systemdlint | b382438564ff3cff73655da634d10903027f26c3 | d9909d2e2d970599bb2015e2a667d4debf063384 | refs/heads/master | 2023-04-12T20:26:42.106601 | 2021-12-21T09:51:24 | 2021-12-21T09:51:24 | 182,244,386 | 21 | 2 | BSD-2-Clause | 2020-10-23T18:17:24 | 2019-04-19T10:01:24 | Python | UTF-8 | Python | false | false | 291 | py | KNOWN_MANDATORY = {
"Unit": ["Description"],
"Address": ["Address"],
"BridgeFDB": ["MACAddress"],
"Mount": ["What", "Where"],
"NetDev": ["Name"],
"L2TPSession": ["Name"],
"Peer": ["Name"],
"IPv6AddressLabel": ["Label", "Prefix"],
"NextHop": ["Gateway"]
}
| [
"kweihmann@outlook.com"
] | kweihmann@outlook.com |
b7356056640093a86575c5fbc52bb527a04ea613 | bf3c0b0b2f5eed28043caa155f001bb656ed41a5 | /softboxen/client/resources/box/credentials.py | e70600381da6f261e27f63e660f92cde040fad0f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | etingof/softboxen | 0787a004f13d7f3d19f4cbf090e55bc229b8470a | 2a7ba85669d563de9824e3962bd48a0849482e3f | refs/heads/master | 2023-03-27T16:56:14.460474 | 2020-04-18T12:30:11 | 2020-04-18T12:30:11 | 239,087,167 | 3 | 1 | BSD-2-Clause | 2020-06-26T06:50:37 | 2020-02-08T07:29:53 | Python | UTF-8 | Python | false | false | 624 | py | #
# This file is part of softboxen software.
#
# Copyright (c) 2020, Ilya Etingof <etingof@gmail.com>
# License: https://github.com/etingof/softboxen/LICENSE.rst
#
import logging
from softboxen.client.resources import base
LOG = logging.getLogger(__name__)
class Credentials(base.Resource):
    """Represent one set of user credentials for accessing a box."""
    # Fields are presumably mapped from the underlying resource document
    # by base.Field — confirm against softboxen.client.resources.base.
    protocol = base.Field('protocol')
    user = base.Field('user')
    password = base.Field('password')
class CredentialsCollection(base.ResourceCollection):
    """Represent a collection of users credentials."""
    @property
    def _resource_type(self):
        # Tells the base collection which resource class to instantiate
        # for each member.
        return Credentials
| [
"etingof@gmail.com"
] | etingof@gmail.com |
3be2027bbed138e20adad4a399187c7b472f9d7d | da280a226bbf15d7243410c0d3930bdca00d0088 | /ex39.py | cf0099096111e4cca29e493f9986359bfb0cb6c5 | [] | no_license | c4collins/PyTHWay | 174cae57c73431ce5bfc90a361613c5db5c846d7 | 135b4b908ef2698084ee1b3fb9f1e5550c3c8843 | refs/heads/master | 2021-01-10T18:29:43.998528 | 2012-11-03T22:53:17 | 2012-11-03T22:53:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | ten_things = "Apples Oranges Crows Telephone Light Sugar"
print "Wait there's not 10 things in that list, let's fix that."
# Split the space-separated string into a six-element list.
stuff = ten_things.split(' ')
more_stuff = ["Day", "Night", "Song", "Frisbee", "Corn", "Banana", "Girl", "Boy"]
# Pop from the END of more_stuff until stuff holds exactly 10 items.
while len(stuff) != 10:
    next_one = more_stuff.pop()
    print "Adding: ", next_one
    stuff.append(next_one)
    print "There's %d items now." % len(stuff)
print "There we go: ", stuff
print "Let's do some more things with stuff."
# Indexing, popping and joining exercises (Python 2 print statements).
print stuff[1]
print stuff[-1]
print stuff.pop()
print ' '.join(stuff)
print '#'.join(stuff[3:5])
"connor.collins@gmail.com"
] | connor.collins@gmail.com |
3f723df5d615220111afdd7537c425100cc9e621 | e7b7505c084e2c2608cbda472bc193d4a0153248 | /LeetcodeNew/python/LC_765.py | 022d0738c948efc1eee48d7400433fe08f690773 | [] | no_license | Taoge123/OptimizedLeetcode | 8e5c1cd07904dfce1248bc3e3f960d2f48057a5d | 3e50f6a936b98ad75c47d7c1719e69163c648235 | refs/heads/master | 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,098 | py | """
https://leetcode.com/problems/couples-holding-hands/discuss/535314/Python-DFS-solution-with-detailed-explanation
https://leetcode.com/problems/couples-holding-hands/discuss/822501/Python-99-DFS-SCC
下面我们来看一种使用联合查找Union Find的解法。该解法对于处理群组问题时非常有效,比如岛屿数量有关的题就经常使用UF解法。核心思想是用一个root数组,每个点开始初始化为不同的值,如果两个点属于相同的组,就将其中一个点的root值赋值为另一个点的位置,这样只要是相同组里的两点,通过find函数会得到相同的值。 那么如果总共有n个数字,则共有 n/2 对儿,所以我们初始化 n/2 个群组,我们还是每次处理两个数字。每个数字除以2就是其群组号,那么属于同一组的两个数的群组号是相同的,比如2和3,其分别除以2均得到1,所以其组号均为1。那么这对解题有啥作用呢?作用忒大了,由于我们每次取的是两个数,且计算其群组号,并调用find函数,那么如果这两个数的群组号相同,那么find函数必然会返回同样的值,我们不用做什么额外动作,因为本身就是一对儿。如果两个数不是一对儿,那么其群组号必然不同,在二者没有归为一组之前,调用find函数返回的值就不同,此时我们将二者归为一组,并且cnt自减1,忘说了,cnt初始化为总群组数,即 n/2。那么最终cnt减少的个数就是交换的步数,还是用上面讲解中的例子来说明吧:
[3 1 4 0 2 5]
最开始的群组关系是:
群组0:0,1
群组1:2,3
群组2:4,5
取出前两个数字3和1,其群组号分别为1和0,带入find函数返回不同值,则此时将群组0和群组1链接起来,变成一个群组,则此时只有两个群组了,cnt自减1,变为了2。
群组0 & 1:0,1,2,3
群组2:4,5
此时取出4和0,其群组号分别为2和0,带入find函数返回不同值,则此时将群组0 & 1和群组2链接起来,变成一个超大群组,cnt自减1,变为了1
群组0 & 1 & 2:0,1,2,3,4,5
此时取出最后两个数2和5,其群组号分别为1和2,因为此时都是一个大组内的了,带入find函数返回相同的值,不做任何处理。最终交换的步数就是cnt减少值
"""
class UnionFind:
    """Disjoint-set structure specialised for the couples problem (LC 765).

    ``count`` starts at n // 2 (one component expected per couple) and is
    decremented every time two previously separate components merge.
    """

    def __init__(self, n):
        self.parent = [i for i in range(n)]
        self.count = n // 2

    def find(self, i):
        """Return the representative (root) of i's component.

        Uses iterative lookup with full path compression: every node on the
        walked path is re-pointed at the root, keeping later finds near O(1)
        amortised.  (The original recursed without compressing — O(depth)
        per call and at risk of hitting the recursion limit on long chains.)
        """
        root = i
        while self.parent[root] != root:
            root = self.parent[root]
        while self.parent[i] != root:
            self.parent[i], i = root, self.parent[i]
        return root

    def union(self, i, j):
        """Merge the components of i and j; decrement ``count`` on a real merge."""
        x = self.find(i)
        y = self.find(j)
        if x != y:
            self.count -= 1
            self.parent[x] = y
class Solution:
    def minSwapsCouples(self, row) -> int:
        """Return the minimum swaps so every couple sits together (LC 765).

        Each person p belongs to couple p // 2.  The two people on each sofa
        (positions 2i, 2i+1) get their couple ids unioned; every union that
        merges two distinct components costs exactly one swap, so the answer
        is (number of couples) - (components remaining) = n // 2 - uf.count.
        """
        n = len(row)
        uf = UnionFind(n)
        for i in range(0, n, 2):
            uf.union(row[i] // 2, row[i + 1] // 2)
        return n // 2 - uf.count
"""
[3, 2, 0, 1]
1 1 0 0
row = [0, 2, 1, 3]
0 1 0 1
"""
| [
"taocheng984@gmail.com"
] | taocheng984@gmail.com |
247b93c47254775bb6f5dca6bdf3424e935aedbb | 14e2732db8f51176cc6394b4967868dd41b0ea97 | /src/inout/modelica/InitModelOMC.py | 91d3555d781d92aacbef5ab4a42e08efab7dd070 | [] | no_license | fran-jo/EngineSSE | 0c29db6db71499425738b22bb617f95e606f5b2e | 0878947aefb68f5e13d2cefd2dee2ef5e293f4d8 | refs/heads/master | 2020-03-23T21:02:40.605127 | 2018-07-23T23:11:42 | 2018-07-23T23:11:42 | 142,077,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,653 | py | '''
Created on 11 apr 2014
@author: fragom
'''
class InitModelOMC(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
    def loadFile(self, _path, _model):
        """Build the OMC ``loadFile("<path><model>")`` command string.

        Backslashes are normalized to forward slashes so Windows paths are
        acceptable to OpenModelica; the command is echoed (Python 2 print)
        and returned.
        """
        strcommand = []
        strcommand.append('loadFile(')
        strcommand.append('"')
        strcommand.append(_path)
        strcommand.append(_model)
        strcommand.append('"')
        strcommand.append(')')
        command = ''.join(strcommand)
        command = command.replace('\\','/')
        print 'loadFile: ', command
        return command
    def simulate(self, _model, _simOptions, _modelParams, _isParamsFile):
        """Build the OMC ``simulate(<model>...)`` command string.

        ``_simOptions`` is appended verbatim when non-empty.  Parameter
        overrides go into simflags: ``-overrideFile=<file>`` when
        ``_isParamsFile`` is true, otherwise inline ``-override <params>``.
        Backslashes are normalized to forward slashes; the command is echoed
        (Python 2 print) and returned.
        """
        strcommand= []
        strcommand.append('simulate(')
        strcommand.append(_model)
        if (_simOptions!= ''):
            strcommand.append(_simOptions)
        if (_isParamsFile):
            strcommand.append(',simflags="-overrideFile=')
            strcommand.append(_modelParams)
            strcommand.append('"')
        else:
            strcommand.append(',simflags="-override ')
            strcommand.append(_modelParams)
            strcommand.append('"')
        strcommand.append(')')
        command = ''.join(strcommand)
        command= command.replace('\\','/')
        print 'simulate: ', command
        return command
def plot(self, _simOutputs):
strcommand= []
strcommand.append('plot({')
for value in _simOutputs:
strcommand.append(value)
strcommand.append(',')
strcommand= strcommand[:-1]
strcommand.append('})')
command = ''.join(strcommand)
return command | [
"fran_jo@hotmail.com"
] | fran_jo@hotmail.com |
d333c66003907d386f6eee513acacbc200c7de8f | 70e047d748d503362cabc0f3ba50f3e103110ff4 | /element/migrations/0002_element_kode_element.py | 0de0334f2d46aeeaf317a647b8acefa1bc8f682a | [] | no_license | gofur/AHS | 621e7982df7c4fbd150e9427b7b408e122a38b07 | b59cba9d29e4ef5e20bf2091a646cd7ec79c3c6f | refs/heads/master | 2021-01-10T10:47:20.159642 | 2016-02-06T12:26:59 | 2016-02-06T12:26:59 | 49,955,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-20 23:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the unique ``kode_element`` code field to the element model."""
    dependencies = [
        ('element', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='element',
            name='kode_element',
            # default=1 only backfills existing rows during this migration;
            # preserve_default=False removes it from the field afterwards.
            field=models.CharField(default=1, max_length=8, unique=True),
            preserve_default=False,
        ),
    ]
| [
"you@example.com"
] | you@example.com |
7cfd21c143ab586aefa2bbe9b4d2f2e0ffe3b867 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /FWh2fGH7aRWALMf3o_12.py | 3ed8ee73f8657d5d9d71f3bacedbcaa8f51841d0 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | """
Create a function that takes a string (without spaces) and a word list,
cleaves the string into words based on the list, and returns the correctly
spaced version of the string (a sentence). If a section of the string is
encountered that can't be found on the word list, return `"Cleaving stalled:
Word not found"`.
### Examples
word_list = ["about", "be", "hell", "if", "is", "it", "me", "other", "outer", "people", "the", "to", "up", "where"]
cleave("ifitistobeitisuptome", word_list) ➞ "if it is to be it is up to me"
cleave("hellisotherpeople", word_list) ➞ "hell is other people"
cleave("hellisotterpeople", word_list) ➞ "Cleaving stalled: Word not found"
### Notes
Words on the `word_list` can appear more than once in the string. The
`word_list` is a reference guide, kind of like a dictionary that lists which
words are allowed.
"""
def cleave(string, lst, rec_call=False):
    """Recursively split ``string`` into space-separated words from ``lst``.

    Returns the reconstructed sentence, or the message
    ``"Cleaving stalled: Word not found"`` when no complete split exists.
    ``rec_call`` marks internal recursive calls, which report failure by
    returning ``None`` instead of the user-facing error message.
    """
    # A dictionary word consuming the whole remaining string ends the cleave.
    if string in lst:
        return string
    # Otherwise try every dictionary word that prefixes the string, in list
    # order, and recurse on the remainder.
    for word in lst:
        if not string.startswith(word):
            continue
        rest = cleave(string[len(word):], lst, True)
        if rest is not None:
            return word + " " + rest
    return None if rec_call else "Cleaving stalled: Word not found"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
fa48c0659171b8c9d8df62405784ad41278d721c | 70d4ef0863906b3ca64f986075cd35b8412b871e | /pipeline/contrib/statistics/migrations/0010_auto_20190304_1747.py | 16379222282ca52e23b319f90809bcf33b6776f6 | [
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | selinagyan/bk-sops | 72db0ac33d9c307f51769e4baa181ceb8e1b279e | 39e63e66416f688e6a3641ea8e975d414ece6b04 | refs/heads/master | 2020-05-07T16:44:33.312442 | 2019-04-11T02:09:25 | 2019-04-11T02:09:25 | 180,696,241 | 0 | 0 | null | 2019-04-11T02:07:11 | 2019-04-11T02:07:10 | null | UTF-8 | Python | false | false | 2,783 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
# Generated by Django 1.11.11 on 2019-03-04 09:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Generated migration: refresh default ordering, verbose names and
    field labels of the statistics models.
    Label strings below are ``\\uXXXX`` escapes for Chinese text; leave the
    escapes as generated rather than re-encoding them.
    """
    dependencies = [
        ('statistics', '0009_auto_20181116_1627'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='componentexecutedata',
            options={'ordering': ['-id'], 'verbose_name': 'Pipeline\u6807\u51c6\u63d2\u4ef6\u6267\u884c\u6570\u636e', 'verbose_name_plural': 'Pipeline\u6807\u51c6\u63d2\u4ef6\u6267\u884c\u6570\u636e'},
        ),
        migrations.AlterModelOptions(
            name='componentintemplate',
            options={'verbose_name': 'Pipeline\u6807\u51c6\u63d2\u4ef6\u88ab\u5f15\u7528\u6570\u636e', 'verbose_name_plural': 'Pipeline\u6807\u51c6\u63d2\u4ef6\u88ab\u5f15\u7528\u6570\u636e'},
        ),
        migrations.AlterField(
            model_name='componentexecutedata',
            name='archived_time',
            field=models.DateTimeField(blank=True, null=True, verbose_name='\u6807\u51c6\u63d2\u4ef6\u6267\u884c\u7ed3\u675f\u65f6\u95f4'),
        ),
        migrations.AlterField(
            model_name='componentexecutedata',
            name='elapsed_time',
            field=models.IntegerField(blank=True, null=True, verbose_name='\u6807\u51c6\u63d2\u4ef6\u6267\u884c\u8017\u65f6(s)'),
        ),
        migrations.AlterField(
            model_name='componentexecutedata',
            name='started_time',
            field=models.DateTimeField(verbose_name='\u6807\u51c6\u63d2\u4ef6\u6267\u884c\u5f00\u59cb\u65f6\u95f4'),
        ),
        migrations.AlterField(
            model_name='instanceinpipeline',
            name='atom_total',
            field=models.IntegerField(verbose_name='\u6807\u51c6\u63d2\u4ef6\u603b\u6570'),
        ),
        migrations.AlterField(
            model_name='templateinpipeline',
            name='atom_total',
            field=models.IntegerField(verbose_name='\u6807\u51c6\u63d2\u4ef6\u603b\u6570'),
        ),
    ]
| [
"pagezhou@tencent.com"
] | pagezhou@tencent.com |
fa284fc48436ccf4f4208b3acd8ddd6f678c9cb3 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/ml/speech/recognize.py | c88df1addbeabd4903e6f394417536ac17cee4ab | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 4,369 | py | # -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recognize speech in provided audio."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml.speech import flags
from googlecloudsdk.command_lib.ml.speech import util
@base.ReleaseTracks(base.ReleaseTrack.GA)
class RecognizeGA(base.Command):
  """Get transcripts of short (less than 60 seconds) audio from an audio file."""
  detailed_help = {
      'DESCRIPTION':
          """\
Get a transcript of an audio file that is less than 60 seconds. You can use
an audio file that is on your local drive or a Google Cloud Storage URL.
If the audio is longer than 60 seconds, you will get an error. Please use
`{parent_command} recognize-long-running` instead.
""",
      'EXAMPLES':
          """\
To get a transcript of an audio file 'my-recording.wav':
    $ {command} 'my-recording.wav' --language-code=en-US
To get a transcript of an audio file in bucket 'gs://bucket/myaudio' with a
custom sampling rate and encoding that uses hints and filters profanity:
    $ {command} 'gs://bucket/myaudio' \\
        --language-code=es-ES --sample-rate=2200 --hints=Bueno \\
        --encoding=OGG_OPUS --filter-profanity
""",
      'API REFERENCE':
          """\
This command uses the speech/v1 API. The full documentation for this API
can be found at: https://cloud.google.com/speech-to-text/docs/quickstart-protocol
"""
  }
  # API version used both to build the request and to resolve the audio path.
  API_VERSION = 'v1'
  # Shared flag<->request translator; the beta/alpha subclasses reuse it.
  flags_mapper = flags.RecognizeArgsToRequestMapper()
  @classmethod
  def Args(cls, parser):
    """Register the recognize command's flags on *parser*."""
    parser.display_info.AddFormat('json')
    cls.flags_mapper.AddRecognizeArgsToParser(parser, cls.API_VERSION)
  def MakeRequest(self, args, messages):
    """Build the RecognizeRequest (audio + config) from the parsed *args*."""
    return messages.RecognizeRequest(
        audio=util.GetRecognitionAudioFromPath(args.audio, self.API_VERSION),
        config=self.flags_mapper.MakeRecognitionConfig(args, messages))
  def Run(self, args):
    """Run 'ml speech recognize'.
    Args:
      args: argparse.Namespace, The arguments that this command was invoked
        with.
    Returns:
      The synchronous Recognize API response (rendered via the command's
      JSON display format).
    """
    client = apis.GetClientInstance(util.SPEECH_API, self.API_VERSION)
    # Keep the request on the instance so Epilog can reference it afterwards.
    self._request = self.MakeRequest(args, client.MESSAGES_MODULE)
    return client.speech.Recognize(self._request)
  def Epilog(self, unused_resources_were_displayed):
    """Possibly print a Speech-to-Text UI link for the issued request
    (delegates to util.MaybePrintSttUiLink)."""
    util.MaybePrintSttUiLink(self._request)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class RecognizeBeta(RecognizeGA):
  __doc__ = RecognizeGA.__doc__
  # Copy (not alias) the GA help so the 'API REFERENCE' entry can be replaced
  # below without mutating RecognizeGA.detailed_help.
  detailed_help = RecognizeGA.detailed_help.copy()
  API_VERSION = 'v1p1beta1'
  @classmethod
  def Args(cls, parser):
    """Register the GA flags plus the beta-only recognize flags."""
    super(RecognizeBeta, RecognizeBeta).Args(parser)
    cls.flags_mapper.AddBetaRecognizeArgsToParser(parser)
  def MakeRequest(self, args, messages):
    """Build the GA request, then apply beta-only config arguments."""
    request = super(RecognizeBeta, self).MakeRequest(args, messages)
    self.flags_mapper.UpdateBetaArgsInRecognitionConfig(args, request.config)
    return request
# Point the copied help at the beta API surface.
RecognizeBeta.detailed_help['API REFERENCE'] = """\
This command uses the speech/v1p1beta1 API. The full documentation for this API
can be found at: https://cloud.google.com/speech-to-text/docs/quickstart-protocol
"""
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class RecognizeAlpha(RecognizeBeta):
  __doc__ = RecognizeBeta.__doc__
  # Alpha currently rides on the same API version as beta.
  API_VERSION = 'v1p1beta1'
  @classmethod
  def Args(cls, parser):
    """Register the beta flags plus the alpha-only recognize flags."""
    super(RecognizeAlpha, RecognizeAlpha).Args(parser)
    cls.flags_mapper.AddAlphaRecognizeArgsToParser(parser, cls.API_VERSION)
  def MakeRequest(self, args, messages):
    """Build the beta request, then apply alpha-only config arguments."""
    request = super(RecognizeAlpha, self).MakeRequest(args, messages)
    self.flags_mapper.UpdateAlphaArgsInRecognitionConfig(args, request.config)
    return request
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
57f7e7a98efc2ba19453ca164616ca915aa1d1b1 | f66a33f8cdd8286320da730be67c89ee00d83d8d | /ext/libelf/SConscript | 535e216ddf1152e87d570cb4615bde289b7d97d3 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | H2020-COSSIM/cgem5 | 0d5812632757e6146f7852c9bf4abe4e9628296a | 1222cc0c5618875e048f288e998187c236508a64 | refs/heads/main | 2023-05-13T14:08:01.665322 | 2023-05-08T08:39:50 | 2023-05-08T08:39:50 | 468,039,890 | 3 | 2 | BSD-3-Clause | 2022-10-12T14:29:33 | 2022-03-09T18:05:40 | C++ | UTF-8 | Python | false | false | 5,084 | # -*- mode:python -*-
# Copyright (c) 2004-2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os, subprocess
Import('env')
elf_files = []
def ElfFile(filename):
    """Register one libelf source file (as a SCons File node) for the build."""
    elf_files.append(File(filename))
ElfFile('elf.c')
ElfFile('elf_begin.c')
ElfFile('elf_cntl.c')
ElfFile('elf_data.c')
ElfFile('elf_end.c')
ElfFile('elf_errmsg.c')
ElfFile('elf_errno.c')
ElfFile('elf_fill.c')
ElfFile('elf_flag.c')
ElfFile('elf_getarhdr.c')
ElfFile('elf_getarsym.c')
ElfFile('elf_getbase.c')
ElfFile('elf_getident.c')
ElfFile('elf_hash.c')
ElfFile('elf_kind.c')
ElfFile('elf_memory.c')
ElfFile('elf_next.c')
ElfFile('elf_open.c')
ElfFile('elf_phnum.c')
ElfFile('elf_rand.c')
ElfFile('elf_rawfile.c')
ElfFile('elf_scn.c')
ElfFile('elf_shnum.c')
ElfFile('elf_shstrndx.c')
ElfFile('elf_strptr.c')
ElfFile('elf_update.c')
ElfFile('elf_version.c')
ElfFile('gelf_cap.c')
ElfFile('gelf_checksum.c')
ElfFile('gelf_dyn.c')
ElfFile('gelf_ehdr.c')
ElfFile('gelf_fsize.c')
ElfFile('gelf_getclass.c')
ElfFile('gelf_move.c')
ElfFile('gelf_phdr.c')
ElfFile('gelf_rel.c')
ElfFile('gelf_rela.c')
ElfFile('gelf_shdr.c')
ElfFile('gelf_sym.c')
ElfFile('gelf_syminfo.c')
ElfFile('gelf_symshndx.c')
ElfFile('gelf_xlate.c')
ElfFile('libelf.c')
ElfFile('libelf_align.c')
ElfFile('libelf_allocate.c')
ElfFile('libelf_ar.c')
ElfFile('libelf_ar_util.c')
ElfFile('libelf_checksum.c')
ElfFile('libelf_data.c')
ElfFile('libelf_ehdr.c')
ElfFile('libelf_extended.c')
ElfFile('libelf_memory.c')
ElfFile('libelf_open.c')
ElfFile('libelf_phdr.c')
ElfFile('libelf_shdr.c')
ElfFile('libelf_xlate.c')
ElfFile('libelf_convert.c')
ElfFile('libelf_fsize.c')
ElfFile('libelf_msize.c')
m4env = env.Clone()
if m4env['GCC']:
m4env.Append(CCFLAGS=['-Wno-pointer-sign',
'-Wno-unused-but-set-variable',
'-Wno-implicit-function-declaration',
'-Wno-override-init'])
if m4env['CLANG']:
m4env.Append(CCFLAGS=['-Wno-initializer-overrides', '-Wno-pointer-sign'])
# clang defaults to c99 (while gcc defaults to gnu89) and there is a
# difference in the handling of inlining functions which causes
# linking problems with multiple definitions of the symbols in
# sysmacros.h for older versions of glibc
m4env.Append(CCFLAGS=['-std=gnu89'])
m4env.Append(CCFLAGS=['-Wno-implicit', '-Wno-undef'])
del m4env['CPPPATH']
# If we have gm4 use it
if m4env.Detect('gm4'):
m4env['M4'] = 'gm4'
# Check that m4 is available
import SCons.Tool.m4
if not SCons.Tool.m4.exists(m4env):
print("Error: Can't find version of M4 macro processor. " +
"Please install M4 and try again.")
Exit(1)
# Setup m4 tool
m4env.Tool('m4')
m4env.Append(M4FLAGS=['-DSRCDIR=%s' % Dir('.').path])
m4env['M4COM'] = '$M4 $M4FLAGS $SOURCES > $TARGET'
m4env.M4(target=File('libelf_convert.c'),
source=[File('elf_types.m4'), File('libelf_convert.m4')])
m4env.M4(target=File('libelf_fsize.c'),
source=[File('elf_types.m4'), File('libelf_fsize.m4')])
m4env.M4(target=File('libelf_msize.c'),
source=[File('elf_types.m4'), File('libelf_msize.m4')])
m4env.Append(CPPPATH=Dir('.'))
# Build libelf as a static library with PIC code so it can be linked
# into either m5 or the library
m4env.Library('elf', [m4env.SharedObject(f) for f in elf_files])
# Generate the native-elf-format header file based on the build system
m4env.Command(File('native-elf-format.h'), File('native-elf-format'),
'${SOURCE} > ${TARGET}')
env.Prepend(CPPPATH=Dir('.'))
env.Append(LIBS=[File('libelf.a')])
env.Prepend(LIBPATH=[Dir('.')])
| [
"ntampouratzis@isc.tuc.gr"
] | ntampouratzis@isc.tuc.gr | |
f091fda178c17b28a06ec0aab0bf657492ab6016 | 0556754cd4765d05a1d831c48933c5f299bb095d | /Dec-18-2020/ThreadByExtending.py | 12369dbbe3bf5dd33bbdee68914a580058d7eefa | [] | no_license | rohitbhatghare/python | 4fa5e5883743023ced841892a13a9798b7686f39 | 248d265e02ecbc1270a87081af26537eb401e535 | refs/heads/main | 2023-02-03T04:32:15.716805 | 2020-12-21T11:33:27 | 2020-12-21T11:33:27 | 302,831,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from threading import *
class Mythread(Thread):
    """Thread subclass whose run() prints the same message ten times."""
    def run(self):
        # Body executed on the worker thread once start() is called.
        remaining = 10
        while remaining:
            print("child class-1")
            remaining -= 1
# Start the worker thread; its run() executes concurrently with the loop below.
t = Mythread()
t.start()
for i in range(10):
    # Output interleaves nondeterministically with the child thread's prints.
    print("main thread-1")
| [
"noreply@github.com"
] | rohitbhatghare.noreply@github.com |
2e78bcd54c647bec3744feb1502e4fef91bc5733 | 84de9423c003e22631a549dd767f7f88006f73d5 | /tests/tools/profile/runtest.py | 3c664934c55eae96aa200b0add25463fb790bb5a | [
"Apache-2.0"
] | permissive | Go0zx/NyuziProcessor | 7c23cdad06cff0e0d6f77264e54b1fa826231e91 | 35264d91dafbf0455e551e3e1f3cd1a0f429c991 | refs/heads/master | 2020-08-07T20:51:04.594123 | 2019-10-05T13:07:29 | 2019-10-05T15:27:20 | 213,583,543 | 1 | 0 | Apache-2.0 | 2019-10-08T08:04:20 | 2019-10-08T08:04:19 | null | UTF-8 | Python | false | false | 2,386 | py | #!/usr/bin/env python3
#
# Copyright 2017 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test profiling capabilities of hardware simulator."""
import os
import subprocess
import sys
sys.path.insert(0, '../..')
import test_harness
@test_harness.test(['verilator'])
def profile(*unused):
    """Build a test program, run it under Verilator with profiling enabled,
    and check that per-function sample counts roughly scale with each
    loop's iteration count (5000/10000/20000 iterations).
    """
    hexfile = test_harness.build_program(['test_program.c'])
    elffile = test_harness.get_elf_file_for_hex(hexfile)
    profile_file = os.path.join(test_harness.WORK_DIR, 'profile.out')
    test_harness.run_program(hexfile, 'verilator', profile_file=profile_file)
    # Dump the ELF symbol table; profile.py uses it to map sampled program
    # counters back to function names.
    symbol_file = os.path.join(test_harness.WORK_DIR, 'symbols.txt')
    objdump_args = [
        os.path.join(test_harness.COMPILER_BIN_DIR, 'llvm-objdump'),
        '-t', elffile
    ]
    symbols = subprocess.check_output(objdump_args)
    with open(symbol_file, 'w') as f:
        f.write(symbols.decode())
    profile_args = [
        os.path.join(test_harness.TOOL_BIN_DIR, 'profile.py'),
        symbol_file,
        profile_file
    ]
    # NOTE(review): shell=True with a space-joined command line breaks if any
    # path contains spaces; presumably safe for this repo layout, but a list
    # argv with shell=False would be more robust -- confirm profile.py is
    # directly executable before changing.
    profile_output = subprocess.check_output(' '.join(profile_args), shell=True)
    profile_lines = profile_output.decode().split('\n')
    profile_tuples = [line.split() for line in profile_lines if line]
    # Each output line has three whitespace-separated fields; the first is the
    # sample count and the third the function name (middle field unused here).
    profile_map = {func: int(count) for count, _, func in profile_tuples}
    # These tests don't end up being exactly 2x the number of samples. Because
    # the system samples randomly, it can vary. I could have ran the test longer
    # to get more samples, but this is really just a smoke test and I didn't want
    # to bloat the test time unnecessarily.
    loop5k = profile_map['loop5000']
    loop10k = profile_map['loop10000']
    loop20k = profile_map['loop20000']
    test_harness.assert_greater(loop5k, 0)
    test_harness.assert_greater(loop10k, loop5k * 1.75)
    test_harness.assert_greater(loop20k, loop10k * 1.75)
# Discover and run all tests registered via @test_harness.test.
test_harness.execute_tests()
| [
"jeffbush001@gmail.com"
] | jeffbush001@gmail.com |
88096a5c9910e54d7fb1b3d7865008e8ba9acb34 | 696799b824503429a3ac65ebdc28890bfbcaebe0 | /plugins/com.astra.ses.spell.gui.cots_4.0.2.201806070922/win32/spell/spell/lib/dummy/config.py | bde2e0f7b4c61e25d9d920099ca1f2b5a02f09a6 | [] | no_license | CalypsoCubesat/SPELL_GUI_4.0.2_win32_x86 | a176886b48873b090ab270c189113a8b2c261a06 | 9275ecfff2195ca4d4c297f894d80c1bcfa609e3 | refs/heads/master | 2021-08-03T08:04:25.821703 | 2019-10-28T04:53:50 | 2019-10-28T04:53:50 | 217,968,357 | 0 | 0 | null | 2021-08-02T17:03:44 | 2019-10-28T04:50:59 | Python | UTF-8 | Python | false | false | 3,382 | py | ###############################################################################
"""
(c) SES-ASTRA 2008
PACKAGE
spell.lib.adapter.config
FILE
config.py
DESCRIPTION
Setup environment for correct core driver instantiation
COPYRIGHT
This software is the copyrighted work of SES ASTRA S.A.
All rights reserved.
PROJECT
UGCS/USL
AUTHOR
Rafael Chinchilla Camara (GMV)
DATE
01/10/2007
"""
###############################################################################
#*******************************************************************************
# SPELL Imports
#*******************************************************************************
from spell.utils.log import *
from spell.config.reader import *
from spell.config.constants import COMMON
from spell.lib.registry import REGISTRY
from spell.lib.exception import DriverException
#*******************************************************************************
# Local Imports
#*******************************************************************************
from interface.model import SimulatorModel
#*******************************************************************************
# System Imports
#*******************************************************************************
import os
###############################################################################
# Module import definition
__all__ = ['CONFIG']
INTERFACE_DEFAULTS = {}
###############################################################################
# Superclass
import spell.lib.adapter.config
superClass = spell.lib.adapter.config.ConfigInterface
###############################################################################
class ConfigInterface(superClass):
    """Standalone (dummy-driver) configuration interface.
    Extends the generic adapter ConfigInterface to load a SimulatorModel
    from the driver's simulation file and publish it as REGISTRY['SIM'].
    """
    #==========================================================================
    def __init__(self):
        superClass.__init__(self)
        LOG("Created")
    #==========================================================================
    def setup(self, contextName):
        """Set up for *contextName*: resolve the simulation file from the
        context/driver configuration, build the simulator and register it.
        Raises DriverException when the SPELL home is not defined.
        """
        superClass.setup(self, contextName)
        LOG("Setup standalone CFG interface")
        dataPath = Config.instance().getRuntimeDir()
        driver = Config.instance().getContextConfig(contextName).getDriver()
        driverInfo = Config.instance().getDriverConfig(driver)
        simulationPath = driverInfo['SimPath']
        simulationFile = Config.instance().getContextConfig(contextName).getDriverConfig('Simulation')
        # home is only validated here; its value is not used below.
        home = Config.instance().getHome()
        if home is None:
            raise DriverException("SPELL home is not defined")
        LOG("Loading simulation: " + simulationFile)
        # Absolute path: <runtime dir>/<driver sim path>/<simulation file>
        simulationFile = dataPath + os.sep + simulationPath + \
                         os.sep + simulationFile
        # Wire the TM/TC interfaces into the simulator before loading it.
        SIM = SimulatorModel()
        SIM.tmClass = REGISTRY['TM']
        SIM.tcClass = REGISTRY['TC']
        SIM.setup( simulationFile )
        REGISTRY['SIM'] = SIM
    #==========================================================================
    def cleanup(self, shutdown = False):
        """Clean up: shut the simulator down and drop it from the registry."""
        superClass.cleanup(self, shutdown)
        LOG("Cleanup standalone CFG interface")
        REGISTRY['SIM'].cleanup()
        REGISTRY.remove('SIM')
###############################################################################
# Interface instance
CONFIG = ConfigInterface()
| [
"matthew.travis@aresinstitute.org"
] | matthew.travis@aresinstitute.org |
09431e04feee5173a0495644128709b76257ecd9 | 9eec7de3670cb6a53dd2e1ac16891eed45cc796e | /xhose/urls.py | 1405be00d3dccdc6388acd60de4e74ae99adcfac | [] | no_license | rudiq4/landing-page | e7c891c9d4220b74cb88b9e86e341f0faf05627d | f4ab2eb4d80620b6fbaec3d04c79c23949375bd4 | refs/heads/master | 2020-03-24T20:56:12.380552 | 2018-07-31T11:11:35 | 2018-07-31T11:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from django.conf.urls import url
from xhose import views
app_name = 'xhose'  # URL namespace for reverse()/{% url %} lookups, e.g. 'xhose:xhose'
urlpatterns = [
    # NOTE(review): no trailing '$' anchor, so this matches any path that
    # starts with 'xhose/' -- confirm that is intended.
    url(r'^xhose/', views.xhose, name='xhose'),
    # url(r'^contacts/$', views.contacts, name='contacts'),
]
"rudikvovan@gmail.com"
] | rudikvovan@gmail.com |
62476c413ee5a0c5b0c4c85e0d4bb4c3eec16108 | 9f86b4a4e31affb497dcc500ea45a57589f2f533 | /detectron2/layers/nms.py | e29435e77b9edca7f01da5e4627352c81bd2920a | [] | permissive | ishann/detectron2 | a68ae39960dc2594dd971908176074ac5af8b1ba | a52fcd7f6d5ebd334508d59823dbda9f81f2cd0e | refs/heads/master | 2020-08-10T05:58:21.115916 | 2019-12-09T01:16:00 | 2019-12-09T01:16:00 | 214,275,928 | 0 | 0 | Apache-2.0 | 2019-10-10T20:08:02 | 2019-10-10T20:08:02 | null | UTF-8 | Python | false | false | 6,572 | py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torchvision.ops import boxes as box_ops
from torchvision.ops import nms # BC-compat
def batched_nms(boxes, scores, idxs, iou_threshold):
    """
    Same as torchvision.ops.boxes.batched_nms, but safer.

    Args:
        boxes (Tensor[N, 4]): boxes to suppress.
        scores (Tensor[N]): per-box scores.
        idxs (Tensor[N]): per-box category index; NMS is never applied
            across categories.
        iou_threshold (float): IoU above which the lower-scoring box is
            suppressed.

    Returns:
        Tensor: indices of the kept boxes, sorted by decreasing score.
    """
    assert boxes.shape[-1] == 4
    # Small inputs: delegate to torchvision directly.
    # TODO may need better strategy.
    # Investigate after having a fully-cuda NMS op.
    if len(boxes) < 40000:
        return box_ops.batched_nms(boxes, scores, idxs, iou_threshold)

    # Large inputs: run plain NMS once per class and merge the survivors.
    kept_mask = torch.zeros_like(scores, dtype=torch.bool)
    for class_id in torch.unique(idxs).cpu().tolist():
        class_sel = (idxs == class_id).nonzero().view(-1)
        class_keep = nms(boxes[class_sel], scores[class_sel], iou_threshold)
        kept_mask[class_sel[class_keep]] = True
    kept = kept_mask.nonzero().view(-1)
    # Present results in decreasing score order.
    return kept[scores[kept].argsort(descending=True)]
# Note: this function (nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def nms_rotated(boxes, scores, iou_threshold):
    """
    Performs non-maximum suppression (NMS) on the rotated boxes according
    to their intersection-over-union (IoU).
    Rotated NMS iteratively removes lower scoring rotated boxes which have an
    IoU greater than iou_threshold with another (higher scoring) rotated box.
    Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as
    RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they
    can be representing completely different objects in certain tasks, e.g., OCR.
    As for the question of whether rotated-NMS should treat them as faraway boxes
    even though their IOU is 1, it depends on the application and/or ground truth annotation.
    As an extreme example, consider a single character v and the square box around it.
    If the angle is 0 degree, the object (text) would be read as 'v';
    If the angle is 90 degrees, the object (text) would become '>';
    If the angle is 180 degrees, the object (text) would become '^';
    If the angle is 270/-90 degrees, the object (text) would become '<'
    All of these cases have IoU of 1 to each other, and rotated NMS that only
    uses IoU as criterion would only keep one of them with the highest score -
    which, practically, still makes sense in most cases because typically
    only one of theses orientations is the correct one. Also, it does not matter
    as much if the box is only used to classify the object (instead of transcribing
    them with a sequential OCR recognition model) later.
    On the other hand, when we use IoU to filter proposals that are close to the
    ground truth during training, we should definitely take the angle into account if
    we know the ground truth is labeled with the strictly correct orientation (as in,
    upside-down words are annotated with -180 degrees even though they can be covered
    with a 0/90/-90 degree box, etc.)
    The way the original dataset is annotated also matters. For example, if the dataset
    is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,
    we can estimate a minimum rotated bounding box to this polygon, but there's no way
    we can tell the correct angle with 100% confidence (as shown above, there could be 4 different
    rotated boxes, with angles differed by 90 degrees to each other, covering the exactly
    same region). In that case we have to just use IoU to determine the box
    proximity (as many detection benchmarks (even for text) do) unless there're other
    assumptions we can make (like width is always larger than height, or the object is not
    rotated by more than 90 degrees CCW/CW, etc.)
    In summary, not considering angles in rotated NMS seems to be a good option for now,
    but we should be aware of its implications.
    Args:
        boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in
           (x_center, y_center, width, height, angle_degrees) format.
        scores (Tensor[N]): Scores for each one of the rotated boxes
        iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold
    Returns:
        keep (Tensor): int64 tensor with the indices of the elements that have been kept
        by Rotated NMS, sorted in decreasing order of scores
    """
    # Deferred import of the compiled extension -- presumably so importing
    # this module does not require the native _C library to load; confirm.
    from detectron2 import _C
    return _C.nms_rotated(boxes, scores, iou_threshold)
# Note: this function (batched_nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def batched_nms_rotated(boxes, scores, idxs, iou_threshold):
    """
    Performs non-maximum suppression in a batched fashion.
    Each index value correspond to a category, and NMS
    will not be applied between elements of different categories.
    Args:
        boxes (Tensor[N, 5]):
           boxes where NMS will be performed. They
           are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format
        scores (Tensor[N]):
           scores for each one of the boxes
        idxs (Tensor[N]):
           indices of the categories for each one of the boxes.
        iou_threshold (float):
           discards all overlapping boxes
           with IoU < iou_threshold
    Returns:
        Tensor:
            int64 tensor with the indices of the elements that have been kept
            by NMS, sorted in decreasing order of scores
    """
    assert boxes.shape[-1] == 5
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=boxes.device)
    # Strategy: in order to perform NMS independently per class,
    # we add an offset to all the boxes. The offset is dependent
    # only on the class idx, and is large enough so that boxes
    # from different classes do not overlap
    # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,
    # which won't handle negative coordinates correctly.
    # Here by using min_coordinate we can make sure the negative coordinates are
    # correctly handled.
    # Bounds on any box edge over both axes: max/min center coordinate padded
    # by half of the larger extent (the division binds before the addition).
    max_coordinate = (
        torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2
    ).max()
    min_coordinate = (
        torch.min(boxes[:, 0], boxes[:, 1]) - torch.min(boxes[:, 2], boxes[:, 3]) / 2
    ).min()
    # idxs.to(boxes) casts the class indices to boxes' dtype/device before scaling.
    offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)
    boxes_for_nms = boxes.clone()  # avoid modifying the original values in boxes
    # Shift only the (x_ctr, y_ctr) columns; extents and angle are unchanged.
    boxes_for_nms[:, :2] += offsets[:, None]
    keep = nms_rotated(boxes_for_nms, scores, iou_threshold)
    return keep
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
dc6e21c252793fe6e0b0f0a7c9640fbeb83c8bac | 0cb6eb8a9dc9bdd4e3552040848cecec19d25798 | /FaceRecognition/settings.py | 2135266da38e065cd8395bcfab3e9c68efad056e | [] | no_license | mayank2498/Face_Detection | 9c89e40a0b5359466c2c49e89a2fb6204ad0be20 | fdcb98923d924312038356706c3830fe13bed3da | refs/heads/master | 2021-08-31T13:09:52.876858 | 2017-12-21T11:32:33 | 2017-12-21T11:32:33 | 114,999,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | """
Django settings for FaceRecognition project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ry4l!_2@+ha^qlfasvjp96g19-vgj$*j542rwcv1)#b!rrgrna'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'recognise'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FaceRecognition.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'FaceRecognition.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
| [
"mayankchaurasia.bsp@gmail.com"
] | mayankchaurasia.bsp@gmail.com |
551a6c050f045f8fac9fa97a7f453cecde40f95d | 559fe08f79c297783c404caf7eccee2a269932d4 | /etl/parsers/etw/Intel_Thunderbolt_App.py | 93acf723184612ce60fab955eed5a1b08887f46f | [
"Apache-2.0"
] | permissive | killvxk/etl-parser | 9ba70f54120887f56950054f2cde6dc6c18e0973 | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | refs/heads/master | 2022-11-23T03:35:47.127241 | 2020-07-23T08:55:50 | 2020-07-23T08:55:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,292 | py | # -*- coding: utf-8 -*-
"""
Intel-Thunderbolt-App
GUID : 8ef15e41-05bf-5bcd-4aa8-4f0559564dc0
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=0, version=0)
class Intel_Thunderbolt_App_0_0(Etw):
pattern = Struct(
"message" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=1, version=0)
class Intel_Thunderbolt_App_1_0(Etw):
pattern = Struct(
"message" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=2, version=0)
class Intel_Thunderbolt_App_2_0(Etw):
pattern = Struct(
"message" / WString,
"stackTrace" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=3, version=0)
class Intel_Thunderbolt_App_3_0(Etw):
pattern = Struct(
"message" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=4, version=0)
class Intel_Thunderbolt_App_4_0(Etw):
pattern = Struct(
"message" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=5, version=0)
class Intel_Thunderbolt_App_5_0(Etw):
pattern = Struct(
"eventName" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=6, version=0)
class Intel_Thunderbolt_App_6_0(Etw):
pattern = Struct(
"obj" / WString,
"method" / WString,
"inparams" / WString,
"stackFrame" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=7, version=0)
class Intel_Thunderbolt_App_7_0(Etw):
pattern = Struct(
"message" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=8, version=0)
class Intel_Thunderbolt_App_8_0(Etw):
pattern = Struct(
"info" / WString
)
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=9, version=0)
class Intel_Thunderbolt_App_9_0(Etw):
pattern = Struct(
"action" / WString,
"message" / WString
)
| [
"citronneur@gmail.com"
] | citronneur@gmail.com |
9e94d260f3ad0d89442a785c6b2ebbacb48b40ab | 363302c0dce6f72290f19be2bb0728d7b0bdb02d | /top_like_tags/migrations/0004_fixed_hashtag_sub_title.py | 911da63f8ab79c32a12580e338a476a9d5f8420c | [] | no_license | linker10/totag | 87e242734c9b586e66f76f4d99740bb1f175117c | 274e70174387d9ad55ab27e16816dd54f6e71699 | refs/heads/master | 2022-12-06T13:56:23.641080 | 2020-09-04T15:49:24 | 2020-09-04T15:49:24 | 292,886,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # Generated by Django 3.0.6 on 2020-05-31 09:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the required ``sub_title`` CharField to
    ``fixed_hashtag``; the 'hello' default was supplied only to backfill
    existing rows (``preserve_default=False``)."""

    dependencies = [
        ('top_like_tags', '0003_fixed_hashtag'),
    ]

    operations = [
        migrations.AddField(
            model_name='fixed_hashtag',
            name='sub_title',
            field=models.CharField(default='hello', max_length=200),
            preserve_default=False,
        ),
    ]
| [
"bilalsharif4@gmail.com"
] | bilalsharif4@gmail.com |
e4850aa7a18ba29d395c5eec5ec94ea8a09818a8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_transmigrated.py | 297c6c5aac3496c8827de39a106d134d7b6404d3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py |
# class header
class _TRANSMIGRATED():
def __init__(self,):
self.name = "TRANSMIGRATED"
self.definitions = transmigrate
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['transmigrate']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ce5db88c9ac44022408cf0d61d9a3509c38b239c | 2be679906bfd8481fde463b0ceaf7d0d8a9c4775 | /tests/test_session.py | 79c986120cdf916f06ad273063bb1440caa118fa | [
"MIT"
] | permissive | dayjaby/libtmux | 73b8b62e47ca18043dda55ac6cb0c7d1a389a060 | 47a8aeb7fa6919aae7e6c0303e7aece5e7679b21 | refs/heads/master | 2020-07-13T16:32:07.385953 | 2019-08-29T11:46:48 | 2019-08-29T11:46:48 | 205,115,334 | 0 | 0 | MIT | 2019-08-29T08:19:44 | 2019-08-29T08:19:44 | null | UTF-8 | Python | false | false | 7,807 | py | # -*- coding: utf-8 -*-
"""Test for tmuxp Session object."""
from __future__ import absolute_import, unicode_literals, with_statement
import logging
import pytest
from libtmux import Pane, Session, Window, exc
from libtmux.common import has_gte_version
from libtmux.test import TEST_SESSION_PREFIX, namer
logger = logging.getLogger(__name__)
def test_has_session(server, session):
    """Server.has_session returns True iff a session named session_name exists.

    Exact-match control (``exact=``) only exists on tmux >= 2.1, hence the
    version guard around the prefix checks.
    """
    TEST_SESSION_NAME = session.get('session_name')
    assert server.has_session(TEST_SESSION_NAME)
    if has_gte_version('2.1'):
        # a strict name prefix must not match exactly, but does fuzzily
        assert not server.has_session(TEST_SESSION_NAME[:-2])
        assert server.has_session(TEST_SESSION_NAME[:-2], exact=False)
    assert not server.has_session('asdf2314324321')
def test_select_window(session):
    """Session.select_window moves to the window and reports it as attached."""
    # get the current window_base_index, since different user tmux config
    # may start at 0 or 1, or whatever they want.
    window_base_index = int(session.attached_window.get('window_index'))

    session.new_window(window_name='test_window')
    window_count = len(session._windows)

    assert window_count >= 2  # 2 or more windows

    assert len(session._windows) == window_count

    # tmux selects a window, moves to it, shows it as attached_window
    selected_window1 = session.select_window(window_base_index)
    assert isinstance(selected_window1, Window)
    attached_window1 = session.attached_window

    assert selected_window1 == attached_window1
    assert selected_window1.__dict__ == attached_window1.__dict__

    # again: tmux selects a window, moves to it, shows it as
    # attached_window
    selected_window2 = session.select_window(window_base_index + 1)
    assert isinstance(selected_window2, Window)
    attached_window2 = session.attached_window

    assert selected_window2 == attached_window2
    assert selected_window2.__dict__ == attached_window2.__dict__

    # assure these windows were really different
    assert selected_window1 != selected_window2
    assert selected_window1.__dict__ != selected_window2.__dict__
def test_select_window_returns_Window(session):
    """Session.select_window returns a Window object for the base index."""
    window_count = len(session._windows)
    assert len(session._windows) == window_count
    window_base_index = int(session.attached_window.get('window_index'))

    assert isinstance(session.select_window(window_base_index), Window)
def test_attached_window(session):
    """Session.attached_window returns the currently attached Window."""
    assert isinstance(session.attached_window, Window)
def test_attached_pane(session):
    """Session.attached_pane returns the currently attached Pane."""
    assert isinstance(session.attached_pane, Pane)
def test_session_rename(session):
    """Session.rename_session renames the session (and back again)."""
    TEST_SESSION_NAME = session.get('session_name')
    test_name = 'testingdis_sessname'
    session.rename_session(test_name)
    assert session.get('session_name') == test_name
    # restore the original name so later tests see the fixture unchanged
    session.rename_session(TEST_SESSION_NAME)
    assert session.get('session_name') == TEST_SESSION_NAME
def test_new_session(server):
    """Server.new_session creates a detached session with the given name."""
    new_session_name = TEST_SESSION_PREFIX + next(namer)
    new_session = server.new_session(session_name=new_session_name, detach=True)

    assert isinstance(new_session, Session)
    assert new_session.get('session_name') == new_session_name
def test_show_options(session):
    """Session.show_options() with no key returns a dict of all options."""
    options = session.show_options()
    assert isinstance(options, dict)
def test_set_show_options_single(session):
    """set_option round-trips through show_options(key) and the full dict."""
    session.set_option('history-limit', 20)
    assert session.show_options('history-limit') == 20

    session.set_option('history-limit', 40)
    assert session.show_options('history-limit') == 40

    assert session.show_options()['history-limit'] == 40
def test_set_show_option(session):
    """set_option round-trips through the single-key show_option(key)."""
    session.set_option('history-limit', 20)
    assert session.show_option('history-limit') == 20

    session.set_option('history-limit', 40)
    assert session.show_option('history-limit') == 40
def test_empty_session_option_returns_None(session):
    """show_option returns None for a valid option that is unset."""
    assert session.show_option('default-shell') is None
def test_show_option_unknown(session):
    """Session.show_option raises UnknownOption for an invalid option name."""
    with pytest.raises(exc.UnknownOption):
        session.show_option('moooz')
def test_show_option_ambiguous(session):
    """Session.show_option raises AmbiguousOption for an ambiguous prefix."""
    with pytest.raises(exc.AmbiguousOption):
        session.show_option('default-')
def test_set_option_ambigous(session):
    """Session.set_option raises AmbiguousOption for an ambiguous prefix."""
    # NOTE: 'ambigous' typo is kept -- the function name is the test's id.
    with pytest.raises(exc.AmbiguousOption):
        session.set_option('default-', 43)
def test_set_option_invalid(session):
    """Session.set_option raises for an invalid option name.

    tmux >= 2.4 reports the failure differently, hence two expected
    exception types.
    """
    if has_gte_version('2.4'):
        with pytest.raises(exc.InvalidOption):
            session.set_option('afewewfew', 43)
    else:
        with pytest.raises(exc.UnknownOption):
            session.set_option('afewewfew', 43)
def test_show_environment(session):
    """Session.show_environment() with no key returns a dict."""
    _vars = session.show_environment()
    assert isinstance(_vars, dict)
def test_set_show_environment_single(session):
    """set_environment round-trips through show_environment(key) and dict."""
    session.set_environment('FOO', 'BAR')
    assert session.show_environment('FOO') == 'BAR'

    session.set_environment('FOO', 'DAR')
    assert session.show_environment('FOO') == 'DAR'

    assert session.show_environment()['FOO'] == 'DAR'
def test_show_environment_not_set(session):
    """show_environment returns None for a variable that was never set."""
    assert session.show_environment('BAR') is None
def test_remove_environment(session):
    """remove_environment deletes a previously set variable."""
    assert session.show_environment('BAM') is None
    session.set_environment('BAM', 'OK')
    assert session.show_environment('BAM') == 'OK'
    session.remove_environment('BAM')
    assert session.show_environment('BAM') is None
def test_unset_environment(session):
    """unset_environment clears a previously set variable."""
    assert session.show_environment('BAM') is None
    session.set_environment('BAM', 'OK')
    assert session.show_environment('BAM') == 'OK'
    session.unset_environment('BAM')
    assert session.show_environment('BAM') is None
@pytest.mark.parametrize(
    "session_name,raises",
    [('hey.period', True), ('hey:its a colon', True), ('hey moo', False)],
)
def test_periods_raise_badsessionname(server, session, session_name, raises):
    """Session names containing '.' or ':' raise BadSessionName in every API
    that accepts a name; plain spaces are allowed."""
    new_name = session_name + 'moo'  # used for rename / switch
    if raises:
        with pytest.raises(exc.BadSessionName):
            session.rename_session(new_name)

        with pytest.raises(exc.BadSessionName):
            server.new_session(session_name)

        with pytest.raises(exc.BadSessionName):
            server.has_session(session_name)

        with pytest.raises(exc.BadSessionName):
            server.switch_client(new_name)

        with pytest.raises(exc.BadSessionName):
            server.attach_session(new_name)
    else:
        server.new_session(session_name)
        server.has_session(session_name)
        session.rename_session(new_name)
        # switch_client still errors here -- presumably because no client is
        # attached in the test environment; confirm against fixture setup.
        with pytest.raises(exc.LibTmuxException):
            server.switch_client(new_name)
def test_cmd_inserts_sesion_id(session):
    """Session.cmd inserts '-t <session id>' before user args, keeping the
    user's last argument in final position."""
    # NOTE: 'sesion' typo is kept -- the function name is the test's id.
    current_session_id = session.id
    last_arg = 'last-arg'
    cmd = session.cmd('not-a-command', last_arg)
    assert '-t' in cmd.cmd
    assert current_session_id in cmd.cmd
    assert cmd.cmd[-1] == last_arg
| [
"tony@git-pull.com"
] | tony@git-pull.com |
8946b5ae8d70fc3d519899293f2227a86275f898 | 1bebfe3d45f11f89014dc56d27306be7d5507c5c | /flavio/_parse_errors.py | fb894885b8a77c3f8e85e754f4bdca39680f082a | [
"MIT"
] | permissive | talismanbrandi/flavio | 6ed628d9f4831c6d55ff56d587eaaca2633de3bb | 9c8f2ce0fa68ea1a4733977557b4a602d590a0ea | refs/heads/master | 2021-01-19T06:38:17.355774 | 2017-12-10T10:55:23 | 2017-12-10T10:55:23 | 87,472,967 | 0 | 0 | null | 2017-04-06T20:40:43 | 2017-04-06T20:40:43 | null | UTF-8 | Python | false | false | 7,141 | py | import re
from flavio.statistics.probability import *
# Regexes for parsing human-written constraint strings.
# Upper limits of the form '< 5.3e-8 @ 95% CL' -> (significand, exponent, CL%)
_pattern_upperlimit = re.compile(r"^\s*<\s*([-+]?\d+\.?\d*)([eE][-+]?\d+)?\s*@\s*(\d+\.?\d*)\s*\%\s*C[\.\s]*L[\.\s]*$")
# Bracketed errors of the form '1.67(3)(5) 1e-3' -> (value, error group, exponent)
_pattern_brackets = re.compile(r"^\s*\(?\s*(-?\d+\.?\d*)\s*((?:\(\s*\d+\.?\d*\s*\)\s*)+)\)?\s*\*?\s*(?:(?:e|E|1e|1E|10\^)\(?([+-]?\d+)\)?)?$")
# Plus/minus errors of the form '(1.67 +- 0.3 +- 0.5) * 1e-3' -> (value, error group, exponent)
_pattern_plusminus = re.compile(r"^\s*\(?\s*(-?\d+\.?\d*)\s*((?:[+\-±\\pm]+\s*\d+\.?\d*\s*)+)\)?\s*\*?\s*(?:(?:e|E|1e|1E|10\^)\(?([+-]?\d+)\)?)?$")
# Uniform ranges of the form '[1, 5] 1e-3' -> (lo, lo exp, hi, hi exp, overall exp)
_pattern_range = re.compile(r"^\s*\[\s*(-?\d+\.?\d*)\s*([eE][-+]?\d+)?\s*\,\s*(-?\d+\.?\d*)\s*([eE][-+]?\d+)?\s*\]\s*\*?\s*(?:(?:e|E|1e|1E|10\^)\(?([+-]?\d+)\)?)?$")
def errors_from_string(constraint_string):
    """Convert a string like '1.67(3)(5)' or '1.67+-0.03+-0.05' to a dictionary
    of central value and errors.

    Returns a dict with keys 'central_value' (float), 'symmetric_errors'
    (list of floats) and 'asymmetric_errors' (list of (right, left) tuples);
    the error lists may be empty.

    Raises:
        ValueError: if the string matches neither the brackets nor the
            plus/minus pattern.
    """
    m = _pattern_brackets.match(constraint_string)
    if m is None:
        m = _pattern_plusminus.match(constraint_string)
    if m is None:
        raise ValueError("Constraint " + constraint_string + " not understood")
    # extracting the central value and overall power of 10
    if m.group(3) is None:
        overall_factor = 1
    else:
        overall_factor = 10**float(m.group(3))
    central_value = m.group(1)
    # number_decimal gives the number of digits after the decimal point
    if len(central_value.split('.')) == 1:
        number_decimal = 0
    else:
        number_decimal = len(central_value.split('.')[1])
    central_value = float(central_value) * overall_factor
    # now, splitting the errors
    error_string = m.group(2)
    pattern_brackets_err = re.compile(r"\(\s*(\d+\.?\d*)\s*\)\s*")
    pattern_symmetric_err = re.compile(r"(?:±|\\pm|\+\-)(\s*\d+\.?\d*)")
    pattern_asymmetric_err = re.compile(r"\+\s*(\d+\.?\d*)\s*\-\s*(\d+\.?\d*)")
    errors = {}
    errors['central_value'] = central_value
    errors['symmetric_errors'] = []
    errors['asymmetric_errors'] = []
    if pattern_brackets_err.match(error_string):
        for err in re.findall(pattern_brackets_err, error_string):
            if not err.isdigit():
                # if isdigit() is false, it means that it is a number
                # with a decimal point (e.g. '1.5'), so no rescaling is necessary
                standard_deviation = float(err)*overall_factor
            else:
                # if the error is just digits, it refers to the last decimal
                # places of the central value, so rescale it by the
                # appropriate power of 10
                standard_deviation = float(err)*10**(-number_decimal)*overall_factor
            errors['symmetric_errors'].append(standard_deviation)
    elif pattern_symmetric_err.match(error_string) or pattern_asymmetric_err.match(error_string):
        for err in re.findall(pattern_symmetric_err, error_string):
            errors['symmetric_errors'].append( float(err)*overall_factor )
        for err in re.findall(pattern_asymmetric_err, error_string):
            right_err = float(err[0])*overall_factor
            left_err = float(err[1])*overall_factor
            errors['asymmetric_errors'].append((right_err, left_err))
    return errors
def limit_from_string(constraint_string):
    """Parse an upper-limit string such as ``'< 5.3e-8 @ 95% CL'``.

    Returns:
        ``(limit, cl)`` tuple: the bound as a float and the confidence
        level as a fraction (e.g. 0.95).

    Raises:
        ValueError: if the string does not match the upper-limit pattern.
    """
    match = _pattern_upperlimit.match(constraint_string)
    if match is None:
        raise ValueError("Constraint " + constraint_string + " not understood")
    significand, exponent, cl_percent = match.groups()
    if exponent is None:
        limit = float(significand)
    else:
        limit = float(significand + exponent)
    return limit, float(cl_percent) / 100.
def range_from_string(constraint_string):
    """Parse a uniform-range string such as ``'[1, 5] 1e-3'``.

    Returns:
        ``(central_value, half_range)`` tuple describing the interval.

    Raises:
        ValueError: if the string does not match the range pattern, or the
            upper bound is below the lower bound.
    """
    match = _pattern_range.match(constraint_string)
    if match is None:
        raise ValueError("Constraint " + constraint_string + " not understood")
    lo_str, lo_exp, hi_str, hi_exp, overall_exp = match.groups()
    lo = float(lo_str) if lo_exp is None else float(lo_str + lo_exp)
    hi = float(hi_str) if hi_exp is None else float(hi_str + hi_exp)
    if hi < lo:
        raise ValueError("Uniform constraint must be specified as [a,b] with b>a")
    # optional overall power of ten applied to both bounds
    scale = 1 if overall_exp is None else 10**float(overall_exp)
    lo *= scale
    hi *= scale
    return (hi + lo) / 2., (hi - lo) / 2.
def errors_from_constraints(probability_distributions):
    """Collect central value and errors from ``(num, distribution)`` pairs.

    Despite the historical docstring, this returns a *dict* with keys
    'central_value', 'symmetric_errors' and 'asymmetric_errors' (the same
    shape produced by ``errors_from_string``). Correlations are ignored;
    for multivariate normals only the diagonal element ``num`` is used.
    """
    errors = {}
    errors['symmetric_errors'] = []
    errors['asymmetric_errors'] = []
    for num, pd in probability_distributions:
        errors['central_value'] = pd.central_value
        if isinstance(pd, DeltaDistribution):
            # delta distributions (= no error) can be skipped
            continue
        elif isinstance(pd, NormalDistribution):
            errors['symmetric_errors'].append(pd.standard_deviation)
        elif isinstance(pd, AsymmetricNormalDistribution):
            errors['asymmetric_errors'].append((pd.right_deviation, pd.left_deviation))
        elif isinstance(pd, MultivariateNormalDistribution):
            errors['central_value'] = pd.central_value[num]
            # NOTE(review): `math` is presumably re-exported by the star
            # import from flavio.statistics.probability -- confirm.
            errors['symmetric_errors'].append(math.sqrt(pd.covariance[num, num]))
    return errors
def constraints_from_string(constraint_string):
    """Convert a string like '1.67(3)(5)' or '1.67+-0.03+-0.05' to a list
    of ProbabilityDistribution instances.

    Supported forms: plain numbers (DeltaDistribution), upper limits
    (GaussianUpperLimit), ranges (UniformDistribution), and bracket or
    plus/minus errors (Normal/AsymmetricNormal distributions).

    Raises:
        ValueError: if the string matches none of the known patterns.
    """
    try:
        float(constraint_string)
        # if the string represents just a number, return a DeltaDistribution
        return [DeltaDistribution(float(constraint_string))]
    except ValueError:
        # replace dashes (that can come from copy-and-pasting latex) by minuses
        constraint_string = constraint_string.replace('−', '-')
        # try again if the number is a float now
        try:
            float(constraint_string)
            # Bug fix: this branch used to return a bare {'central_value': ...}
            # dict, inconsistent with the documented list-of-distributions
            # return type and with the branch above.
            return [DeltaDistribution(float(constraint_string))]
        except ValueError:  # was a bare `except:`; only float() can fail here
            pass
    if _pattern_upperlimit.match(constraint_string):
        limit, cl = limit_from_string(constraint_string)
        return [GaussianUpperLimit(limit, cl)]
    elif _pattern_range.match(constraint_string):
        central_value, half_range = range_from_string(constraint_string)
        return [UniformDistribution(central_value, half_range)]
    elif _pattern_brackets.match(constraint_string) or _pattern_plusminus.match(constraint_string):
        errors = errors_from_string(constraint_string)
        if 'symmetric_errors' not in errors and 'asymmetric_errors' not in errors:
            return [DeltaDistribution(errors['central_value'])]
        pd = []
        for err in errors['symmetric_errors']:
            pd.append(NormalDistribution(errors['central_value'], err))
        for err_right, err_left in errors['asymmetric_errors']:
            pd.append(AsymmetricNormalDistribution(errors['central_value'], err_right, err_left))
        return pd
    else:
        raise ValueError("Constraint " + constraint_string + " not understood")
| [
"david.straub@tum.de"
] | david.straub@tum.de |
fbb477b2257116e7ff83fc702ea8d7a4dc02cb65 | 10e4652d4e677b2ce89abd0729424dc92c562e19 | /IPython/utils/pickleutil.py | dd1f733637513cf55c58d288c710bb60c5ff6d3b | [
"BSD-3-Clause"
] | permissive | appasche/ipython | 18fd6619d19c96694747aa12f435c410bb5d8f4e | d538c7ac9dcee2e75ff5e670376ac4d3ad6715ee | refs/heads/master | 2021-01-21T09:06:22.830942 | 2015-04-19T16:26:08 | 2015-04-19T16:26:08 | 34,225,707 | 1 | 0 | null | 2015-04-19T21:45:07 | 2015-04-19T21:45:07 | null | UTF-8 | Python | false | false | 140 | py | from warnings import warn
warn("IPython.utils.pickleutil has moved to ipython_kernel.pickleutil")
from ipython_kernel.pickleutil import *
| [
"benjaminrk@gmail.com"
] | benjaminrk@gmail.com |
90d17be586bf4e63e3a8346168cf2cd7cd9755b1 | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/백준문제/백트래킹/14889_스타트와 링크.py | 0552b5ad7fa88ee91f4b003aaaa042dbd8d4f794 | [] | no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | # N은 짝수, 두 팀으로 나눔.
# 팀 나누는 방법은 조합?
def comb(idx, sidx):
if sidx == N // 2:
global teams
teams.append(sel.copy())
return
for i in range(idx, N):
sel[sidx] = i
comb(i + 1, sidx + 1)
N = int(input())
info = [list(map(int, input().split())) for _ in range(N)]
teams = []
sel = [0] * (N // 2)
comb(0, 0)
M = pow(10, 7)
for i in range(len(teams)):
t1 = teams[i]
t2 = [j for j in range(N) if j not in t1]
s1 = sum([info[x][y] for x in t1 for y in t1])
s2 = sum([info[x][y] for x in t2 for y in t2])
tmp = abs(s1 - s2)
if M > tmp:
M = tmp
print(M) | [
"phoenix9373@naver.com"
] | phoenix9373@naver.com |
e8d991ff30ab7eef92e081619c6febba3569da66 | e7d65f8773a8c736fc9e41e843d7da6da5cc2e0b | /py3plex/algorithms/community_detection/node_ranking.py | af80dd4bd4e6a79a2245f1f1a4d8fd843735e838 | [
"BSD-3-Clause"
] | permissive | hanbei969/Py3plex | 768e86b16ca00044fcb4188e01edf32c332c8a2a | 1ef3e0e6d468d24bd6e6aec3bd68f20b9d9686bb | refs/heads/master | 2021-01-03T18:19:24.049457 | 2020-02-12T16:51:14 | 2020-02-12T16:51:14 | 240,188,307 | 1 | 0 | BSD-3-Clause | 2020-02-13T05:57:16 | 2020-02-13T05:57:16 | null | UTF-8 | Python | false | false | 5,165 | py | ## node ranking algorithms
import numpy as np
import networkx as nx
import scipy.sparse as sp
#from networkx.algorithms.community.community_utils import is_partition
from itertools import product
# def stochastic_normalization(matrix):
# matrix = matrix.tolil()
# try:
# matrix.setdiag(0)
# except TypeError:
# matrix.setdiag(np.zeros(matrix.shape[0]))
# matrix = matrix.tocsr()
# d = matrix.sum(axis=1).getA1()
# nzs = np.where(d > 0)
# d[nzs] = 1 / d[nzs]
# matrix = (sp.diags(d, 0).tocsc().dot(matrix)).transpose()
# return matrix
def stochastic_normalization(matrix):
    """Row-normalize *matrix* (with its diagonal removed) and return the
    transpose, i.e. a column-stochastic transition matrix.

    Rows summing to zero (isolated nodes) are left untouched, producing
    all-zero columns rather than NaNs.

    Parameters
    ----------
    matrix : scipy.sparse matrix
        Square (adjacency) matrix.

    Returns
    -------
    scipy.sparse matrix whose nonzero columns each sum to 1.
    """
    matrix = matrix.tolil()
    try:
        matrix.setdiag(0)
    except TypeError:
        # some sparse formats want an array-valued diagonal
        matrix.setdiag(np.zeros(matrix.shape[0]))
    matrix = matrix.tocsr()
    d = matrix.sum(axis=1).getA1()
    nzs = np.where(d > 0)
    # Bug fix: scale in place on the full-length vector instead of building
    # the compressed slice 1/d[nzs] -- the compressed vector produced a
    # diagonal of the wrong dimension (and a crash) whenever the graph
    # contained zero-degree rows.
    d[nzs] = 1 / d[nzs]
    matrix = (sp.diags(d, 0).tocsc().dot(matrix)).transpose()
    return matrix
def stochastic_normalization_hin(matrix):
    """Return the column-stochastic transpose of *matrix* with its diagonal
    zeroed: each row is divided by its sum (zero-sum rows are left as-is)
    and the result is transposed so nonzero columns sum to 1."""
    work = matrix.tolil()
    try:
        work.setdiag(0)
    except TypeError:
        # some sparse formats want an array-valued diagonal
        work.setdiag(np.zeros(work.shape[0]))
    work = work.tocsr()
    row_sums = work.sum(axis=1).getA1()
    nonzero = np.where(row_sums > 0)
    row_sums[nonzero] = 1 / row_sums[nonzero]
    scaled = sp.diags(row_sums, 0).tocsc().dot(work)
    return scaled.transpose()
def modularity(G, communities, weight='weight'):
    """Compute the modularity Q of a partition of graph *G*.

    Parameters
    ----------
    G : networkx graph (directed, undirected or multigraph)
    communities : iterable of node collections, one per community
    weight : edge attribute used as weight (missing edges count 0,
        unweighted edges count 1)

    Returns
    -------
    float : the modularity of the given partition.
    """
    multigraph = G.is_multigraph()
    directed = G.is_directed()
    m = G.size(weight=weight)
    if directed:
        out_degree = dict(G.out_degree(weight=weight))
        in_degree = dict(G.in_degree(weight=weight))
        norm = 1 / m
    else:
        out_degree = dict(G.degree(weight=weight))
        in_degree = out_degree
        norm = 1 / (2 * m)

    def val(u, v):
        # observed edge weight between u and v minus the expected weight
        # under the configuration null model
        try:
            if multigraph:
                w = sum(d.get(weight, 1) for k, d in G[u][v].items())
            else:
                w = G[u][v].get(weight, 1)
        except KeyError:
            w = 0
        # Double count self-loops if the graph is undirected.
        if u == v and not directed:
            w *= 2
        return w - in_degree[u] * out_degree[v] * norm

    # Use the builtin sum: np.sum over a generator is deprecated in NumPy
    # and silently unreliable.
    Q = sum(val(u, v) for c in communities for u, v in product(c, repeat=2))
    return Q * norm
def page_rank_kernel(index_row):
    """Worker kernel: run a personalized PageRank from a single seed node and
    return ``(index_row, normalized_rank_vector)``.

    NOTE(review): `G`, `damping_hyper`, `spread_step_hyper`,
    `spread_percent_hyper` and `graph` are not defined in this module --
    presumably they are injected as globals by the multiprocessing caller
    (confirm); `graph` in the zero-norm branch looks like it should be the
    same object as `G`.
    """
    ## call as results = p.map(pr_kernel, batch)
    pr = sparse_page_rank(G, [index_row],
                          epsilon=1e-6,
                          max_steps=100000,
                          damping=damping_hyper,
                          spread_step=spread_step_hyper,
                          spread_percent=spread_percent_hyper,
                          try_shrink=True)
    # L2-normalize the rank vector; fall back to zeros for dead seeds
    norm = np.linalg.norm(pr, 2)
    if norm > 0:
        pr = pr / np.linalg.norm(pr, 2)
        return (index_row,pr)
    else:
        return (index_row,np.zeros(graph.shape[1]))
def sparse_page_rank(matrix, start_nodes,
                     epsilon=1e-6,
                     max_steps=100000,
                     damping=0.5,
                     spread_step=10,
                     spread_percent=0.3,
                     try_shrink=False):
    """Personalized PageRank by power iteration on a sparse matrix.

    Parameters
    ----------
    matrix : scipy.sparse matrix
        Column-stochastic transition matrix (see the note below).
    start_nodes : sequence of int
        Seed nodes of the personalization vector; must be non-empty.
    epsilon, max_steps : convergence threshold (L1) and iteration cap.
    damping : restart probability complement (teleport weight 1 - damping).
    spread_step, spread_percent : parameters of the optional shrink
        heuristic, which restricts the iteration to nodes reachable from
        the seeds when the reachable set stops growing early.
    try_shrink : enable the shrink heuristic.

    Returns
    -------
    1-D numpy array of PageRank scores with the seed entries zeroed out.
    """
    assert(len(start_nodes)) > 0
    # this method assumes that column sums are all equal to 1 (stochastic normalization!)
    size = matrix.shape[0]
    if start_nodes is None:
        start_nodes = range(size)
        nz = size
    else:
        nz = len(start_nodes)
    # uniform personalization vector over the seed nodes
    start_vec = np.zeros((size, 1))
    start_vec[start_nodes] = 1
    start_rank = start_vec / len(start_nodes)
    rank_vec = start_vec / len(start_nodes)
    # calculate the max spread:
    shrink = False
    which = np.zeros(0)
    if try_shrink:
        # propagate from the seeds for a few steps; if the support stops
        # growing, restrict the problem to the reached submatrix
        v = start_vec / len(start_nodes)
        steps = 0
        while nz < size * spread_percent and steps < spread_step:
            steps += 1
            v += matrix.dot(v)
            nz_new = np.count_nonzero(v)
            if nz_new == nz:
                shrink = True
                break
            nz = nz_new
        rr = np.arange(matrix.shape[0])
        which = (v[rr] > 0).reshape(size)
        if shrink:
            start_rank = start_rank[which]
            rank_vec = rank_vec[which]
            matrix = matrix[:, which][which, :]
    # damped power iteration until the L1 change drops below epsilon
    diff = np.Inf
    steps = 0
    while diff > epsilon and steps < max_steps:  # not converged yet
        steps += 1
        new_rank = matrix.dot(rank_vec)
        rank_sum = np.sum(new_rank)
        # redistribute mass lost to dangling nodes back onto the seeds
        if rank_sum < 0.999999999:
            new_rank += start_rank * (1 - rank_sum)
        new_rank = damping * new_rank + (1 - damping) * start_rank
        new_diff = np.linalg.norm(rank_vec - new_rank, 1)
        diff = new_diff
        rank_vec = new_rank
    if try_shrink and shrink:
        # scatter the shrunken solution back into a full-size vector
        ret = np.zeros(size)
        rank_vec = rank_vec.T[0]  ## this works for both python versions
        ret[which] = rank_vec
        ret[start_nodes] = 0
        return ret.flatten()
    else:
        rank_vec[start_nodes] = 0
        return rank_vec.flatten()
def hubs_and_authorities(graph):
    """Thin wrapper around HITS: returns the (hubs, authorities) dicts.

    NOTE(review): ``nx.hits_scipy`` was removed in recent NetworkX releases
    -- confirm the pinned networkx version still provides it.
    """
    return nx.hits_scipy(graph)
def hub_matrix(graph):
    """Thin wrapper: the HITS hub matrix (A @ A.T) of *graph*."""
    return nx.hub_matrix(graph)
def authority_matrix(graph):
    """Thin wrapper: the HITS authority matrix (A.T @ A) of *graph*."""
    return nx.authority_matrix(graph)
| [
"skrljblaz@gmail.com"
] | skrljblaz@gmail.com |
7dd3f312d9609c7548451d48fd24fc7858972eb4 | 629034462af2e1fccaf7a47e4b9a5cbd789c90e7 | /algorithm/graph-embedding/configs/base.py | e8868d62ac4da3f10266e620bbaf663f22f5b386 | [] | no_license | zhouxh19/Grep-project | eb17c23650aada7a87aef550492ca4bcd8ef0657 | 40ee7d8dcc1e46db82582bf92b86f9a909db727c | refs/heads/master | 2022-12-24T13:00:40.783457 | 2020-09-13T14:47:15 | 2020-09-13T14:47:15 | 284,244,936 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | from pathlib import Path
base_dir = Path().resolve()  # working directory the process was started from
config = {}  # populated elsewhere at runtime

# Experiment hyper-parameters.
feature_num = 2
max_iteration = 8
max_test_iteration = 2
k = 3

db = 'tpch'  # active benchmark/database name
# Number of workload queries available per benchmark database.
workload_num = {'tpch': 20187, 'job': 3224, 'xuetangx': 22000}
# Tables of the JOB (IMDB) benchmark; the lists are filled in elsewhere.
table_job = {'aka_name': [], 'aka_title': [], 'cast_info': [], 'char_name': [], 'comp_cast_type': [], 'company_name': [], 'company_type': [], 'complete_cast': [], 'info_type': [], 'keyword': [], 'kind_type': [], 'link_type': [], 'movie_companies': [], 'movie_info': [], 'movie_info_idx': [], 'movie_keyword': [], 'movie_link': [], 'name': [], 'person_info': [], 'role_type': [], 'title': []}
v_feature_size = 6 # [tbl_id, tbl_size, distinct values, row len, select, aggregate]
def get_file(file_path, pattern="*"):
    """Collect the absolute paths of all regular files under a directory,
    searching recursively.

    Args:
        file_path: directory to search.
        pattern: glob pattern to match; defaults to "*" (all files), e.g.
            pattern="*.py" restricts the result to Python files.

    Returns:
        List of Path objects, one per matching regular file.
    """
    return [entry
            for entry in Path(file_path).rglob(pattern)
            if Path.is_file(entry)]
'''
BASE_DIR = Path('pybert')
config = {
'raw_data_path': BASE_DIR / 'dataset/train.csv',
'test_path': BASE_DIR / 'dataset/test_stage1.csv',
'data_dir': BASE_DIR / 'dataset',
'log_dir': BASE_DIR / 'output/log',
'writer_dir': BASE_DIR / "output/TSboard",
'figure_dir': BASE_DIR / "output/figure",
'checkpoint_dir': BASE_DIR / "output/checkpoints",
'cache_dir': BASE_DIR / 'model/',
'result': BASE_DIR / "output/result",
'bert_vocab_path': BASE_DIR / 'pretrain/bert/base-chinese/vocab.txt',
'bert_config_file': BASE_DIR / 'pretrain/bert/base-chinese/config.json',
'bert_model_dir': BASE_DIR / 'pretrain/bert/base-chinese',
'xlnet_vocab_path': BASE_DIR / 'pretrain/xlnet/base-cased/spiece.model',
'xlnet_config_file': BASE_DIR / 'pretrain/xlnet/base-cased/config.json',
'xlnet_model_dir': BASE_DIR / 'pretrain/xlnet/base-cased'
}
''' | [
"zhouxuan19@mails.tsinghua.edu.cn"
] | zhouxuan19@mails.tsinghua.edu.cn |
b59bb380bb991cabe3cefb26f89a8fa5c6f3da44 | 1113e8eec4ccbbcd00c6a9b5466c5239b6f0eb03 | /ops/core/rdb_icpocm.py | 972f5c82126e411356f275269175e3db0db53953 | [] | no_license | yizhong120110/CPOS | a05858c84e04ce4aa48b3bfb43ee49264ffc5270 | 68ddf3df6d2cd731e6634b09d27aff4c22debd8e | refs/heads/master | 2021-09-01T17:59:53.802095 | 2017-12-28T05:43:06 | 2017-12-28T05:43:06 | 106,247,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,260 | py | # -*- coding: utf-8 -*-
"""
通讯进程参数信息查询
"""
import ops.core.rdb
def get_txxx( txjclx ):
    """Look up communication-process parameters by process type (``jclx``)
    from the process-info table.

    Args:
        txjclx: process type value matched against ``gl_jcxxpz.jclx``.

    Returns:
        dict such as ``{'txwj': 'ops.ocm.short_tcp.jnfs_app',
        'bm': 'jnfs_app', 'txwjmc': 'jnfs_app'}``; empty dict when no
        active record matches.
    """
    txjdxx_dic = {}
    # NOTE(review): `connection` is presumably provided via the
    # ops.core.rdb import above -- confirm it is in this module's namespace.
    with connection() as db:
        sql = """
            select b.fwfx ,b.txlx ,b.txwjmc ,b.bm
            from gl_jcxxpz a ,gl_txgl b
            where a.jcmc = b.bm
            and a.zt = '1' and a.jclx = %(jclx)s
        """
        # SQL text and parameter dict are kept separate to prevent SQL injection
        d = {'jclx':txjclx}
        rs = db.execute_sql( sql ,d )
        obj = rs[0] if rs else None
        if obj:
            wjlj = ['ops']
            # fwfx == '1' selects the ocm side, anything else the icp side
            if str(obj['fwfx']) == "1":
                wjlj.append("ocm")
            else:
                wjlj.append("icp")
            wjlj.extend([obj['txlx'] ,obj['txwjmc'].split('.')[0]])
            # dotted module path of the communication file
            txjdxx_dic["txwj"] = ".".join(wjlj)
            # communication code, used later to fetch the parameters
            txjdxx_dic.update( {"bm":obj['bm']} )
            txjdxx_dic.update( {"txwjmc":obj['txwjmc'].split('.')[0]} )
    return txjdxx_dic
def get_txcs( txjclx ,txbm = None ):
    """Fetch the UI-configured parameter key/value pairs for a communication.

    Args:
        txjclx: process type, used to resolve ``txbm`` when it is not given.
        txbm: unique identifier in the communication-management table
            (``gl_txgl.bm``); looked up via :func:`get_txxx` when None.

    Returns:
        dict of parameter code -> value, e.g. ``{'IP': '127.0.0.1'}``;
        empty dict when the identifier cannot be resolved.
    """
    txcs_dic = {}
    # If txbm was supplied skip the lookup, otherwise resolve it once.
    if txbm == None:
        txxx = get_txxx( txjclx )
        if txxx:
            txbm = txxx["bm"]
        else:
            return txcs_dic
    with connection() as db:
        # SQL text and parameter dict are kept separate to prevent SQL injection
        sql = """
            select csdm ,value as csz
            from gl_csdy a ,gl_txgl b
            where a.lx = '4' and a.ssid = b.id
            and a.zt = '1'
            and b.bm = %(txbm)s
        """
        rs = db.execute_sql( sql ,{"txbm":txbm} )
        for obj in rs:
            txcs_dic.update({obj['csdm']:obj['csz']})
    return txcs_dic
if __name__ == '__main__':
print(get_txcs(get_txxx( 'jnfs_app' )["bm"]) )
| [
"yizhong120110@gmail.com"
] | yizhong120110@gmail.com |
e569dbc4052364132ab074eaf6c3b2e70407822b | d644b6cabb4fa88cf900c59799a2897f5a0702d8 | /tests/base_tests/multipolygon_tests/strategies.py | bd202eb0721c2b567aa55e8a3372cf4acb3b6804 | [
"MIT"
] | permissive | lycantropos/gon | c3f89a754c60424c8e2609e441d7be85af985455 | 177bd0de37255462c60adcbfcdf76bfdc343a9c1 | refs/heads/master | 2023-07-06T01:11:57.028646 | 2023-06-26T20:47:14 | 2023-06-27T00:30:06 | 194,597,548 | 15 | 1 | MIT | 2023-06-27T00:30:07 | 2019-07-01T04:06:06 | Python | UTF-8 | Python | false | false | 1,010 | py | from tests.strategies import (coordinates_strategies,
coordinates_to_multipolygons,
coordinates_to_points,
coordinates_to_polygons,
invalid_multipolygons)
from tests.utils import (cleave_in_tuples,
to_pairs,
to_triplets)
multipolygons = coordinates_strategies.flatmap(coordinates_to_multipolygons)
polygons = coordinates_strategies.flatmap(coordinates_to_polygons)
invalid_multipolygons = invalid_multipolygons
multipolygons_strategies = (coordinates_strategies
.map(coordinates_to_multipolygons))
multipolygons_pairs = multipolygons_strategies.flatmap(to_pairs)
multipolygons_triplets = multipolygons_strategies.flatmap(to_triplets)
multipolygons_with_points = (
(coordinates_strategies
.flatmap(cleave_in_tuples(coordinates_to_multipolygons,
coordinates_to_points)))
)
| [
"azatibrakov@gmail.com"
] | azatibrakov@gmail.com |
df941af2e5b59c83931a8144e389f0875fe8898b | 804a81e52d5fc7fd1078268a3a2976ca80a91880 | /nengo/test/test_new_api.py | 14494ef583ac2047ea4e4f779770ce9dca7474ec | [
"MIT"
] | permissive | jvitku/nengo | 692bbc56717acf476cfb384b3cf0affa71135c40 | 484b9244a32ff1011a1292b24225752db75fc3b2 | refs/heads/master | 2020-12-25T05:42:34.936881 | 2013-07-10T16:50:38 | 2013-07-10T19:48:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,948 | py | from pprint import pprint
from unittest import TestCase
from matplotlib import pyplot as plt
import nose
import numpy as np
from nengo.nonlinear import LIF
from nengo.model import Model
def rmse(a, b):
    """Return the root-mean-square error between *a* and *b*."""
    squared_error = (a - b) ** 2
    return np.sqrt(np.mean(squared_error))
class TestNewAPI(TestCase):
show = False
def test_direct_mode_simple(self):
    """A direct-mode sin(t) node probed for 10 ms reproduces np.sin at the
    simulator's 1 ms timestep (Python 2 codebase: print statements)."""
    model = Model('Runtime Test', seed=123, backend='numpy')
    model.make_node('in', output=np.sin)
    model.probe('in')
    res = model.run(0.01)
    data = res['in']
    print data.dtype
    print data
    assert np.allclose(data.flatten(), np.sin(np.arange(0, 0.0095, .001)))
def test_basic_1(self, N=1000):
    """
    Create a network with sin(t) being represented by
    a population of N spiking LIF neurons. Assert that the
    decoded value from the population is close to the
    true value (which is input to the population).

    Expected duration of test: about .7 seconds
    """
    model = Model('Runtime Test', seed=123, backend='numpy')
    model.make_node('in', output=np.sin)
    model.make_ensemble('A', LIF(N), 1)
    model.connect('in', 'A')
    # Probe the decoded value with three synaptic time constants (pstc);
    # repeated probes of 'A' are keyed 'A', 'A_1', 'A_2'.
    model.probe('A', sample_every=0.01, pstc=0.001)  # 'A'
    model.probe('A', sample_every=0.01, pstc=0.01)  # 'A_1'
    model.probe('A', sample_every=0.01, pstc=0.1)  # 'A_2'
    model.probe('in', sample_every=0.01, pstc=0.01)
    pprint(model.o)

    res = model.run(1.0)

    # reference signal at the 10 ms sampling grid
    target = np.sin(np.arange(0, 1000, 10) / 1000.)
    target.shape = (100, 1)

    for A, label in (('A', 'fast'), ('A_1', 'med'), ('A_2', 'slow')):
        data = np.asarray(res[A]).flatten()
        plt.plot(data, label=label)
    in_data = np.asarray(res['in']).flatten()
    plt.plot(in_data, label='in')
    plt.legend(loc='upper left')

    #print in_probe.get_data()
    #print net.sim.sim_step

    if self.show:
        plt.show()

    # target is off-by-one at the sampling frequency of dt=0.001
    # RMSE tolerances loosen as the synaptic filter gets slower.
    print rmse(target, res['in'])
    assert rmse(target, res['in']) < .001
    print rmse(target, res['A'])
    assert rmse(target, res['A']) < .3
    print rmse(target, res['A_1'])
    assert rmse(target, res['A_1']) < .03
    print rmse(target, res['A_2'])
    assert rmse(target, res['A_2']) < 0.1
def test_basic_5K(self):
return self.test_basic_1(5000)
def test_matrix_mul(self):
# Adjust these values to change the matrix dimensions
# Matrix A is D1xD2
# Matrix B is D2xD3
# result is D1xD3
D1 = 1
D2 = 2
D3 = 3
seed = 123
N = 50
model = Model('Matrix Multiplication', seed=seed, backend='numpy')
# values should stay within the range (-radius,radius)
radius = 1
# make 2 matrices to store the input
model.make_ensemble('A', LIF(N), D1*D2, radius=radius)
model.make_ensemble('B', LIF(N), D2*D3, radius=radius)
# connect inputs to them so we can set their value
model.make_node('input A', [0] * D1 * D2)
model.make_node('input B', [0] * D2 * D3)
model.connect('input A', 'A')
model.connect('input B', 'B')
# the C matrix holds the intermediate product calculations
# need to compute D1*D2*D3 products to multiply 2 matrices together
model.make_ensemble('C', LIF(4 * N), D1 * D2 * D3, # dimensions=2,
radius=1.5*radius)
# encoders=[[1,1], [1,-1], [-1,1], [-1,-1]])
# determine the transformation matrices to get the correct pairwise
# products computed. This looks a bit like black magic but if
# you manually try multiplying two matrices together, you can see
# the underlying pattern. Basically, we need to build up D1*D2*D3
# pairs of numbers in C to compute the product of. If i,j,k are the
# indexes into the D1*D2*D3 products, we want to compute the product
# of element (i,j) in A with the element (j,k) in B. The index in
# A of (i,j) is j+i*D2 and the index in B of (j,k) is k+j*D3.
# The index in C is j+k*D2+i*D2*D3, multiplied by 2 since there are
# two values per ensemble. We add 1 to the B index so it goes into
# the second value in the ensemble.
transformA = [[0] * (D1 * D2) for i in range(D1 * D2 * D3 * 2)]
transformB = [[0] * (D2 * D3) for i in range(D1 * D2 * D3 * 2)]
for i in range(D1):
for j in range(D2):
for k in range(D3):
ix = (j + k * D2 + i * D2 * D3) * 2
transformA[ix][j + i * D2] = 1
transformB[ix + 1][k + j * D3] = 1
model.connect('A', 'C', transform=transformA)
model.connect('B', 'C', transform=transformB)
# now compute the products and do the appropriate summing
model.make_ensemble('D', LIF(N), D1 * D3, radius=radius)
def product(x):
return x[0] * x[1]
# the mapping for this transformation is much easier, since we want to
# combine D2 pairs of elements (we sum D2 products together)
model.connect('C', 'D', index_post=[i / D2 for i in range(D1*D2*D3)],
func=product)
model.get('input A').origin['X'].decoded_output.set_value(
np.asarray([.5, -.5]).astype('float32'))
model.get('input B').origin['X'].decoded_output.set_value(
np.asarray([0, 1, -1, 0]).astype('float32'))
pprint(model.o)
Dprobe = model.probe('D')
model.run(1)
net_data = Dprobe.get_data()
print net_data.shape
plt.plot(net_data[:, 0])
plt.plot(net_data[:, 1])
if self.show:
plt.show()
nose.SkipTest('test correctness')
| [
"tbekolay@gmail.com"
] | tbekolay@gmail.com |
eae545f5af90d8c3c562142d45cf233bbe774293 | 2f4ae73c68637306c878a5234fc3b81950de8854 | /tests/compiler/test_pre_parser.py | 640bc673a35ff9c272b30198c186ce9e2bdc561d | [
"MIT"
] | permissive | ltfschoen/vyper | b121cf1f320f852b7997b0d54eaff5e68163e66e | f68af5730516011007e2546ff825b881e94f030f | refs/heads/master | 2020-03-11T02:42:28.688320 | 2018-04-17T12:42:59 | 2018-04-17T12:42:59 | 129,726,567 | 0 | 0 | MIT | 2018-04-16T10:34:58 | 2018-04-16T10:34:57 | null | UTF-8 | Python | false | false | 1,317 | py | from vyper.exceptions import StructureException
from pytest import raises
def test_semicolon_prohibited(get_contract):
    """Separating statements with ';' must be rejected as a structure error."""
    source = """@public
def test() -> int128:
    a: int128 = 1; b: int128 = 2
    return a + b
"""
    with raises(StructureException):
        get_contract(source)
def test_valid_semicolons(get_contract):
    """Semicolons inside string literals (all quoting/escaping styles) must
    not be treated as statement separators by the pre-parser."""
    code = """
@public
def test() -> int128:
    a: int128 = 1
    b: int128 = 2
    s: bytes[300] = "this should not be a problem; because it is in a string"
    s = \"\"\"this should not be a problem; because it's in a string\"\"\"
    s = 'this should not be a problem;;; because it\\\'s in a string'
    s = '''this should not ; \'cause it\'s in a string'''
    s = "this should not be \\\"; because it's in a ;\\\"string;\\\";"
    return a + b
"""
    c = get_contract(code)
    assert c.test() == 3
def test_external_contract_definition_alias(get_contract):
    """A contract handle declared through a `static(...)` alias can be
    assigned from an address argument and then called through."""
    callee_source = """
@public
def bar() -> int128:
    return 1
"""
    caller_source = """
contract Bar():
    def bar() -> int128: pass
bar_contract: static(Bar)
@public
def foo(contract_address: contract(Bar)) -> int128:
    self.bar_contract = contract_address
    return self.bar_contract.bar()
"""
    callee = get_contract(callee_source)
    caller = get_contract(caller_source)
    assert caller.foo(callee.address) == 1
| [
"jacques@dilectum.co.za"
] | jacques@dilectum.co.za |
1ca1632321aee4063fff9a55b986fe6e9ff62a8b | 6bdb32ddbd72c4337dab12002ff05d6966538448 | /gridpack_folder/mc_request/LHEProducer/Spin-2/BulkGraviton_hh_hVVhbb_inclusive/BulkGraviton_hh_hVVhbb_inclusive_narrow_M450_13TeV-madgraph_cff.py | 03b465498d74fe0f6d139a62106502f6a31c8127 | [] | no_license | cyrilbecot/DibosonBSMSignal_13TeV | 71db480de274c893ba41453025d01bfafa19e340 | d8e685c40b16cde68d25fef9af257c90bee635ba | refs/heads/master | 2021-01-11T10:17:05.447035 | 2016-08-17T13:32:12 | 2016-08-17T13:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/31b6e7510443b74e0f9aac870e4eb9ae30c19d65/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-2/BulkGraviton_hh_hVVhbb_inclusive/BulkGraviton_hh_hVVhbb_inclusive_narrow_M450
# LHE event production step: unpacks the pre-built MadGraph gridpack from
# CVMFS and runs it to produce cmsgrid_final.lhe.
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-2/BulkGraviton_hh_hVVhbb_inclusive/narrow/v1/BulkGraviton_hh_hVVhbb_inclusive_narrow_M450_tarball.tar.xz'),
    nEvents = cms.untracked.uint32(5000),  # events generated per run
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"syu@cern.ch"
] | syu@cern.ch |
645847f61e91b01dc0dc99a5a8f3216f229bb86c | ad23b164febd12d5c6d97cfbcd91cf70e2914ab3 | /TestCaseFunction/main/run_all_test_createActivity.py | fd0b6a13a6bce27051cfae09df9786c9dc4ddad3 | [] | no_license | wawj901124/webtestdata | 9eedf9a01dec2c157725299bda9a42e8d357ef0b | 54f6412566fce07ece912760c5caea73ede819cb | refs/heads/master | 2022-12-09T14:18:38.125191 | 2021-04-25T07:54:07 | 2021-04-25T07:54:07 | 175,773,318 | 1 | 1 | null | 2022-12-08T02:39:15 | 2019-03-15T07:49:16 | Python | UTF-8 | Python | false | false | 4,042 | py | import unittest
# 在jenkins运行时经常提示找不到包,所以就需要手动添加PYTHONPATH,通过追加sys.path列表来实现
import os
import sys
rootpath = str(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
syspath = sys.path
sys.path = []
sys.path.append(rootpath) # 将工程根目录加入到python搜索路径中
sys.path.extend([rootpath + i for i in os.listdir(rootpath) if i[0] != "."]) # 将工程目录下的一级目录添加到python搜索路径中
sys.path.extend(syspath)
# 追加完成
from TestCaseFunction.htmltest import HTMLTestRunner_jietuxiugai as HTMLTestRunner
from test import *
from TestCaseFunction.test.alltest_list_create_activity import caselist #调用数组文件
from TestCaseFunction.util.gettimestr import GetTimeStr
from TestCaseFunction.util.send_attach_email import SendEmail
from TestCaseFunction.log.my_log import UserLog
class RunAllTest(unittest.TestCase):
    """Builds a suite from the configured case list, runs it with the HTML
    report runner, and mails the resulting report/log files.

    NOTE(review): overriding ``run`` on a ``unittest.TestCase`` subclass
    replaces the framework's run protocol -- confirm this class is only
    driven directly (as in ``__main__`` below), not via a test loader.
    """
    def runAllTest(self):
        """Load every configured case, run the suite, and e-mail the
        generated HTML report."""
        # Assemble the configured test-case names into a suite.
        alltestnames = caselist()
        suite=unittest.TestSuite()
        for testpy in alltestnames:
            try:
                suite.addTest(unittest.defaultTestLoader.loadTestsFromName(testpy)) # load each case by dotted name
            except Exception:
                print('ERROR: Skipping tests from "%s".' % testpy)
                try:
                    # NOTE(review): ``test`` here is a module object, but
                    # ``__import__`` expects a string -- this fallback likely
                    # raises TypeError instead of re-importing.
                    __import__(test)
                except ImportError:
                    print('Could not import the "%s" test module.'% testpy)
                else:
                    print('Could not load the "%s" test suite.' % testpy)
                from traceback import print_exc
                print_exc()
        self.outPutMyLog('Running the tests...')
        # print('Running the tests...')
        gettime = GetTimeStr()
        # Time-stamped report path under the package's report/ directory.
        filename = '%s/report/%s_report.html' % (str(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),gettime.getTimeStr())
        fp = open(filename, 'wb')
        self.outPutMyLog('The report path:%s' % filename)
        # Configure the HTML test report runner.
        runner = HTMLTestRunner.HTMLTestRunner(
            stream=fp,
            title=u'python 自动化测试_测试报告',
            description=u'用例执行情况:',
            verbosity=2) # verbosity=2 also records output printed by the cases
        runner.run(suite)
        fp.close()
        # Send the report file as a mail attachment.
        send_e = SendEmail()
        send_e.send_main([1], [2], filename)
    def outPutMyLog(self, context):
        """Write *context* to the shared log via UserLog."""
        mylog = UserLog(context)
        mylog.runMyLog()
    def run(self):
        """Run the whole suite while stdout is redirected to a time-stamped
        log file, then restore stdout and e-mail the log."""
        self.outPutMyLog('---------------------------')
        # print('---------------------------')
        stdout_backup = sys.stdout
        gettime = GetTimeStr()
        timestr = gettime.getTimeStr()
        # define the log file that receives your log info
        logpath = "%s/log/%s_message.txt" % (str(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),timestr)
        log_file = open(logpath, "w", encoding="utf-8")
        self.outPutMyLog('Now all print info will be written to message.log')
        # print("Now all print info will be written to message.log")
        # redirect print output to log file
        sys.stdout = log_file
        self.outPutMyLog('----------开始打印日志-----------------\n')
        # print('----------开始打印日志-----------------\n')
        # any command line that you will execute
        self.runAllTest()
        self.outPutMyLog('\n----------日志打印结束-----------------')
        # print('\n----------日志打印结束-----------------')
        log_file.close()
        # restore the output to initial pattern
        sys.stdout = stdout_backup
        self.outPutMyLog('Now this will be presented on screen')
        # print("Now this will be presented on screen")
        # Send the log file as a mail attachment.
        send_e = SendEmail()
        send_e.send_main([1], [2], logpath)
# Entry point: run the whole suite directly (without stdout redirection).
if __name__ == '__main__':
    runat = RunAllTest()
    # runat.run()
    runat.runAllTest()
| [
"410287958@qq.com"
] | 410287958@qq.com |
3375c9d7e789e362ffbc2a148ed3e347e6f8f559 | 62718778da7e683be16ede27bdc2aaf1695be5ec | /routing_classifier/predict_bert.py | b730128378a6e307bd4465527d604911224486c1 | [] | no_license | GihanMora/QA_LEAP | 25a65685df41360dc8385391434b5ba25e115f1b | 67b2bbdde471202ff6e4b69b831b8321d733fd89 | refs/heads/master | 2023-09-04T18:23:39.720062 | 2021-11-11T11:52:49 | 2021-11-11T11:52:49 | 426,982,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | import ast
import numpy as np
import pandas as pd
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoTokenizer, AutoModel
from routing_classifier.building_embedding_space_bert import get_mean_pooling_emb
# Pretrained HuggingFace model identifier and module-level model/tokenizer.
model_path = 'bert-base-uncased'
vocab_path = 'bert-base-uncased'  # NOTE(review): appears unused below -- verify
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModel.from_pretrained(model_path)
def predict_class_bert(sentence, embedding_space):
    """Score *sentence* against every class embedding and print all classes
    ordered by ascending cosine similarity (most similar last)."""
    query_emb = get_mean_pooling_emb([sentence], tokenizer, model)
    scored = [
        [row['classes'],
         np.round(cosine_similarity(ast.literal_eval(row['embeddings']), query_emb), 3)]
        for _, row in embedding_space.iterrows()
    ]
    # Stable sort on the (rounded) similarity score.
    print(sentence, sorted(scored, key=lambda pair: pair[1]))
def predict_class_bert_tokenwise(sentence, embedding_space):
    """Run predict_class_bert on each space-delimited token of *sentence*."""
    for token in sentence.split(' '):
        predict_class_bert(token, embedding_space)
# Ad-hoc smoke run: rank classes for a sample query (whole sentence).
embedding_space = pd.read_csv(r"E:\Projects\LEAP_Gihan\QA_LEAP\routing_classifier\embedding_spaces\embedding_space_bert.csv")
predict_class_bert('what is the energy consumption of library?',embedding_space)
predict_class_bert_tokenwise('what is the energy consumption of library?',embedding_space) | [
"gihangamage.15@cse.mrt.ac.lk"
] | gihangamage.15@cse.mrt.ac.lk |
524fa9c06c6a7e63bf445458738e0daf7b313979 | ff871c8dc30b34070cc3e0ea6a31e658158b7c63 | /PaddleVision/ImageClassification/models/mobilenet.py | 4a1154e1a5bd03a241effb8e4ef05bc4d8636929 | [
"Apache-2.0"
] | permissive | SunAhong1993/PaddleSolution | 3da9255849b520a6fb7d7b1eda5a2da48f9127e4 | 46ee3812c66c0dc436c96be8330b7c7d931604b2 | refs/heads/master | 2020-07-05T05:06:08.133802 | 2019-08-20T12:19:24 | 2019-08-20T12:19:24 | 202,531,666 | 2 | 0 | Apache-2.0 | 2019-08-20T06:39:21 | 2019-08-15T11:48:03 | Jupyter Notebook | UTF-8 | Python | false | false | 6,201 | py | #copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = ['MobileNet']

# Default training configuration; exposed to callers via MobileNet().params.
train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
    "input_std": [0.229, 0.224, 0.225],
    "learning_strategy": {
        "name": "piecewise_decay",
        "batch_size": 256,
        "epochs": [30, 60, 90],
        "steps": [0.1, 0.01, 0.001, 0.0001]
    }
}
class MobileNet():
    """MobileNet v1 classification backbone (PaddlePaddle fluid graph API).

    The spatial-size comments below (112x112, 56x56, ...) assume a
    3x224x224 input -- TODO confirm against the training pipeline.
    """
    def __init__(self):
        # Default training hyper-parameters; consumers read them via .params.
        self.params = train_parameters

    def net(self, input, class_dim=1000, scale=1.0):
        """Build the forward graph and return the pre-softmax FC output.

        Args:
            input: image tensor variable (NCHW, 3 channels).
            class_dim: number of classes of the final FC layer.
            scale: width multiplier applied to every channel count.
        """
        # conv1: 112x112
        input = self.conv_bn_layer(
            input,
            filter_size=3,
            channels=3,
            num_filters=int(32 * scale),
            stride=2,
            padding=1,
            name="conv1")
        # 56x56
        input = self.depthwise_separable(
            input,
            num_filters1=32,
            num_filters2=64,
            num_groups=32,
            stride=1,
            scale=scale,
            name="conv2_1")
        input = self.depthwise_separable(
            input,
            num_filters1=64,
            num_filters2=128,
            num_groups=64,
            stride=2,
            scale=scale,
            name="conv2_2")
        # 28x28
        input = self.depthwise_separable(
            input,
            num_filters1=128,
            num_filters2=128,
            num_groups=128,
            stride=1,
            scale=scale,
            name="conv3_1")
        input = self.depthwise_separable(
            input,
            num_filters1=128,
            num_filters2=256,
            num_groups=128,
            stride=2,
            scale=scale,
            name="conv3_2")
        # 14x14
        input = self.depthwise_separable(
            input,
            num_filters1=256,
            num_filters2=256,
            num_groups=256,
            stride=1,
            scale=scale,
            name="conv4_1")
        input = self.depthwise_separable(
            input,
            num_filters1=256,
            num_filters2=512,
            num_groups=256,
            stride=2,
            scale=scale,
            name="conv4_2")
        # 14x14
        # Five identical 512->512 blocks (conv5_1 .. conv5_5).
        for i in range(5):
            input = self.depthwise_separable(
                input,
                num_filters1=512,
                num_filters2=512,
                num_groups=512,
                stride=1,
                scale=scale,
                name="conv5" + "_" + str(i + 1))
        # 7x7
        input = self.depthwise_separable(
            input,
            num_filters1=512,
            num_filters2=1024,
            num_groups=512,
            stride=2,
            scale=scale,
            name="conv5_6")
        input = self.depthwise_separable(
            input,
            num_filters1=1024,
            num_filters2=1024,
            num_groups=1024,
            stride=1,
            scale=scale,
            name="conv6")
        # Global average pooling down to 1x1 before the classifier.
        input = fluid.layers.pool2d(
            input=input,
            pool_size=0,
            pool_stride=1,
            pool_type='avg',
            global_pooling=True)

        output = fluid.layers.fc(input=input,
                                 size=class_dim,
                                 param_attr=ParamAttr(
                                     initializer=MSRA(), name="fc7_weights"),
                                 bias_attr=ParamAttr(name="fc7_offset"))
        return output

    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      channels=None,
                      num_groups=1,
                      act='relu',
                      use_cudnn=True,
                      name=None):
        """Bias-free convolution followed by batch norm with activation.

        ``num_groups == num_filters`` makes this a depthwise convolution
        (cuDNN is disabled for that case by the caller).
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + "_weights"),
            bias_attr=False)
        bn_name = name + "_bn"
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def depthwise_separable(self,
                            input,
                            num_filters1,
                            num_filters2,
                            num_groups,
                            stride,
                            scale,
                            name=None):
        """Depthwise 3x3 convolution followed by pointwise 1x1 convolution."""
        depthwise_conv = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=int(num_filters1 * scale),
            stride=stride,
            padding=1,
            num_groups=int(num_groups * scale),
            use_cudnn=False,
            name=name + "_dw")

        pointwise_conv = self.conv_bn_layer(
            input=depthwise_conv,
            filter_size=1,
            num_filters=int(num_filters2 * scale),
            stride=1,
            padding=0,
            name=name + "_sep")
        return pointwise_conv
| [
"jiangjiajun@baidu.com"
] | jiangjiajun@baidu.com |
c3ed3b484ab64c7d5b85380754f2f47d6a7e4939 | df716b2868b289a7e264f8d2b0ded52fff38d7fc | /tests/formatters/symantec.py | f1b0886c63474d244c14ce69700bf5c780427667 | [
"Apache-2.0"
] | permissive | ir4n6/plaso | 7dd3cebb92de53cc4866ae650d41c255027cf80a | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | refs/heads/master | 2021-04-25T05:50:45.963652 | 2018-03-08T15:11:58 | 2018-03-08T15:11:58 | 122,255,666 | 0 | 0 | Apache-2.0 | 2018-02-20T21:00:50 | 2018-02-20T21:00:50 | null | UTF-8 | Python | false | false | 1,167 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Symantec AV log file event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import symantec
from tests.formatters import test_lib
class SymantecAVFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Symantec AV log file event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    formatter_object = symantec.SymantecAVFormatter()
    self.assertIsNotNone(formatter_object)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter_object = symantec.SymantecAVFormatter()

    # Attribute names in the order they appear in the format strings.
    expected_attribute_names = [
        'event_map', 'category_map', 'virus', 'file', 'action0_map',
        'action1_map', 'action2_map', 'description', 'scanid', 'event_data',
        'remote_machine', 'remote_machine_ip']

    self._TestGetFormatStringAttributeNames(
        formatter_object, expected_attribute_names)
# TODO: add test for GetMessages.

# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
013f6557d21e5cb87dcc3029821b093ccc528416 | dc6750f77b60b188c5161f09c831622de76f84d4 | /andros/euterpe-master/euterpe/model_asr/seq2seq/v2/encrnn_decrnn_att_asr.py | 6e1e3ff0bc7a0609d7c141b5c63e04a59382f627 | [] | no_license | gudwns1215/latest | 614edb900167178845d99a0dfdfc732b625e26f5 | 0d0b96aaaecb05039da5b6faf81c2f3e78d8087c | refs/heads/master | 2020-06-03T22:36:13.664965 | 2018-10-08T03:03:50 | 2018-10-08T03:03:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,554 | py |
import sys
import time
import re
import numpy as np
import json
# pytorch #
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack
# torchev #
from torchev.generator import generator_rnn, generator_attention, generator_act_fn, generator_act_module
from torchev.custom import decoder
from torchev.utils.helper import torchauto
from torchev.utils.mask_util import generate_seq_mask
from torchev.nn.modules import LayerNorm
# utilbox #
from utilbox.config_util import ConfigParser
class EncRNNDecRNNAtt(nn.Module) :
    """Attention-based encoder-decoder sequence-to-sequence ASR model.

    Encoder: per-frame FNN stack followed by (optionally bidirectional) RNN
    layers with optional time downsampling.  Decoder: symbol embedding plus
    an attentional RNN decoder producing pre-softmax output scores.

    NOTE(review): the mutable default arguments (lists/dicts) are shared
    across calls; they look read-only here, but confirm no caller mutates
    them in place.
    """
    def __init__(self, enc_in_size, dec_in_size, dec_out_size,
            enc_fnn_sizes=[512], enc_fnn_act='LeakyReLU', enc_fnn_do=0.25,
            enc_rnn_sizes=[256, 256, 256], enc_rnn_cfgs={"type":"lstm", "bi":True}, enc_rnn_do=0.25,
            downsampling=[False, True, True],
            dec_emb_size=256, dec_emb_do=0.25, dec_emb_tied_weight=True,
            # tying weight from char/word embedding with softmax layer
            dec_rnn_sizes=[512, 512], dec_rnn_cfgs={"type":"lstm"}, dec_rnn_do=0.25,
            dec_cfg={"type":"standard_decoder"},
            att_cfg={"type":"mlp"},
            use_layernorm=False,
            ) :
        super().__init__()
        # Record configuration; scalar dropout/cfg values are broadcast to
        # per-layer lists via ConfigParser.list_parser.
        self.enc_in_size = enc_in_size
        self.dec_in_size = dec_in_size
        self.dec_out_size = dec_out_size
        self.enc_fnn_sizes = enc_fnn_sizes
        self.enc_fnn_act = enc_fnn_act
        self.enc_fnn_do = ConfigParser.list_parser(enc_fnn_do, len(enc_fnn_sizes))
        self.enc_rnn_sizes = enc_rnn_sizes
        self.enc_rnn_cfgs = enc_rnn_cfgs
        self.enc_rnn_do = ConfigParser.list_parser(enc_rnn_do, len(enc_rnn_sizes))
        self.downsampling = ConfigParser.list_parser(downsampling, len(enc_rnn_sizes))
        self.dec_emb_size = dec_emb_size
        self.dec_emb_do = dec_emb_do
        self.dec_emb_tied_weight = dec_emb_tied_weight
        self.dec_rnn_sizes = dec_rnn_sizes
        self.dec_rnn_cfgs = ConfigParser.list_parser(dec_rnn_cfgs, len(dec_rnn_sizes))
        self.dec_rnn_do = ConfigParser.list_parser(dec_rnn_do, len(dec_rnn_sizes))
        self.dec_cfg = dec_cfg
        self.att_cfg = att_cfg
        self.use_layernorm = use_layernorm
        if self.use_layernorm == True :
            raise ValueError("LayerNorm is not implemented yet")
        # modules #
        # init encoder #
        prev_size = enc_in_size
        _tmp = []
        for ii in range(len(enc_fnn_sizes)) :
            _tmp.append(nn.Linear(prev_size, enc_fnn_sizes[ii]))
            if use_layernorm :
                _tmp.append(LayerNorm(enc_fnn_sizes[ii]))
            _tmp.append(generator_act_module(enc_fnn_act))
            _tmp.append(nn.Dropout(p=self.enc_fnn_do[ii]))
            prev_size = enc_fnn_sizes[ii]
        self.enc_fnn_lyr = nn.Sequential(*_tmp)
        self.enc_rnn_lyr = nn.ModuleList()
        _enc_rnn_cfgs = ConfigParser.list_parser(enc_rnn_cfgs, len(enc_rnn_sizes))
        for ii in range(len(enc_rnn_sizes)) :
            _rnn_cfg = {}
            _rnn_cfg['type'] = _enc_rnn_cfgs[ii]['type']
            # presumably args order matches the torchev RNN constructor
            # [in_size, hidden, layers, bias, batch_first, dropout, bi] -- TODO confirm
            _rnn_cfg['args'] = [prev_size, enc_rnn_sizes[ii], 1, True, True, 0, _enc_rnn_cfgs[ii]['bi']]
            self.enc_rnn_lyr.append(generator_rnn(_rnn_cfg))
            # Bidirectional layers double the feature size fed onward.
            prev_size = enc_rnn_sizes[ii] * (2 if _enc_rnn_cfgs[ii]['bi'] else 1)
        final_enc_size = prev_size
        # init decoder #
        self.dec_emb_lyr = nn.Embedding(self.dec_in_size, dec_emb_size, padding_idx=None)
        prev_size = dec_emb_size
        _dec_rnn_cfgs = ConfigParser.list_parser(dec_rnn_cfgs, len(dec_rnn_sizes))
        for ii in range(len(dec_rnn_sizes)) :
            _type = _dec_rnn_cfgs[ii]['type']
            # Decoder cells must use the "stateful_*cell" variants.
            if re.match('stateful.*cell', _type) is None :
                _dec_rnn_cfgs[ii]['type'] = 'stateful_{}cell'.format(_type)
        # TODO : dec_cfg #
        assert 'type' in dec_cfg, "decoder type need to be defined"
        if dec_cfg['type'] == 'standard_decoder' :
            _tmp_dec_cfg = dict(dec_cfg)
            del _tmp_dec_cfg['type'] #
            self.dec_att_lyr = decoder.StandardDecoder(att_cfg=att_cfg, ctx_size=final_enc_size, in_size=dec_emb_size,
                    rnn_sizes=dec_rnn_sizes, rnn_cfgs=_dec_rnn_cfgs, rnn_do=dec_rnn_do, **_tmp_dec_cfg)
        else :
            raise NotImplementedError("decoder type {} is not found".format(dec_cfg['type']))
        self.dec_presoftmax_lyr = nn.Linear(self.dec_att_lyr.output_size, dec_out_size)
        if dec_emb_tied_weight :
            # Weight tying requires matching vocabulary and feature sizes.
            assert dec_out_size == dec_in_size and self.dec_emb_lyr.embedding_dim == self.dec_presoftmax_lyr.in_features
            self.dec_presoftmax_lyr.weight = self.dec_emb_lyr.weight
        pass
    def get_config(self) :
        """Return a plain-dict snapshot of the constructor configuration."""
        # TODO
        return {'class':str(self.__class__),
                'enc_in_size':self.enc_in_size,
                'dec_in_size':self.dec_in_size,
                'dec_out_size':self.dec_out_size,
                'enc_fnn_sizes':self.enc_fnn_sizes,
                'enc_fnn_act':self.enc_fnn_act,
                'enc_fnn_do':self.enc_fnn_do,
                'enc_rnn_sizes':self.enc_rnn_sizes,
                'enc_rnn_cfgs':self.enc_rnn_cfgs,
                'enc_rnn_do':self.enc_rnn_do,
                'downsampling':self.downsampling,
                'dec_emb_size':self.dec_emb_size,
                'dec_emb_do':self.dec_emb_do,
                'dec_emb_tied_weight':self.dec_emb_tied_weight,
                'dec_rnn_sizes':self.dec_rnn_sizes,
                'dec_rnn_cfgs':self.dec_rnn_cfgs,
                'dec_rnn_do':self.dec_rnn_do,
                'dec_cfg':self.dec_cfg,
                'att_cfg':self.att_cfg,
                'use_layernorm':self.use_layernorm
                }
    @property
    def state(self) :
        # Decoder recurrent state, wrapped in a tuple to mirror the setter.
        return (self.dec_att_lyr.state, )
    @state.setter
    def state(self, value) :
        self.dec_att_lyr.state = value[0]
    def encode(self, input, src_len=None) :
        """
        Run the encoder and register its output as the attention context.

        input : (batch x max_src_len x in_size)
        src_len : optional per-utterance lengths (list of ints, length
            batch) used for packing and masking -- TODO confirm
        """
        batch, max_src_len, in_size = input.size()
        if src_len is None :
            src_len = [max_src_len] * batch
        # Apply the FNN stack frame-wise by flattening (batch, time).
        res = input.view(batch * max_src_len, in_size)
        res = self.enc_fnn_lyr(res)
        res = res.view(batch, max_src_len, -1)
        for ii in range(len(self.enc_rnn_lyr)) :
            res = pack(res, src_len, batch_first=True)
            res = self.enc_rnn_lyr[ii](res)[0] # get h only #
            res,_ = unpack(res, batch_first=True)
            res = F.dropout(res, self.enc_rnn_do[ii], self.training)
            if self.downsampling[ii] == True :
                # Keep every second frame and halve the recorded lengths.
                res = res[:, 1::2]
                src_len = [x // 2 for x in src_len]
            pass
        ctx = res
        # create mask if required #
        if src_len is not None :
            ctx_mask = Variable(generate_seq_mask(src_len, self, max_len=ctx.size(1)))
        else :
            ctx_mask = None
        self.dec_att_lyr.set_ctx(ctx, ctx_mask)
    def reset(self) :
        """Clear the decoder state before starting a new sequence."""
        self.dec_att_lyr.reset()
    def decode(self, y_tm1, mask=None) :
        """One decoding step given the previous symbol ids (batch,).

        Returns (pre-softmax scores, full decoder output dict).
        """
        assert y_tm1.dim() == 1, "batchsize only"
        res = self.dec_emb_lyr(y_tm1)
        if self.dec_emb_do > 0.0 :
            res = F.dropout(res, self.dec_emb_do, self.training)
        res = self.dec_att_lyr(res, mask)
        return self.dec_presoftmax_lyr(res['dec_output']), res
| [
"kano.takatomo.km0@is.naist.jp"
] | kano.takatomo.km0@is.naist.jp |
0995fea88a29d3ee76e12db1750b10190ae20cc1 | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/ops/numpy_ops/np_utils_test.py | 2cb8f64324e9e9d6d47bcab8981ed3a8b6d8439f | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,674 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.ops.numpy_ops import np_utils
from tensorflow.python.platform import test
class UtilsTest(test.TestCase, parameterized.TestCase):
    """Tests for the np_doc decorator machinery in np_utils."""

    def setUp(self):
        super(UtilsTest, self).setUp()
        # Save the global np_utils flags so tearDown can restore them.
        self._old_np_doc_form = np_utils.get_np_doc_form()
        self._old_is_sig_mismatch_an_error = np_utils.is_sig_mismatch_an_error()

    def tearDown(self):
        np_utils.set_np_doc_form(self._old_np_doc_form)
        np_utils.set_is_sig_mismatch_an_error(self._old_is_sig_mismatch_an_error)
        super(UtilsTest, self).tearDown()

    # pylint: disable=unused-argument
    def testNpDocInlined(self):
        """The "inlined" form embeds the NumPy docstring verbatim."""
        def np_fun(x, y, z):
            """np_fun docstring."""
            return

        np_utils.set_np_doc_form("inlined")

        @np_utils.np_doc(None, np_fun=np_fun, unsupported_params=["x"])
        def f(x, z):
            """f docstring."""
            return

        expected = """TensorFlow variant of NumPy's `np_fun`.
Unsupported arguments: `x`, `y`.
f docstring.
Documentation for `numpy.np_fun`:
np_fun docstring."""
        self.assertEqual(expected, f.__doc__)

    @parameterized.named_parameters(
        [
            (version, version, link)
            for version, link in [  # pylint: disable=g-complex-comprehension
                (
                    "dev",
                    "https://numpy.org/devdocs/reference/generated/numpy.np_fun.html",
                ),
                (
                    "stable",
                    "https://numpy.org/doc/stable/reference/generated/numpy.np_fun.html",
                ),
                (
                    "1.16",
                    "https://numpy.org/doc/1.16/reference/generated/numpy.np_fun.html",
                ),
            ]
        ]
    )
    def testNpDocLink(self, version, link):
        """A version-valued form produces a link to the versioned NumPy docs."""
        def np_fun(x, y, z):
            """np_fun docstring."""
            return

        np_utils.set_np_doc_form(version)

        @np_utils.np_doc(None, np_fun=np_fun, unsupported_params=["x"])
        def f(x, z):
            """f docstring."""
            return

        expected = """TensorFlow variant of NumPy's `np_fun`.
Unsupported arguments: `x`, `y`.
f docstring.
See the NumPy documentation for [`numpy.np_fun`](%s)."""
        expected = expected % (link)
        self.assertEqual(expected, f.__doc__)

    @parameterized.parameters([None, 1, "a", "1a", "1.1a", "1.1.1a"])
    def testNpDocInvalid(self, invalid_flag):
        """Invalid np_doc_form values produce a docstring with no link."""
        def np_fun(x, y, z):
            """np_fun docstring."""
            return

        np_utils.set_np_doc_form(invalid_flag)

        @np_utils.np_doc(None, np_fun=np_fun, unsupported_params=["x"])
        def f(x, z):
            """f docstring."""
            return

        expected = """TensorFlow variant of NumPy's `np_fun`.
Unsupported arguments: `x`, `y`.
f docstring.
"""
        self.assertEqual(expected, f.__doc__)

    def testNpDocName(self):
        """np_doc can be given a plain NumPy function name instead of np_fun."""
        np_utils.set_np_doc_form("inlined")

        @np_utils.np_doc("foo")
        def f():
            """f docstring."""
            return

        expected = """TensorFlow variant of NumPy's `foo`.
f docstring.
"""
        self.assertEqual(expected, f.__doc__)

    # pylint: disable=unused-variable
    def testSigMismatchIsError(self):
        """Tests that signature mismatch is an error (when configured so)."""
        if not np_utils._supports_signature():
            self.skipTest("inspect.signature not supported")

        np_utils.set_is_sig_mismatch_an_error(True)

        def np_fun(x, y=1, **kwargs):
            return

        with self.assertRaisesRegex(TypeError, "Cannot find parameter"):
            @np_utils.np_doc(None, np_fun=np_fun)
            def f1(a):
                return

        with self.assertRaisesRegex(TypeError, "is of kind"):
            @np_utils.np_doc(None, np_fun=np_fun)
            def f2(x, kwargs):
                return

        with self.assertRaisesRegex(
            TypeError, "Parameter y should have a default value"
        ):
            @np_utils.np_doc(None, np_fun=np_fun)
            def f3(x, y):
                return

    def testSigMismatchIsNotError(self):
        """Tests that signature mismatch is not an error (when configured so)."""
        np_utils.set_is_sig_mismatch_an_error(False)

        def np_fun(x, y=1, **kwargs):
            return

        # The following functions all have signature mismatches, but they shouldn't
        # throw errors when is_sig_mismatch_an_error() is False.
        @np_utils.np_doc(None, np_fun=np_fun)
        def f1(a):
            return

        def f2(x, kwargs):
            return

        @np_utils.np_doc(None, np_fun=np_fun)
        def f3(x, y):
            return

    # pylint: enable=unused-variable
# Allow running this test module directly.
if __name__ == "__main__":
    test.main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
09f339b23093009144f9b4c01d6ee8320fadfc8c | 8016e033484d3cb88a4ee9b82bd3ca08557c12aa | /programmingKnowledge_OOP/hello.py | bd08eca157d9b0200fdd35f8a6aa3a11dac798e7 | [] | no_license | keys4words/python | 72ecf5de80b14ad3a94abe1d48e82035a2f0fa3d | 08431836498e6caed8e01cbc3548b295b69056fe | refs/heads/master | 2021-06-16T19:42:21.294976 | 2020-04-30T14:40:24 | 2020-04-30T14:40:24 | 187,210,896 | 0 | 0 | null | 2021-03-20T01:25:04 | 2019-05-17T12:16:40 | Python | UTF-8 | Python | false | false | 434 | py | class Hello:
def __init__(self):
# self.name = name
# self.age = 10
self.a = 10
self._b = 20
self.__c = 30
def public_method(self):
# print(self.a)
# print(self.__c)
print('public')
self.__private_method()
def __private_method(self):
print('private')
# Demo: read the public and conventionally-private attributes, then call
# the public method (which triggers the private helper).
hello = Hello()
print(hello.a)
print(hello._b)
hello.public_method()
# print(hello.__c) | [
"keys4words@gmail.com"
] | keys4words@gmail.com |
3074e68ae62c9b8da42116372bac38daab0eab34 | 1e5f2b99be2e7c1bcbe1718e09e5dce1c7a5ed4d | /23_Merge_k_Sorted_Lists/23_Merge_k_Sorted_Lists.py | 63ce62aef895054cbbca668733a19e61eaf894b0 | [] | no_license | superSeanLin/Algorithms-and-Structures | 05706cf34ac6bbf919f14001da9d44b918c10fb8 | 53ec4471f1eff393c26f7575a47df1a56fb8af11 | refs/heads/master | 2020-03-27T09:13:30.330974 | 2019-10-13T05:10:32 | 2019-10-13T05:10:32 | 146,323,422 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Merge k sorted singly linked lists (recursive selection merge)."""

    def mergeKLists(self, lists):
        """Repeatedly pick the list head with the smallest value.

        :type lists: List[ListNode]
        :rtype: ListNode

        Note: ``lists`` is mutated in place (each chosen head is replaced
        by its successor), matching the original implementation.
        """
        # Find the node with the smallest head value.  No artificial bound:
        # the previous sentinel of 1000 silently dropped any value >= 1000.
        smallest = None
        for node in lists:
            if node is not None and (smallest is None or node.val < smallest.val):
                smallest = node
        if smallest is None:  # all lists exhausted (or empty input)
            return None
        # Advance the chosen list past its head, then merge the remainder.
        lists[lists.index(smallest)] = smallest.next
        smallest.next = self.mergeKLists(lists)
        return smallest
| [
"noreply@github.com"
] | superSeanLin.noreply@github.com |
954fd295540dba8cfc19355ba45a3c7da5aa94a9 | 6f1034b17b49f373a41ecf3a5a8923fb4948992b | /test/experiment.py | 6a774f21b36d3719206d9df9793d4a0a31d8cb59 | [
"Apache-2.0"
] | permissive | NMGRL/pychron | a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f | 8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6 | refs/heads/main | 2023-08-30T07:00:34.121528 | 2023-06-12T17:43:25 | 2023-06-12T17:43:25 | 14,438,041 | 38 | 28 | Apache-2.0 | 2023-08-09T22:47:17 | 2013-11-15T23:46:10 | Python | UTF-8 | Python | false | false | 6,977 | py | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import unittest
import os
from pychron.experiment.experimentor import Experimentor
from test.database import isotope_manager_factory
from pychron.experiment.tasks.experiment_editor import ExperimentEditor
from pychron.experiment.tasks.experiment_task import ExperimentEditorTask
from six.moves import zip
# from pychron.database.records.isotope_record import IsotopeRecord
# ============= standard library imports ========================
# ============= local library imports ==========================
class BaseExperimentTest(unittest.TestCase):
def _load_queues(self):
man = self.experimentor
path = self._experiment_file
with open(path, 'r') as rfile:
txt = rfile.read()
qtexts = self.exp_task._split_text(txt)
qs = []
for qi in qtexts:
editor = ExperimentEditor(path=path)
editor.new_queue(qi)
qs.append(editor.queue)
# man.test_queues(qs)
man.experiment_queues = qs
man.update_info()
man.path = path
man.executor.reset()
return qs
def setUp(self):
self.experimentor = Experimentor(connect=False,
unique_executor_db=False
)
self.experimentor.db = db = isotope_manager_factory().db
self._experiment_file = './data/experiment2.txt'
self.exp_task = ExperimentEditorTask()
self._load_queues()
class ExperimentTest2(BaseExperimentTest):
def testAliquots(self):
queue = self._load_queues()[0]
# aqs = (46, 46, 47, 47)
aqs = (46, 46, 47, 46, 46)
aqs = (1, 46, 46, 47, 46, 46, 2)
aqs = (1, 46, 46, 45, 46, 46, 2)
for i, (aq, an) in enumerate(zip(aqs, queue.automated_runs)):
print(i, an.labnumber, an.aliquot, aq, 'aaa')
self.assertEqual(an.aliquot, aq)
def testSteps(self):
queue = self._load_queues()[0]
# sts = ('A', 'B', '', 'A', 'B', '', '', '', '')
sts = ('A', 'B', 'A', 'C', 'D')
sts = ('', 'A', 'B', 'A', 'C', 'D', '')
sts = ('', 'A', 'B', 'E', 'C', 'D')
for i, (st, an) in enumerate(zip(sts, queue.automated_runs)):
# if st in ('E', 'F'):
print(i, an.labnumber, an.step, st, an.aliquot)
self.assertEqual(an.step, st)
class ExperimentTest(BaseExperimentTest):
def testFile(self):
p = self._experiment_file
self.assertTrue(os.path.isfile(p))
def testOpen(self):
qs = self._load_queues()
self.assertEqual(len(qs), 1)
def testNRuns(self):
n = 11
queue = self._load_queues()[0]
self.assertEqual(len(queue.automated_runs), n)
def testAliquots(self):
queue = self._load_queues()[0]
# aqs = (31, 31, 2, 32, 32, 200, 201, 3, 40, 41)
# aqs = (46, 46, 2, 47, 47, 45, 45, 3, 40, 41)
aqs = (46, 46, 2, 47, 47, 46, 46, 40, 41, 45, 45, 3, 40, 41)
for aq, an in zip(aqs, queue.automated_runs):
self.assertEqual(an.aliquot, aq)
def testSteps(self):
queue = self._load_queues()[0]
# sts = ('A', 'B', '', 'A', 'B', '', '', '', '')
sts = ('A', 'B', '', 'A', 'B', 'C', 'D', '', '', 'E', 'F',
'', '', '', 'C', 'D')
for i, (st, an) in enumerate(zip(sts, queue.automated_runs)):
# if st in ('E', 'F'):
print(i, an.labnumber, an.step, st, an.aliquot)
self.assertEqual(an.step, st)
@unittest.skip('foo')
def testSample(self):
queue = self._load_queues()[0]
samples = ('NM-779', 'NM-779', '', 'NM-779', 'NM-779', 'NM-779',
'NM-779', '', 'NM-791', 'NM-791'
)
for sample, an in zip(samples, queue.automated_runs):
self.assertEqual(an.sample, sample)
@unittest.skip('foo')
def testIrradation(self):
queue = self._load_queues()[0]
irrads = ('NM-251H', 'NM-251H', '', 'NM-251H', 'NM-251H', 'NM-251H',
'NM-251H', '', 'NM-251H', 'NM-251H')
for irrad, an in zip(irrads, queue.automated_runs):
self.assertEqual(an.irradiation, irrad)
class ExecutorTest(BaseExperimentTest):
def testPreviousBlank(self):
exp = self.experimentor
ext = exp.executor
ext.experiment_queue = exp.experiment_queues[0]
result = ext._get_preceeding_blank_or_background(inform=False)
# self.assertIsInstance(result, IsotopeRecord)
def testExecutorHumanError(self):
exp = self.experimentor
ext = exp.executor
ext.experiment_queue = exp.experiment_queues[0]
self.assertTrue(ext._check_for_human_errors())
def testPreExecuteCheck(self):
exp = self.experimentor
ext = exp.executor
ext.experiment_queue = exp.experiment_queues[0]
ext._pre_execute_check(inform=False)
class HumanErrorCheckerTest(BaseExperimentTest):
def setUp(self):
super(HumanErrorCheckerTest, self).setUp()
from pychron.experiment.utilities.human_error_checker import HumanErrorChecker
hec = HumanErrorChecker()
self.hec = hec
def testNoLabnumber(self):
err = self._get_errors()
self.assertTrue('-01' in list(err.keys()))
self.assertEqual(err['-01'], 'no labnumber')
def testNoDuration(self):
err = self._get_errors()
self.assertEqual(err['61311-101'], 'no duration')
#
def testNoCleanup(self):
err = self._get_errors()
self.assertEqual(err['61311-100'], 'no cleanup')
def testPositionNoExtract(self):
err = self._get_errors()
self.assertEqual(err['61311-102'], 'position but no extract value')
def _get_errors(self):
hec = self.hec
exp = self.experimentor
q = exp.experiment_queues[0]
err = hec.check(q, test_all=True, inform=False)
return err
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
d1fb0d046a7c0d9e863a9839b89b7b64feca1388 | 21b201ebf2ffbbc19fa8d74e5657e12ef597b02d | /research/pcl_rl/baseline.py | 58f7893a6c9a4bb783738399016266fa483810a2 | [] | no_license | alhsnouf/model | fa619691ad9d0afc7ad849a9471e6bb0643a8d47 | 5fe429b115634e642a7469b3f1d4bc0c5cf98782 | refs/heads/master | 2021-04-12T11:16:02.150045 | 2018-03-27T15:19:18 | 2018-03-27T15:19:18 | 126,702,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4252c9a3674f2fe52c94c44905609a6f028ed27517804d6b9a68c3b4ce0b5efe
size 7292
| [
"alhanouf987@hotmail.com"
] | alhanouf987@hotmail.com |
1485edb0101fc10707d2f472fb4c3ed4549ba608 | 7ab41799fd38489c93282f1beb3b20e7ef8ff165 | /python/94.py | 6a9b8fc33e6ab122cea451ead65017089a84d0c5 | [] | no_license | scturtle/leetcode-sol | 86c4095df6b31a9fcad683f2d63669ce1691633c | e1a9ce5d9b8fe4bd11e50bd1d5ba1933de845db7 | refs/heads/master | 2020-04-23T00:01:37.016267 | 2015-11-21T04:15:27 | 2015-11-21T04:15:27 | 32,385,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | class Solution(object):
def inorderTraversal(self, root, ans=None):
"""
:type root: TreeNode
:rtype: List[int]
"""
if ans is None:
ans = []
if not root:
return ans
self.inorderTraversal(root.left, ans)
ans.append(root.val)
self.inorderTraversal(root.right, ans)
return ans
| [
"scturtle@gmail.com"
] | scturtle@gmail.com |
56229b6884789b2f9643a05e87993227183ac570 | f0dce7b15b55647b709300d335ddcca523ee61f7 | /34_Find_First_and_Last_Position_of_Element_in_Sorted_Array.py | b4dd6bab76ed54fd3570cd6599c240a478044dc2 | [] | no_license | breezekiller789/LeetCode | ecc4883f616d21e7b72d85c9f93293a8daf3dc74 | 51090f28eaab17e823981eddc9119abe174ceb4e | refs/heads/master | 2023-06-03T20:54:29.222478 | 2021-06-18T14:33:05 | 2021-06-18T14:33:05 | 347,049,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/
# Binary search
# Output: [3,4]
nums = [5, 7, 7, 8, 8, 10]
target = 8
# Output: [-1,-1]
nums = [5, 7, 7, 8, 8, 10]
target = 6
# Output: [-1,-1]
nums = [5, 7, 7, 8, 8, 10]
target = 0
def FindInterval(nums, startIndex, target):
left = startIndex - 1
right = startIndex + 1
length = len(nums)
while left >= 0 and right < length and nums[left] == target and \
nums[right] == target:
left -= 1
right += 1
while left >= 0 and nums[left] == target:
left -= 1
while right < length and nums[right] == target:
right += 1
return [left+1, right-1]
low = 0
high = len(nums) - 1
while low <= high:
mid = (low+high)/2
if nums[mid] > target:
high = mid - 1
elif nums[mid] < target:
low = mid + 1
else:
print FindInterval(nums, mid, target)
exit()
print [-1, -1]
| [
"breezekiller789@csie.io"
] | breezekiller789@csie.io |
628e6d136054a6d06792eb383522a465965a4b1b | bcc04939aa70675c9be19c0bf4a9642877db46b1 | /qa/urls.py | bab92ea3fbcf77b3fe73b3d096450528a2fbd7e2 | [
"MIT"
] | permissive | zkeshtkar/gapbug | 164398e2ddd8f952d5851eab19e34f9f84a080e1 | eec5baf9b4346aef26bcb10e48ddcb358140d708 | refs/heads/main | 2023-06-20T07:39:50.084126 | 2021-07-16T13:31:10 | 2021-07-16T13:31:10 | 387,550,452 | 0 | 0 | MIT | 2021-07-19T17:53:20 | 2021-07-19T17:53:19 | null | UTF-8 | Python | false | false | 1,726 | py | from django.urls import path
from . import views
app_name = "qa"
urlpatterns = [
path("", views.QuestionList.as_view(), name="index"),
path("ask", views.Ask.as_view(), name="question"),
path("show/<int:id>/<str:slug>", views.show, name="show"),
path(
"<int:id>/answer/submit", views.AnswerQuestion.as_view(), name="submit_answer"
),
path(
"<int:question_id>/up", views.QuestionVoteUp.as_view(), name="question_vote_up"
),
path(
"<int:question_id>/down",
views.QuestionVoteDown.as_view(),
name="question_vote_down",
),
path("<int:question_id>/edit", views.EditQuestion.as_view(), name="question_edit"),
path("<int:pk>/delete/", views.DeleteQuestion.as_view(), name="question_delete"),
path(
"<int:question_id>/answer/<int:pk>/delete/",
views.DeleteAnswer.as_view(),
name="answer_delete",
),
path(
"<int:question_id>/edit/answer/<int:answer_id>",
views.EditAnswer.as_view(),
name="answer_edit",
),
path(
"<int:question_id>/<int:answer_id>/up",
views.AnswerVoteUp.as_view(),
name="answer_voteup",
),
path(
"<int:question_id>/<int:answer_id>/down",
views.AnswerVoteDown.as_view(),
name="answer_voteup",
),
path(
"<int:question_id>/<int:answer_id>/accept",
views.AcceptAnswer.as_view(),
name="accept_answer",
),
path("search/", views.Search.as_view(), name="search"),
path("tags/", views.TagList.as_view(), name="tags_list"),
path("tags/<str:tag>/", views.QuestionByTag.as_view(), name="by_tag"),
path("tagslist/", views.QuestionTagList.as_view(), name="all_tags"),
]
| [
"mshirdel@gmail.com"
] | mshirdel@gmail.com |
fca7b1ef9fb4de7c9a18ac5ac1b8740490e71104 | a5aabe2e4057d78e687a57a6b560516a7cdb5836 | /unsserv/extreme/sampling/protocol.py | b81ea0a13077b05dbebfab9cabc85dd2beb7c114 | [
"MIT"
] | permissive | aratz-lasa/py-unsserv | 0ffc09ddab65a11ce917d0faa8b1b5dff091e563 | 6f332385e55d05953186b9a8b7848bca4b878e18 | refs/heads/master | 2022-12-14T21:10:12.397834 | 2020-05-03T11:29:49 | 2020-05-03T11:29:49 | 228,329,158 | 5 | 0 | MIT | 2022-12-08T07:00:55 | 2019-12-16T07:35:20 | Python | UTF-8 | Python | false | false | 3,695 | py | from enum import IntEnum, auto
from typing import Tuple, Sequence
from unsserv.common.utils import parse_node
from unsserv.common.structs import Node
from unsserv.common.rpc.structs import Message
from unsserv.common.rpc.protocol import AProtocol, ITranscoder, Command, Data, Handler
from unsserv.extreme.sampling.structs import Sample, SampleResult
FIELD_COMMAND = "mrwb-command"
FIELD_TTL = "mrwb-ttl"
FIELD_ORIGIN_NODE = "mrwb-origin-node"
FIELD_SAMPLE_RESULT = "mrwb-sample-result"
FIELD_SAMPLE_ID = "mrwb-sample-id"
class MRWBCommand(IntEnum):
GET_DEGREE = auto()
SAMPLE = auto()
SAMPLE_RESULT = auto()
class MRWBTranscoder(ITranscoder):
def encode(self, command: Command, *data: Data) -> Message:
if command == MRWBCommand.SAMPLE:
sample: Sample = data[0]
message_data = {
FIELD_COMMAND: MRWBCommand.SAMPLE,
FIELD_SAMPLE_ID: sample.id,
FIELD_ORIGIN_NODE: sample.origin_node,
FIELD_TTL: sample.ttl,
}
return Message(self.my_node, self.service_id, message_data)
elif command == MRWBCommand.SAMPLE_RESULT:
sample_result: SampleResult = data[0]
message_data = {
FIELD_COMMAND: MRWBCommand.SAMPLE_RESULT,
FIELD_SAMPLE_ID: sample_result.sample_id,
FIELD_SAMPLE_RESULT: sample_result.result,
}
return Message(self.my_node, self.service_id, message_data)
elif command == MRWBCommand.GET_DEGREE:
message_data = {FIELD_COMMAND: MRWBCommand.GET_DEGREE}
return Message(self.my_node, self.service_id, message_data)
raise ValueError("Invalid Command")
def decode(self, message: Message) -> Tuple[Command, Sequence[Data]]:
command = message.data[FIELD_COMMAND]
if command == MRWBCommand.SAMPLE:
sample = Sample(
id=message.data[FIELD_SAMPLE_ID],
origin_node=parse_node(message.data[FIELD_ORIGIN_NODE]),
ttl=message.data[FIELD_TTL],
)
return MRWBCommand.SAMPLE, [sample]
elif command == MRWBCommand.SAMPLE_RESULT:
sample_result = SampleResult(
sample_id=message.data[FIELD_SAMPLE_ID],
result=parse_node(message.data[FIELD_SAMPLE_RESULT]),
)
return MRWBCommand.SAMPLE_RESULT, [sample_result]
elif command == MRWBCommand.GET_DEGREE:
return MRWBCommand.GET_DEGREE, []
raise ValueError("Invalid Command")
class MRWBProtocol(AProtocol):
def _get_new_transcoder(self):
return MRWBTranscoder(self.my_node, self.service_id)
async def sample(self, destination: Node, sample: Sample):
message = self._transcoder.encode(MRWBCommand.SAMPLE, sample)
return await self._rpc.call_send_message(destination, message)
async def sample_result(self, destination: Node, sample_result: SampleResult):
message = self._transcoder.encode(MRWBCommand.SAMPLE_RESULT, sample_result)
return await self._rpc.call_send_message(destination, message)
async def get_degree(self, destination: Node) -> int:
message = self._transcoder.encode(MRWBCommand.GET_DEGREE)
return await self._rpc.call_send_message(destination, message)
def set_handler_sample(self, handler: Handler):
self._handlers[MRWBCommand.SAMPLE] = handler
def set_handler_sample_result(self, handler: Handler):
self._handlers[MRWBCommand.SAMPLE_RESULT] = handler
def set_handler_get_degree(self, handler: Handler):
self._handlers[MRWBCommand.GET_DEGREE] = handler
| [
"aratzml@opendeusto.es"
] | aratzml@opendeusto.es |
25a25d841579b1ba7c8b6f758ec38f5162118091 | bcfc082c98c13bccd4a415c30b67c61d0b91828c | /pymc_hacking/eg3/run_mcmc.py | d43d95f4d452d8ea1875956fa9da5748be328748 | [] | no_license | Markus333/doing_bayesian_data_analysis | 0e7375af1acfd9952044ade28e59d734974e9a71 | 27f144fda3e9df41dbb74f70c5bf82547d2fa649 | refs/heads/master | 2020-12-28T21:39:08.758946 | 2013-01-10T06:46:11 | 2013-01-10T06:46:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | #!/usr/bin/env python
import two_normal_model
from pymc import MCMC
from pymc.Matplot import plot
# do posterior sampling
m = MCMC(two_normal_model)
m.sample(iter=100000, burn=1000)
print(m.stats())
import numpy
for p in ['mean1', 'mean2', 'std_dev', 'theta']:
numpy.savetxt("%s.trace" % p, m.trace(p)[:])
# draw some pictures
plot(m)
| [
"matthew.kelcey@gmail.com"
] | matthew.kelcey@gmail.com |
59979f1621b5032c17414f37611219c67fb481dd | c7e5d2fd3a9fdc585f335477eb74248a4416e44b | /setup.py | 19611e6865b7fddf5b82ed3fc61d2ccc8153f801 | [
"MIT"
] | permissive | lord63/pyhipku | d4ec626df5c9e354894f1290633132bb86388730 | 4037014ee4d56ed3dd62b3fe1b9681095e6f5de8 | refs/heads/master | 2022-03-10T16:46:10.514356 | 2021-10-04T11:12:48 | 2022-02-22T14:54:31 | 31,259,992 | 104 | 6 | MIT | 2021-10-04T11:12:54 | 2015-02-24T12:57:02 | Python | UTF-8 | Python | false | false | 1,199 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import pyhipku
try:
import pypandoc
long_description = pypandoc.convert('README.md','rst')
except (IOError, ImportError):
with open('README.md') as f:
long_description = f.read()
setup(
name='pyhipku',
version=pyhipku.__version__,
url='http://github.com/lord63/pyhipku/',
license='MIT',
author='lord63',
author_email='lord63.j@gmail.com',
description='Encode any IP address as a haiku',
long_description=long_description,
packages=['pyhipku'],
include_package_data=True,
keywords='ip haiku',
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| [
"lord63.j@gmail.com"
] | lord63.j@gmail.com |
7fccc2a26214b24ed6112b3083ded4973cf22b7c | b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e | /build/shogun_lib/examples/undocumented/python_modular/preprocessor_sortwordstring_modular.py | cbeb3b9c60661aeb5b61178e97e3ba7a043908ed | [] | no_license | behollis/muViewBranch | 384f8f97f67723b2a4019294854969d6fc1f53e8 | 1d80914f57e47b3ad565c4696861f7b3213675e0 | refs/heads/master | 2021-01-10T13:22:28.580069 | 2015-10-27T21:43:20 | 2015-10-27T21:43:20 | 45,059,082 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | from tools.load import LoadMatrix
lm=LoadMatrix()
traindna = lm.load_dna('../data/fm_train_dna.dat')
testdna = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindna,testdna,3,0,False,False],[traindna,testdna,3,0,False,False]]
def preprocessor_sortwordstring_modular (fm_train_dna=traindna,fm_test_dna=testdna,order=3,gap=0,reverse=False,use_sign=False):
from shogun.Kernel import CommWordStringKernel
from shogun.Features import StringCharFeatures, StringWordFeatures, DNA
from shogun.Preprocessor import SortWordString
charfeat=StringCharFeatures(fm_train_dna, DNA)
feats_train=StringWordFeatures(charfeat.get_alphabet())
feats_train.obtain_from_char(charfeat, order-1, order, gap, reverse)
preproc=SortWordString()
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor()
charfeat=StringCharFeatures(fm_test_dna, DNA)
feats_test=StringWordFeatures(charfeat.get_alphabet())
feats_test.obtain_from_char(charfeat, order-1, order, gap, reverse)
feats_test.add_preprocessor(preproc)
feats_test.apply_preprocessor()
kernel=CommWordStringKernel(feats_train, feats_train, use_sign)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('CommWordString')
preprocessor_sortwordstring_modular(*parameter_list[0])
| [
"prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305"
] | prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305 |
80d08ab26221fb1bcfb4478b7d0d7ee2bedb94d3 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC1702.py | cfd5953d19d3326974e9bc2e2f80559fa8e2ef50 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,647 | py | # qubit number=5
# total number=51
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[0]) # number=44
prog.cz(input_qubit[3],input_qubit[0]) # number=45
prog.h(input_qubit[0]) # number=46
prog.cx(input_qubit[3],input_qubit[0]) # number=48
prog.z(input_qubit[3]) # number=49
prog.cx(input_qubit[3],input_qubit[0]) # number=50
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.rx(0.11938052083641225,input_qubit[1]) # number=36
prog.cx(input_qubit[1],input_qubit[2]) # number=47
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(1.4765485471872026,input_qubit[2]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=41
prog.x(input_qubit[0]) # number=42
prog.cx(input_qubit[1],input_qubit[0]) # number=43
prog.x(input_qubit[4]) # number=30
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.rx(0.45238934211692994,input_qubit[3]) # number=38
prog.y(input_qubit[1]) # number=39
prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
prog.h(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=22
prog.x(input_qubit[3]) # number=23
prog.cx(input_qubit[0],input_qubit[3]) # number=24
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[2]) # number=26
prog.x(input_qubit[2]) # number=27
prog.h(input_qubit[4]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1702.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
6f8b3a5d8957841087142834c91c801aa460a488 | 8e79de4b73998dd0ee1dae4881784a2b12410615 | /219/notifications.py | 09294bb1d55b253633d306d9185376cafe2febce | [
"MIT"
] | permissive | alehpineda/bitesofpy | e6eb7c9413cf407a12643efece01bef5457e5dcb | bfd319a606cd0b7b9bfb85a3e8942872a2d43c48 | refs/heads/master | 2021-07-15T19:59:35.061049 | 2020-09-25T17:49:32 | 2020-09-25T17:49:32 | 209,878,791 | 0 | 0 | MIT | 2020-09-06T00:11:45 | 2019-09-20T20:49:51 | Python | UTF-8 | Python | false | false | 268 | py | from datetime import date, timedelta
TODAY = date.today()
def gen_bite_planning(num_bites=1, num_days=1, start_date=TODAY):
days = 0
while True:
days += num_days
for _ in range(num_bites):
yield start_date + timedelta(days=days)
| [
"ale.hpineda@gmail.com"
] | ale.hpineda@gmail.com |
92c31e6b240db898369e4593625739fe5d39e00f | e6b9ca7b13a21fcc5a26e787191c845698a47f17 | /django_mako_plus/provider/compile.py | 1090218285d818cbbd93b2b3b8f62d91c72c36ac | [
"Apache-2.0"
] | permissive | BrightBridgeWeb/django-mako-plus | c42e6b3ff4a62b5110f6412958b8df585ae78881 | 24690661b80562a510c1632853815df5111b606c | refs/heads/master | 2020-04-15T14:47:44.565952 | 2019-01-02T02:29:42 | 2019-01-02T02:29:42 | 164,768,193 | 0 | 0 | Apache-2.0 | 2019-01-09T02:06:52 | 2019-01-09T02:06:51 | null | UTF-8 | Python | false | false | 7,416 | py | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import os
import os.path
import shutil
import collections
import logging
from .base import BaseProvider
from ..util import log
from ..command import run_command
class CompileProvider(BaseProvider):
'''
Runs a command, such as compiling *.scss or *.less, when an output file
timestamp is older than the source file. In production mode, this check
is done only once (the first time a template is run) per server start.
When settings.DEBUG=True, checks for a recompile every request.
When settings.DEBUG=False, checks for a recompile only once per server run.
'''
def __init__(self, template, options):
super().__init__(template, options)
self.sourcepath = os.path.join(settings.BASE_DIR if settings.DEBUG else settings.STATIC_ROOT, self.build_sourcepath())
self.targetpath = os.path.join(settings.BASE_DIR if settings.DEBUG else settings.STATIC_ROOT, self.build_targetpath())
# since this is in the constructor, it runs only one time per server
# run when in production mode
if not os.path.exists(self.sourcepath):
msg = 'skipping nonexistent file'
elif self.needs_compile:
msg = 'compiling file'
if not os.path.exists(os.path.dirname(self.targetpath)):
os.makedirs(os.path.dirname(self.targetpath))
run_command(*self.build_command())
else:
msg = 'already up to date'
if log.isEnabledFor(logging.DEBUG):
log.debug('%s created for %s: [%s]', repr(self), self.sourcepath, msg)
DEFAULT_OPTIONS = {
'group': 'styles',
# explicitly sets the path to search for - if this filepath exists, DMP
# includes a link to it in the template. globs are not supported because this
# should resolve to one exact file. possible values:
# 1. None: a default path is used, such as "{app}/{subdir}/{filename.ext}", prefixed
# with the static root at production; see subclasses for their default filenames.
# 2. function, lambda, or other callable: called as func(provider) and
# should return a string
# 3. str: used directly
'sourcepath': None,
# explicitly sets the path to search for - if this filepath exists, DMP
# includes a link to it in the template. globs are not supported because this
# should resolve to one exact file. possible values:
# 1. None: a default path is used, such as "{app}/{subdir}/{filename.ext}", prefixed
# with the static root at production; see subclasses for their default filenames.
# 2. function, lambda, or other callable: called as func(provider) and
# should return a string
# 3. str: used directly
'targetpath': None,
# explicitly sets the command to be run. possible values:
# 1. None: the default command is run
# 2. function, lambda, or other callable: called as func(provider), expects list as return
# 3. list: used directly in the call to subprocess module
'command': [],
}
def build_sourcepath(self):
# if defined in settings, run the function or return the string
if self.options['sourcepath'] is not None:
return self.options['sourcepath'](self) if callable(self.options['sourcepath']) else self.options['sourcepath']
# build the default
if self.app_config is None:
log.warn('{} skipped: template %s not in project subdir and `targetpath` not in settings', (self.__class__.__qualname__, self.template_relpath))
return self.build_default_sourcepath()
def build_default_sourcepath(self):
raise ImproperlyConfigured('{} must set `sourcepath` in options (or a subclass can override build_default_sourcepath).'.format(self.__class__.__qualname__))
def build_targetpath(self):
# if defined in settings, run the function or return the string
if self.options['targetpath'] is not None:
return self.options['targetpath'](self) if callable(self.options['targetpath']) else self.options['targetpath']
# build the default
if self.app_config is None:
log.warn('{} skipped: template %s not in project subdir and `targetpath` not in settings', (self.__class__.__qualname__, self.template_relpath))
return self.build_default_targetpath()
def build_default_targetpath(self):
raise ImproperlyConfigured('{} must set `targetpath` in options (or a subclass can override build_default_targetpath).'.format(self.__class__.__qualname__))
def build_command(self):
'''Returns the command to run, as a list (see subprocess module)'''
# if defined in settings, run the function or return the string
if self.options['command']:
return self.options['command'](self) if callable(self.options['command']) else self.options['command']
# build the default
return self.build_default_command()
def build_default_command(self):
raise ImproperlyConfigured('{} must set `command` in options (or a subclass can override build_default_command).'.format(self.__class__.__qualname__))
@property
def needs_compile(self):
'''Returns True if self.sourcepath is newer than self.targetpath'''
try:
source_mtime = os.stat(self.sourcepath).st_mtime
except OSError: # no source for this template, so just return
return False
try:
target_mtime = os.stat(self.targetpath).st_mtime
except OSError: # target doesn't exist, so compile
return True
# both source and target exist, so compile if source newer
return source_mtime > target_mtime
###################
### Sass
class CompileScssProvider(CompileProvider):
'''Specialized CompileProvider for SCSS'''
def build_default_sourcepath(self):
return os.path.join(
self.app_config.name,
'styles',
self.template_relpath + '.scss',
)
def build_default_targetpath(self):
# posixpath because URLs use forward slash
return os.path.join(
self.app_config.name,
'styles',
self.template_relpath + '.css',
)
def build_default_command(self):
return [
shutil.which('sass'),
'--source-map',
'--load-path={}'.format(settings.BASE_DIR),
self.sourcepath,
self.targetpath,
]
#####################
### Less
class CompileLessProvider(CompileProvider):
'''Specialized CompileProvider that contains settings for *.less files.'''
def build_default_sourcepath(self):
return os.path.join(
self.app_config.name,
'styles',
self.template_relpath + '.less',
)
def build_default_targetpath(self):
# posixpath because URLs use forward slash
return os.path.join(
self.app_config.name,
'styles',
self.template_relpath + '.css',
)
def build_default_command(self):
return [
shutil.which('lessc'),
'--source-map',
self.sourcepath,
self.targetpath,
]
| [
"doconix@gmail.com"
] | doconix@gmail.com |
df6d1e4f182172ca91145d9307cefe3d8451ec3f | 4992f95174927775146f46275ed604aefe5e9699 | /dstagram/config/urls.py | a7febe8b3c2ba7dc58f8b9ec32801e3465038596 | [
"MIT"
] | permissive | djangojeng-e/mini_projects | 55d5858628eb5f42cb4a5a5e417958fe5929d658 | 32014388e8c83556d83f6ae911bd0e33df2067a7 | refs/heads/master | 2022-12-25T21:26:23.057485 | 2020-05-27T08:02:20 | 2020-05-27T08:02:20 | 229,744,025 | 0 | 0 | MIT | 2022-12-11T05:30:16 | 2019-12-23T12:01:05 | CSS | UTF-8 | Python | false | false | 1,080 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from account.views import register
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('photo.urls')),
path('accounts/', include('account.urls')),
path('register/', register, name='register')
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"headfat1218@gmail.com"
] | headfat1218@gmail.com |
a1526b01102784964eeb982e8698dfbe4e2c1e4c | 63e0bfa7fb4ecf0b6d4f8fd740be0316cd82ea00 | /Graphs/DFSgraph.py | dd8c66f76b63d4d6e3f7eda3c8b8e87506da5a67 | [] | no_license | shaheershantk/Problem-Solving-with-Algorithms-Data-Structure | 7ceb025c8af97dd81890d3baebc69a82d8196801 | ce8b4ba1240fed3109a767984e370ce7a7eb630b | refs/heads/master | 2016-09-06T00:45:50.861117 | 2014-12-01T09:00:16 | 2014-12-01T09:00:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from graph import Graph
class DFSGraph(Graph):
def __init__(self):
super().__init__()
self.time = 0
def dfs(self):
for aVertex in self:
aVertex.setColor('white')
aVertex.setPred(-1)
for aVertex in self:
if aVertex.getColor() == 'white':
self.dfsvisit(aVertex)
def dfsvisit(self,startVertex):
startVertex.setColor('gray')
self.time += 1
startVertex.setDiscovery(self.time)
for nextVertex in startVertex.getConnections():
if nextVertex.getColor() == 'white':
nextVertex.setPred(startVertex)
self.dfsvisit(nextVertex)
startVertex.setColor('black')
self.time += 1
startVertex.setFinish(self.time)
| [
"shaheer.shan@gmail.com"
] | shaheer.shan@gmail.com |
ff606cd3f830e14dcd8513ca07d6193f66176520 | 04afb34356de112445c3e5733fd2b773d92372ef | /Sem1/FP/Exam/board.py | ac01e707bf43b664662a7821bfd3124d34aeec33 | [] | no_license | AndreeaCimpean/Uni | a4e48e5e1dcecbc0c28ad45ddd3b0989ff7985c8 | 27df09339e4f8141be3c22ae93c4c063ffd2b172 | refs/heads/master | 2020-08-21T19:12:49.840044 | 2020-05-15T17:22:50 | 2020-05-15T17:22:50 | 216,222,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,139 | py | from texttable import Texttable
import random
class Board:
def __init__(self):
self._data = [0] * 64
self.place_stars()
self.place_endeavour()
self.place_cruisers(3)
def get_cell(self, i, j):
return self._data[((ord(i) - ord('A')) * 8 + j - 1)]
def set_cell(self, i, j, value):
self._data[((ord(i) - ord('A')) * 8 + j - 1)] = value
def __str__(self):
d = {0: " ", 1: "*", -2:"E", 2:"B"}
t = Texttable()
row = ['0', '1', '2', '3', '4', '5', '6', '7', '8']
t.add_row(row)
for i in range(0, 64, 8):
row = []
row.append(chr(ord("A") + i//8))
row += self._data[i:i+8]
for j in range(1, 9):
row[j] = d[row[j]]
t.add_row(row)
return t.draw()
def empty_neighbours(self, i, j):
'''
Check if all neighbours of a cell are empty
:param i: the row of the cell
:param j: the column of the cell
:return:
True if all neighbours are empty
False otherwise
'''
neighbours = self.get_neighbours(i, j)
for neighbour in neighbours:
if self.get_cell(neighbour[0], neighbour[1]) != 0:
return False
return True
def get_neighbours(self, i, j):
neighbours = []
directionsi = [0, 0, 1, -1, 1, 1, -1, -1]
directionsj = [1, -1, 0, 0, 1, -1, -1, 1]
for index in range(0, 8):
# print(index)
neighbour = chr(ord(i) + directionsi[index]), j + directionsj[index]
if neighbour[0] >= 'A' and neighbour[0] <= 'H' and neighbour[1] >= 1 and neighbour[1] <= 8:
neighbours.append(neighbour)
return neighbours
def place_stars(self):
'''
Place random 10 stars on the board, so that there is no 2 adjacent stars(row, column, diagonal)
:return:
None, but place the start
'''
count = 0
while count != 10:
while True:
x = random.randint(0, 7)
x = chr(ord('A') + x)
y = random.randint(1, 8)
if self.get_cell(x, y) != 0:
continue
else:
break
if self.empty_neighbours(x, y) == False:
continue
else:
self.set_cell(x, y, 1)
count += 1
def place_endeavour(self):
while True:
x = random.randint(0, 7)
x = chr(ord('A') + x)
y = random.randint(1, 8)
if self.get_cell(x, y) != 0:
continue
else:
break
self.set_cell(x, y, -2)
def place_cruisers(self, number):
count = 0
while count != number:
while True:
x = random.randint(0, 7)
x = chr(ord('A') + x)
y = random.randint(1, 8)
if self.get_cell(x, y) != 0:
continue
else:
break
self.set_cell(x, y, 2)
count += 1
def is_won(self):
count_cruisers = 0
for i in range(0, 8):
for j in range(1, 9):
if self.get_cell(chr(ord("A") + i), j) == 2:
count_cruisers += 1
if count_cruisers == 0:
return True
return False
def is_lost(self):
for i in range(0, 8):
for j in range(1, 9):
if self.get_cell(chr(ord("A") + i), j) == -2:
return False
return True
def find_number_of_ships(self):
count_cruisers = 0
for i in range(0, 8):
for j in range(1, 9):
if self.get_cell(chr(ord("A") + i), j) == 2:
count_cruisers += 1
return count_cruisers
def find_endeavour(self):
for i in range(0, 8):
for j in range(1, 9):
if self.get_cell(chr(ord("A") + i), j) == -2:
return chr(ord("A") + i), j
| [
"andreeacimpean.910@gmail.com"
] | andreeacimpean.910@gmail.com |
3f00da3a7e49680da3280abdc9e62595162564a2 | ffad717edc7ab2c25d5397d46e3fcd3975ec845f | /Python/pyesri 2/ANSWERS/countwords.py | 2f99c6fb13bb5934869c3497fc0145931cb580f8 | [] | no_license | shaunakv1/esri-developer-conference-2015-training | 2f74caea97aa6333aa38fb29183e12a802bd8f90 | 68b0a19aac0f9755202ef4354ad629ebd8fde6ba | refs/heads/master | 2021-01-01T20:35:48.543254 | 2015-03-09T22:13:14 | 2015-03-09T22:13:14 | 31,855,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #!/usr/bin/python
import sys
if len(sys.argv) < 3:
print "Syntax: countwords.py PATTERN FILE ..."
sys.exit()
pattern = sys.argv[1]
for fname in sys.argv[2:]:
count = 0
with open(fname) as f:
for line in f:
if pattern in line:
count += 1
print '''"{0}" occurred on {1} lines in {2}'''.format(pattern,count,fname)
| [
"shaunakv1@gmail.com"
] | shaunakv1@gmail.com |
475f9818e2bccdeb3c2faec0b8c438eb5c4c96f4 | e0f1b0f8d8771e0852c9d5a118c8a1d5bac274ba | /Keras/5_Deep_Learning_for_Computer_Vision/5.2.py | d69ed0c7c87d8a147caa42dd7731c667bea73655 | [] | no_license | rapsealk/TIL | cdec9a67c510ba0cc33f5f11cdace0ffb4f847e1 | b1a78201fef37cc6d28f8acda41645cd7db4ef6f | refs/heads/master | 2022-11-26T17:11:36.375136 | 2020-10-21T04:35:01 | 2020-10-21T04:35:01 | 122,223,741 | 3 | 0 | null | 2022-11-10T14:58:46 | 2018-02-20T16:26:15 | Java | UTF-8 | Python | false | false | 6,457 | py | #!/usr/bin/env python3
"""
Datasets can be downloaded @ https://www.kaggle.com/c/dogs-vs-cats/data
"""
"""
5.2.2
"""
import os, shutil
original_dataset_dir = './datasets/cats_and_dogs/train'
base_dir = './datasets/cats_and_dogs_small'
os.mkdir(base_dir)
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dir, fname)
shutil.copyfile(src, dst)
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dir, fname)
shutil.copyfile(src, dst)
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
print('훈련용 고양이 이미지 전체 개수:', len(os.listdir(train_cats_dir)))
print('훈련용 강아지 이미지 전체 개수:', len(os.listdir(train_dogs_dir)))
print('검증용 고양이 이미지 전체 개수:', len(os.listdir(validation_cats_dir)))
print('검증용 강아지 이미지 전체 개수:', len(os.listdir(validation_dogs_dir)))
print('테스트용 고양이 이미지 전체 개수:', len(os.listdir(test_cats_dir)))
print('테스트용 강아지 이미지 전체 개수:', len(os.listdir(test_dogs_dir)))
"""
5.2.3
"""
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5)) # 코드 5-13: 드롭아웃을 포함한 새로운 컨브넷 정의하기
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
from keras import optimizers
model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])
"""
5.2.4
"""
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
for data_batch, labels_batch in train_generator:
print('배치 데이터 크기:', data_batch.shape)
print('배치 레이블 크기:', labels_batch.shape)
break
hist = model.fit_generator(train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50)
model.save('cats_and_dogs_small_1.h5')
"""
코드 5-10: 훈련의 정확도와 손실 그래프 그리기
"""
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc)+1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
"""
코드 5-11: ImageDataGenerator를 사용하여 데이터 증식 설정하기
"""
datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.1,
horizontal_flip=True, fill_mode='nearest')
"""
코드 5-12: 랜덤하게 증식된 훈련 이미지 그리기
"""
from keras.preprocessing import image
fnames = sorted([os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)])
img_path = fnames[3]
img = image.load_img(img_path, target_size=(150, 150))
x = image.img_to_array(img) # (150, 150, 3) numpy array
x = x.reshape((1,) + x.shape) # (1, 150, 150, 3) numpy array
i = 0
for batch in datagen.flow(x, batch_size=1):
plt.figure(i)
imgplot = plt.imshow(img.array_to_img(batch[0]))
i += 1
if i % 4 == 0:
break
plt.show()
"""
코드 5-14: 데이터 증식 제너레이터를 사용하여 컨브넷 훈련하기
"""
train_datagen = ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=32, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir, target_size=(150, 150), batch_size=32, class_mode='binary')
history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100, validation_data=validation_generator, validation_steps=50)
model.save('cats_and_dogs_small_2.h5') | [
"piono623@naver.com"
] | piono623@naver.com |
4ecdb76b8e0319bfa6d4f6b7dc0e061f3c6dc767 | e2345e19d448c4fa36af58a6fc908698086137f4 | /woodwork/__init__.py | 3c97e9a8ad0cc661bc81bca8183f7354516c0add | [
"MIT"
] | permissive | westurner/woodwork | df727eb30ea7c07399664b44f0df3afc626db371 | 2475f9cbc220fc57828f880014e9e2a00f547c84 | refs/heads/develop | 2023-01-09T07:29:48.452416 | 2019-07-20T12:20:40 | 2019-07-20T12:20:40 | 191,066,025 | 0 | 0 | MIT | 2022-12-26T20:47:39 | 2019-06-09T23:32:34 | Makefile | UTF-8 | Python | false | false | 141 | py | # -*- coding: utf-8 -*-
"""Top-level package for woodwork."""
__author__ = """Wes Turner"""
__email__ = 'wes@wrd.nu'
__version__ = '0.1.0'
| [
"wes@wrd.nu"
] | wes@wrd.nu |
3b042aa37d0a4f05387f289756ac8cbe1d169d5c | 003349d700f7d762f2cc3124717e332d0091be1a | /www/src/Lib/asyncio/coroutines.py | 58f1db3f88c528324bed50fd77177ba810c60fb5 | [
"BSD-3-Clause"
] | permissive | Rocia/brython | bffce20d736f67b58587f503ad8b503232823fbb | 4c29ad017d0b91971d195f31f6a0e18f68e28c55 | refs/heads/master | 2021-01-15T13:18:54.149409 | 2017-08-06T09:33:15 | 2017-08-06T09:33:15 | 99,669,037 | 1 | 0 | null | 2017-08-08T08:23:59 | 2017-08-08T08:23:58 | null | UTF-8 | Python | false | false | 6,467 | py | __all__ = ['coroutine',
'iscoroutinefunction', 'iscoroutine']
import functools
import inspect
import opcode
import os
import sys
import traceback
import types
from . import events
from . import futures
from .log import logger
# Opcode of "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']
# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below). That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call. Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines. A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
_DEBUG = (not sys.flags.ignore_environment
and bool(os.environ.get('PYTHONASYNCIODEBUG')))
# Check for CPython issue #21209
def has_yield_from_bug():
class MyGen:
def __init__(self):
self.send_args = None
def __iter__(self):
return self
def __next__(self):
return 42
def send(self, *what):
self.send_args = what
return None
def yield_from_gen(gen):
yield from gen
value = (1, 2, 3)
gen = MyGen()
coro = yield_from_gen(gen)
next(coro)
coro.send(value)
return gen.send_args != (value,)
_YIELD_FROM_BUG = has_yield_from_bug()
del has_yield_from_bug
class CoroWrapper:
# Wrapper for coroutine object in _DEBUG mode.
def __init__(self, gen, func):
assert inspect.isgenerator(gen), gen
self.gen = gen
self.func = func
self._source_traceback = traceback.extract_stack(sys._getframe(1))
# __name__, __qualname__, __doc__ attributes are set by the coroutine()
# decorator
def __repr__(self):
coro_repr = _format_coroutine(self)
if self._source_traceback:
frame = self._source_traceback[-1]
coro_repr += ', created at %s:%s' % (frame[0], frame[1])
return '<%s %s>' % (self.__class__.__name__, coro_repr)
def __iter__(self):
return self
def __next__(self):
return next(self.gen)
if _YIELD_FROM_BUG:
# For for CPython issue #21209: using "yield from" and a custom
# generator, generator.send(tuple) unpacks the tuple instead of passing
# the tuple unchanged. Check if the caller is a generator using "yield
# from" to decide if the parameter should be unpacked or not.
def send(self, *value):
frame = sys._getframe()
caller = frame.f_back
assert caller.f_lasti >= 0
if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
value = value[0]
return self.gen.send(value)
else:
def send(self, value):
return self.gen.send(value)
def throw(self, exc):
return self.gen.throw(exc)
def close(self):
return self.gen.close()
@property
def gi_frame(self):
return None
#return self.gen.gi_frame
@property
def gi_running(self):
return self.gen.gi_running
@property
def gi_code(self):
return self.gen.__code__
def __del__(self):
# Be careful accessing self.gen.frame -- self.gen might not exist.
gen = getattr(self, 'gen', None)
frame = getattr(gen, 'gi_frame', None)
if frame is not None and frame.f_lasti == -1:
msg = '%r was never yielded from' % self
tb = getattr(self, '_source_traceback', ())
if tb:
tb = ''.join(traceback.format_list(tb))
msg += ('\nCoroutine object created at '
'(most recent call last):\n')
msg += tb.rstrip()
logger.error(msg)
def coroutine(func):
"""Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged.
"""
if inspect.isgeneratorfunction(func):
coro = func
else:
@functools.wraps(func)
def coro(*args, **kw):
res = func(*args, **kw)
if isinstance(res, futures.Future) or inspect.isgenerator(res):
res = yield from res
res.gi_frame = None
return res
if not _DEBUG:
wrapper = coro
else:
@functools.wraps(func)
def wrapper(*args, **kwds):
w = CoroWrapper(coro(*args, **kwds), func)
if w._source_traceback:
del w._source_traceback[-1]
w.__name__ = func.__name__
if hasattr(func, '__qualname__'):
w.__qualname__ = func.__qualname__
w.__doc__ = func.__doc__
return w
wrapper.gi_frame = None
wrapper._is_coroutine = True # For iscoroutinefunction().
return wrapper
def iscoroutinefunction(func):
"""Return True if func is a decorated coroutine function."""
return getattr(func, '_is_coroutine', False)
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
def iscoroutine(obj):
"""Return True if obj is a coroutine object."""
return isinstance(obj, _COROUTINE_TYPES)
def _format_coroutine(coro):
try:
assert iscoroutine(coro)
coro_name = getattr(coro, '__qualname__', coro.__name__)
filename = coro.__code__.co_filename
if (isinstance(coro, CoroWrapper)
and not inspect.isgeneratorfunction(coro.func)):
filename, lineno = events._get_function_source(coro.func)
if coro.gi_frame is None:
coro_repr = ('%s() done, defined at %s:%s'
% (coro_name, filename, lineno))
else:
coro_repr = ('%s() running, defined at %s:%s'
% (coro_name, filename, lineno))
elif coro.gi_frame is not None:
lineno = coro.gi_frame.f_lineno
coro_repr = ('%s() running at %s:%s'
% (coro_name, filename, lineno))
else:
lineno = coro.__code__.co_firstlineno
coro_repr = ('%s() done, defined at %s:%s'
% (coro_name, filename, lineno))
except:
coro_repr = "Coroutine: %s" % coro_name
return coro_repr
| [
"jonathan.verner@matfyz.cz"
] | jonathan.verner@matfyz.cz |
16fc7bcb55d17f616c53e65cc1ae9dafcc3968f6 | 215e3c24d9bf55c5951cdbab08d045663003331a | /Lib/hTools2/dialogs/font/__init__.py | 74e3e5f5c6b36643c9ecc94222334f4bf4af2a8f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | hipertipo/hTools2 | 8ac14ee37d6ed78a5ce906e65befa889798cc53d | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | refs/heads/master | 2022-07-10T20:37:13.869044 | 2018-11-21T10:42:44 | 2018-11-21T10:42:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | # dialogs.font
'''Dialogs to do things to the current font.'''
from element_set import setElementDialog
from groups_print import printGroupsDialog
from glyphs_rename import batchRenameGlyphs
from info_copy import copyFontInfoDialog
from info_print import clearFontInfoDialog
from layer_delete import deleteLayerDialog
from layer_import import importUFOIntoLayerDialog
from spaces_create import createSpaceGlyphsDialog
from vmetrics_adjust import adjustVerticalMetrics
from vmetrics_transfer import transferVMetricsDialog
__all__ = [
'adjustVerticalMetrics',
'copyFontInfoDialog',
'createSpaceGlyphsDialog',
'deleteLayerDialog',
'importUFOIntoLayerDialog',
'printGroupsDialog',
'clearFontInfoDialog',
'batchRenameGlyphs',
'setElementDialog',
'transferVMetricsDialog',
]
| [
"gustavo@hipertipo.com"
] | gustavo@hipertipo.com |
e60dcbd49809656ea4dc38d9856068c52d115ebc | 75224b9a071a7e231c87cb984e1ac81d873a0165 | /finalsweek/game/program_api/game_deck_api.py | 97726ef723a21f6ee0a7388e4d0b29ad0ee1aa0c | [] | no_license | tckerr/finalsweek | 94fe740f9f1db100071d3d5b04d57d8aa48f9695 | dea12866919e5b37643e46d42d797d672dd83182 | refs/heads/master | 2021-01-23T03:52:52.777566 | 2017-05-20T00:58:20 | 2017-05-20T00:58:20 | 86,127,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | from game.definitions import OperationType, OperatorType
from game.operation.decorators import accepts_operation, accepts_operator
from game.scripting.api.program_child_api import ProgramChildApi
from trace.definitions import LogLevel, LogType
from trace.logger import Logger
# TODO: split action card and dismissal card APIs
class GameDeckApi(ProgramChildApi):
@accepts_operation(OperationType.Draw)
@accepts_operator(OperatorType.Add)
def draw_action_cards(self, operation):
operation = self._mutate(operation)
actor = self.program_api.actors.get(operation.targeted_actor_id)
action_card_deck = self.data.action_card_deck
self._assert_deck_size(action_card_deck, operation.value)
drawn = [self._draw_action_card(action_card_deck, actor) for _ in range(0, operation.value)]
self.program_api.increment_metadata("drawn_action_cards", len(drawn))
return drawn
def set_discipline_card_for_phase(self, phase):
discipline_card = self.data.discipline_card_deck.cards.pop()
self.data.phase_discipline_cards[phase.id] = discipline_card
self._log_discipline_card_draw(discipline_card, phase.phase_type)
return discipline_card
def get_discipline_card_for_phase(self, phase_id):
return self.data.phase_discipline_cards[phase_id]
def _draw_action_card(self, action_card_deck, actor):
card = action_card_deck.cards.pop()
actor.action_card_hand.cards.append(card)
self._log_action_card_draw(card)
return card
@staticmethod
def _assert_deck_size(action_card_deck, quantity):
deck_length = len(action_card_deck.cards)
if deck_length < quantity:
message = "Cannot draw {quantity} cards from a pile of size {pile_size}."
raise Exception(message.format(quantity=quantity, pile_size=deck_length))
@staticmethod
def _log_action_card_draw(card):
message = "Drew action card '{}', pc: {}".format(card.template.name, card.id)
Logger.log(message, level=LogLevel.Info, log_type=LogType.GameLogic)
@staticmethod
def _log_discipline_card_draw(discipline_card, phase_type):
message = "Drew dismissal card '{}' for phase '{}'".format(discipline_card.template.name, phase_type)
Logger.log(message, level=LogLevel.Info, log_type=LogType.GameLogic)
| [
"tckerr@gmail.com"
] | tckerr@gmail.com |
d58981503e1312e14b20bf7ce3a549340b34779d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/142/usersdata/231/62183/submittedfiles/av2_p3_civil.py | c48a48605ffd795c38ee98f06a74012f88f0d143 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
def media(a):
soma = 0
for i in range(0,len(a),1):
soma = soma + a[i]
media = soma/len(a)
return (media)
#ESCREVA AS DEMAIS FUNÇÕES
def soma1(g):
media=media(a)
cont=0
for i in range(0,len(g),1):
cont=cont+(g[i]-media(a))
return cont
def entradaLista(n):
a = []
for i in range(0,n,1):
valor = float(input('Digite um valor: '))
a.append(valor)
return (a)
n = int(input('Digite o tamanho da lista: '))
x = entradaLista(n)
y = entradaLista(n)
p = #CALCULE O VALOR DE P
p = abs(p)
print('%.4f' % p)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
33872138ea3dceeb7ec6c48bc1b0b17ef7f988e6 | 91e0036d8e976b09c9a9e3281f33559c4b163412 | /Api/admin.py | 39cdfb2cdc1835fbf3a59932392c2fba1d9315fa | [] | no_license | sirajmuneer123/LibraryApp | b1b77f817ec909899715b484788eb6f9b2e1a853 | f41543ece1f513edc85508cd45b4b68c1e4b3cbc | refs/heads/master | 2020-09-09T00:14:32.235343 | 2019-11-15T13:50:18 | 2019-11-15T13:50:18 | 221,285,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from django.apps import apps
from django.contrib import admin
from django.contrib.admin.sites import AlreadyRegistered
app_models = apps.get_app_config('Api').get_models()
for model in app_models:
try:
admin.site.register(model)
except AlreadyRegistered:
pass
| [
"sirajmuneer4@gmail.com"
] | sirajmuneer4@gmail.com |
c30df6cdff0df25e70d3951f98834cff940e8c4f | b333dc607a2f1556f6a8adb6d16dc88fa8a30c8b | /portal/apps/epubparser/views.py | a2dac12c6e6f988b3cc1a028f3cb4ed50e01b92c | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hernan0216/utopia-cms | 6558f8f600620c042dd79c7d2edf18fb77caebb8 | 48b48ef9acf8e3d0eb7d52601a122a01da82075c | refs/heads/main | 2023-02-06T10:31:35.525180 | 2020-12-15T17:43:28 | 2020-12-15T17:43:28 | 321,775,279 | 1 | 0 | BSD-3-Clause | 2020-12-15T19:59:17 | 2020-12-15T19:59:16 | null | UTF-8 | Python | false | false | 8,191 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import redirect
from django.template import RequestContext
import os, re
import ebooklib
from ebooklib import epub, utils
from bs4 import BeautifulSoup, Tag
from django.views.generic import ListView, FormView, View, DetailView
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib import messages
from models import EpubFile
from forms import UploadEpubForm, EpubChangeSectionForm
import traceback
from lxml import etree
from lxml.etree import tostring
from core.models import Article, Section
class FileAddView(FormView):
form_class = UploadEpubForm
success_url = reverse_lazy('epub-home')
template_name = "epubparser/templates/add_epub_parser.html"
def form_valid(self, form):
epub_section = form.cleaned_data['section']
epub_file = form.cleaned_data['f']
if str(epub_file).endswith('.epub'):
form.save(commit=True)
messages.success(self.request, 'Epub ingresado correctamente', fail_silently=True)
else:
messages.error(self.request, 'El archivo no es un EPUB')
return super(FileAddView, self).form_valid(form)
class FileListView(ListView):
model = EpubFile
queryset = EpubFile.objects.order_by('-id')
context_object_name = "files"
template_name = "epubparser/templates/index_epub_parser.html"
paginate_by = 5
class ParseView(DetailView):
model = EpubFile
context_object_name = "files"
template_name = "epubparser/templates/index_epub_parser.html"
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(ParseView, self).get_context_data(**kwargs)
epub_file = context['files'].f
file = EpubFile.objects.get(f=epub_file)
epub_section = file.section
try:
book = epub.read_epub(epub_file)
for item in book.get_items():
# - 9 es el codigo de tipo de archivo xhtml
if (item.get_type() is 9):
content = item.get_body_content()
print content
#reemplazo los estilos con classes css del xhtml del epub
content = replace_style(content,
'<span class="char-style-override-1">',
'</span>', '<span>', '</span> ')
content = replace_style(content,
'<span class="char-style-override-3">',
'</span>', '_', '_')
content = replace_style(content,
'<span class="Muy-destacado char-style-override-3">',
'</span>', '', '')
content = replace_style(content,
'<span class="Muy-destacado char-style-override-4">',
'</span>', '', '')
content = replace_style(content,
'<p class="Subt-tulo">',
'</p>', '<p class="Normal">\nS>', '</p>')
content = replace_style(content,
'<p class="Primer para-style-override-1">',
'</p>', '<p class="Primer">', '</p>')
content = replace_style(content,
'<span>', '</span>', ' ', ' ')
soup = BeautifulSoup(content, 'html.parser')
#cada subcuadro contiene un articulo
subcuadro_nota = soup('div', {'class': 'Subcuadro-nota'})
for e in subcuadro_nota:
tag = etree.fromstring(str(e))
titulo = ''.join(tag.xpath('//p[starts-with(@class, "T-tulo")]/text()'))
bajada = ''.join(tag.xpath('//p[@class="Bajada"]/text()'))
copete = ''.join(tag.xpath('//p[starts-with(@class, "Copete")]/text()'))
parrafos = '\n\n'.join(
tag.xpath('(//p[@class="Primer"]|//p[@class="Normal"]|//p[@class="Normal"]/span '
'|//p[@class="Subt-tulo"]|//p[@class="Autor"])/text()'))
if titulo:
try:
article = Article(
headline=titulo,
deck=bajada,
lead=copete,
#home_lead=copete,
body=parrafos,
)
article.save()
ar = Article.objects.get(id=article.id)
ar.sections.add(epub_section.id)
ar.save()
success_msg = 'Articulo generado correctamente: %s' % article.headline
messages.success(self.request, success_msg, fail_silently=True)
except:
traceback.print_exc()
messages.error(self.request, 'Hubo un error al procesar el archivo')
except:
traceback.print_exc()
messages.error(self.request, 'Hubo un error al procesar el archivo')
files = EpubFile.objects.order_by('-id')
section = Section.objects.all()
context['files'] = files
context['section'] = section
return context
class FileChangeView(DetailView):
model = EpubFile
context_object_name = "files"
template_name = "epubparser/templates/change_epub_parser.html"
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(FileChangeView, self).get_context_data(**kwargs)
epub_file = context['files']
sec = context['files'].section
section = Section.objects.all()
context['section'] = section
context['files'] = epub_file
changeForm = EpubChangeSectionForm()
changeForm.f = epub_file
changeForm.section = sec
context['changeForm'] = changeForm
return context
def changeSection(request):
if request.method == 'POST':
try:
id_epub = request.POST.get('id_epub')
sec = request.POST.get('section')
epub = EpubFile.objects.get(id=id_epub)
section = Section.objects.get(id=sec)
epub.section = section
epub.save()
except:
traceback.print_exc()
messages.error(request, 'Debe seleccionar una SECCIÓN')
return redirect(reverse('epub-home'))
def replace_style(content, tag_abre_style, tag_cierra_style, tag_change_style, tag_close_style):
encontre = True
while encontre:
posicion_abre_span = content.find(tag_abre_style)
#si no encuentra el span se va del loop
if (posicion_abre_span == -1):
encontre = False
else:
#posicion de cierre del span
posicion_cierra_span = content.find(tag_cierra_style, posicion_abre_span)
#reemplaza el proximo cierre de span por el cierre de em
content = replace_at_position(content,tag_cierra_style, tag_close_style, posicion_cierra_span)
#reemplaza la apertura del span por la apertura del em
content = replace_at_position(content,tag_abre_style, tag_change_style, posicion_abre_span)
return content
#reemplaza en la cadena total la cadena vieja por la cadena nueva
#la cadena vieja esta ubicada en la cadena_total en la posicion pos
def replace_at_position(cadena_total, cadena_vieja, cadena_nueva, pos):
cadena_total = cadena_total[:pos] + cadena_nueva + cadena_total[pos+len(cadena_vieja):]
return cadena_total
| [
"apacheco@ladiaria.com.uy"
] | apacheco@ladiaria.com.uy |
70c7ef7f60eb803955e8f403aaebada84e807bda | 36f6b1d7a7355ee21e387b2a4f56ebd8dd044b2c | /snippets/try_bench.py | f416d2344f0af895df5c80c490df937f4b35e6bc | [] | no_license | sbl1996/hinas | e826936537094d7de5ba36cc78dcdb8e4de076ac | e2db5ebc219a2f7dc1b1344e5d13c97177467e08 | refs/heads/main | 2023-02-20T17:52:38.719867 | 2021-01-21T02:22:05 | 2021-01-21T02:22:05 | 301,271,639 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import numpy as np
from hinas.models.nas_bench_201.search.darts import Network
from hinas.models.nas_bench_201.api import SimpleNASBench201
api = SimpleNASBench201("/Users/hrvvi/Code/study/pytorch/datasets/NAS-Bench-201-v1_1-096897-simple.pth")
net = Network(4, 8)
val_accs = []
ranks = []
for i in range(100):
net._initialize_alphas()
s = net.genotype()
val_accs.append(np.mean(api.query_eval_acc(s)))
ranks.append(api.query_eval_acc_rank(s))
| [
"sbl1996@126.com"
] | sbl1996@126.com |
2eb572de05a2ef49a132ba18d76c92d43849d6f6 | b253452291fe7a0ebd5673bf9f6e8ead4a6825c8 | /fireplace/cards/gvg/mage.py | 1204db22fc7a1a231b295c5a1f01cf38318431cb | [] | no_license | rafzi/fireplace | 3c75b3892848635f5de264f01fd1431c34ef6983 | 8fc6198c1b855b448e2fceebe7bdab5e6436e2b7 | refs/heads/master | 2021-01-14T08:51:54.403001 | 2015-07-08T13:45:09 | 2015-07-08T13:45:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | from ..utils import *
##
# Minions
# Snowchugger
class GVG_002:
events = [
Damage().on(
lambda self, target, amount, source: source is self and [Freeze(target)] or []
)
]
# Goblin Blastmage
class GVG_004:
def action(self):
if self.poweredUp:
return [Hit(RANDOM_ENEMY_CHARACTER, 1) * 4]
# Illuminator
class GVG_089:
events = [
OWN_TURN_END.on(
lambda self, player: player.secrets and [Heal(FRIENDLY_HERO, 4)] or []
)
]
##
# Spells
# Flamecannon
class GVG_001:
action = [Hit(RANDOM_ENEMY_MINION, 4)]
# Unstable Portal
class GVG_003:
# TODO
def action(self):
card = self.controller.give(RandomMinion())
self.buff(card, "GVG_003e")
# Echo of Medivh
class GVG_005:
action = [Give(CONTROLLER, Copy(FRIENDLY_MINIONS))]
| [
"jerome@leclan.ch"
] | jerome@leclan.ch |
93ee83b0a17ef7ea90b7e103fcfa8ebc52d9406c | d1c7d493eb01ba3636482ad452aa540e253ff0e9 | /python-3/beginner/1164.py | 5bb22dbc2ce9dc78e4b7a81857d93a04b81e579d | [
"MIT"
] | permissive | MisaelAugusto/uri | 411aa8b3915c9c046ce46ac180daab7950922109 | 22bee72edf44f939d7a290383336b4d061faecbb | refs/heads/master | 2022-12-05T08:32:22.999188 | 2020-08-31T12:31:05 | 2020-08-31T12:31:05 | 268,656,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # -*- coding: utf-8 -*-
N = int(input())
for i in range(N):
X = int(input())
total = 0
for i in range(1, int((X / 2)) + 1):
if (X % i == 0):
total += i
print(("%d eh perfeito" % X) if (total == X) else ("%d nao eh perfeito" % X)) | [
"misael.costa@ccc.ufcg.edu.br"
] | misael.costa@ccc.ufcg.edu.br |
0d69a25061e8410a69ca57c98149a3ff32ec0dbd | b02a5015ecc61414834c4b24e5f33168eb99070a | /CCscripts/DrawCuts.py | 4aa074ccf23b22710695a909fad2956b4ae69a06 | [
"MIT"
] | permissive | mrvollger/SDA | f1aa8edf9989125d7e0c0f6ae159bca495915826 | 3d5e9ec8d1e7ac97121c33c6be80d635392631cf | refs/heads/master | 2023-05-13T05:24:54.665854 | 2023-05-07T23:40:25 | 2023-05-07T23:40:25 | 101,452,926 | 29 | 5 | MIT | 2019-11-21T18:08:13 | 2017-08-26T00:58:01 | Python | UTF-8 | Python | false | false | 605 | py | #!/usr/bin/env python
import ABPUtils
import argparse
ap = argparse.ArgumentParser(description="Plot cuts individually")
ap.add_argument("graph", help="Original graph file.")
ap.add_argument("cuts", help="Cuts file.")
ap.add_argument("--out", help="Output file.", default="./")
args = ap.parse_args()
g = ABPUtils.ReadGraph(args.graph)
cuts = ABPUtils.ReadCuts(args.cuts)
cutIndex = 1
ABPUtils.ColorGraphByCut(g,cuts)
for cut in cuts:
sub = g.subgraph(list(cut))
sub.graph['NumVertexColors']=len(cuts)
ABPUtils.DrawGraph(sub, "{}subgraph.{}.png".format(args.out,cutIndex))
cutIndex+=1
| [
"mrvollger@gmail.com"
] | mrvollger@gmail.com |
04674a600e226e31aa0f8316d18ab01272775691 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_subcontracts.py | 7377f23ac79db909499e908ee8af2088973acc0e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py |
from xai.brain.wordbase.verbs._subcontract import _SUBCONTRACT
#calss header
class _SUBCONTRACTS(_SUBCONTRACT, ):
def __init__(self,):
_SUBCONTRACT.__init__(self)
self.name = "SUBCONTRACTS"
self.specie = 'verbs'
self.basic = "subcontract"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8d5a83daead27736aecc6e7ac623324c76e906dc | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/3-lists_20200405001109.py | 5a0bda1428374ca1f3e46e140e448d5438925749 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # A List is a collection which is ordered and changeable. Allows duplicate members.
numbers = [ 3, 23, 111, 3423, 352]
print(numbers)
print(type(numbers))
#using a constructor
listNum = list (( 213, 11, 342, 2342, 55432))
print(listNum)
fruits = ['Apples', 'Oranges', 'Grapes', 'Pears']
print(fruits[2])
#Get len
print(len(fruits))
#append to the list
fruits.append('Mango')
print(fruits)
#remove from the list
fruits.remove('Grapes')
print(fruits)
#insert into a spot
fruits.insert(2, 'Coconut')
print(fruits)
#remove from a spot
fruits.pop(4)
print(fruits)
#reverse list
fruits.reverse()
print(fruits)
#sort an array
| [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
f2eee898f0944c4b6faea8454a3765ba7fb32f35 | 428ee863e50fecfaedbbf64f3da95e9acb746ae4 | /src/tamsin/sysmod.py | ee7671e8e91b5727f007a422a134f6c0d9003edd | [
"BSD-3-Clause",
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | catseye/Tamsin | ba53a0ee4ac882486a958e6ba7225f19eea763ef | 1c9e7ade052d734fa1753d612f2426ac067d5252 | refs/heads/master | 2021-01-17T09:21:25.202969 | 2016-03-31T15:00:14 | 2016-03-31T15:00:14 | 19,212,331 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,364 | py | # encoding: UTF-8
# Copyright (c)2014 Chris Pressey, Cat's Eye Technologies.
# Distributed under a BSD-style license; see LICENSE for more information.
# Python version of Tamsin's $ module.
import sys
from tamsin.term import Atom, Constructor
from tamsin.scanner import EOF
TRANSLATOR = {'return': 'return_', 'print': 'print_'}
def call(name, interpreter, args):
name = TRANSLATOR.get(name, name)
if name not in globals():
raise NotImplementedError(name)
return globals()[name](interpreter, args)
def arity(name):
name = TRANSLATOR.get(name, name)
if name not in globals():
raise NotImplementedError(name)
return globals()[name].arity
def return_(self, args):
return (True, args[0])
return_.arity = 1
def fail(self, args):
return (False, args[0])
fail.arity = 1
def expect(self, args):
upcoming_token = self.scanner.peek()
term = args[0]
token = str(term)
if self.scanner.consume(token):
return (True, term)
else:
return (False,
Atom(self.scanner.error_message("'%s'" % token, upcoming_token))
)
expect.arity = 1
def eof(self, args):
if self.scanner.peek() is EOF:
return (True, '')
else:
return (False,
Atom(self.scanner.error_message('EOF', self.scanner.peek()))
)
eof.arity = 0
def any(self, args):
if self.scanner.peek() is not EOF:
return (True, Atom(self.scanner.scan()))
else:
return (False,
Atom(self.scanner.error_message('any token', EOF))
)
any.arity = 0
def alnum(self, args):
if (self.scanner.peek() is not EOF and
self.scanner.peek()[0].isalnum()):
return (True, Atom(self.scanner.scan()))
else:
return (False,
Atom(self.scanner.error_message('alphanumeric', self.scanner.peek()))
)
alnum.arity = 0
def upper(self, args):
if (self.scanner.peek() is not EOF and
self.scanner.peek()[0].isupper()):
return (True, Atom(self.scanner.scan()))
else:
return (False,
Atom(self.scanner.error_message('uppercase', self.scanner.peek()))
)
upper.arity = 0
def startswith(self, args):
if (self.scanner.peek() is not EOF and
self.scanner.peek()[0].startswith((str(args[0]),))):
return (True, Atom(self.scanner.scan()))
else:
return (False,
Atom(self.scanner.error_message("'%s...'" % args[0], self.scanner.peek()))
)
startswith.arity = 1
def equal(self, args):
if args[0].match(args[1]) != False:
return (True, args[0])
else:
return (False, Atom("term '%s' does not equal '%s'" %
(args[0], args[1])))
equal.arity = 2
def unquote(self, args):
q = str(args[0])
l = str(args[1])
r = str(args[2])
if (q.startswith(l) and q.endswith(r)):
if len(r) == 0:
return (True, Atom(q[len(l):]))
return (True, Atom(q[len(l):-len(r)]))
else:
return (False, Atom("term '%s' is not quoted with '%s' and '%s'" %
(q, l, r)))
unquote.arity = 3
def mkterm(self, args):
t = args[0]
l = args[1]
contents = []
while isinstance(l, Constructor) and l.tag == 'list':
contents.append(l.contents[0])
l = l.contents[1]
if contents:
return (True, Constructor(t.text, contents))
else:
return (True, t)
mkterm.arity = 2
def reverse(self, args):
return (True, args[0].reversed(args[1]))
reverse.arity = 2
def print_(self, args):
val = args[0]
sys.stdout.write(str(val))
sys.stdout.write("\n")
return (True, val)
print_.arity = 1
def emit(self, args):
val = args[0]
sys.stdout.write(str(val))
return (True, val)
emit.arity = 1
def repr(self, args):
val = args[0]
val = Atom(val.repr())
return (True, val)
repr.arity = 1
counter = 0
def gensym(self, args):
global counter
counter += 1
return (True, Atom(str(args[0]) + str(counter)))
gensym.arity = 1
def hexbyte(self, args):
return (True, Atom(chr(int(args[0].text + args[1].text, 16))))
hexbyte.arity = 2
def format_octal(self, args):
return (True, Atom("%o" % ord(args[0].text[0])))
format_octal.arity = 1
def length(self, args):
return (True, Atom(str(len(str(args[0])))))
length.arity = 1
| [
"cpressey@catseye.tc"
] | cpressey@catseye.tc |
e7436a8ac1f5eadf84e2b41f3f537b5b70bdc951 | fda673f9450d6d78c542a699fe8b9064f65d9057 | /spider/ms_main_update_score.py | a97faedfdf27c2b6a8d2aba18398cc1e66b7101c | [] | no_license | asdlei99/MovieSite | 1221f8103ec4af63b4f9b80d89d7f7bf5a21a1c3 | 2a7ee09a308e13df58f6b43f6288908c68fc6271 | refs/heads/master | 2020-03-26T16:19:19.059927 | 2018-06-08T09:01:05 | 2018-06-08T09:01:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,946 | py | # coding=utf-8
"""
Update douban_addr and socre for db
"""
import urllib
import re
import os
from ms_utils.db_helper import connect_db
from ms_utils.html_helper import Douban
from ms_utils.common import get_webdriver, get_html_content, get_douban_sn, \
get_douban_url
from ms_utils.log import Log
from ms_constants import CATES_ENG_CH, UPDATE_SCORE_PATH, MOVIE_NAME_ENG, \
TV_NAME_ENG, ANIME_NAME_ENG, SHOW_NAME_ENG
LOG = Log()
Douban = Douban()
def _replace_symbol(s):
res = s.replace("'", "'").replace("&", "&").replace(""", '"')
return res
def _update(url, sid, cate_eng, ch_name, foreign_name, douban_sn_old, imdb_sn_old,
score_old, conn, force=False):
"""
:param url:
:param sid:
:param cate_eng:
:param ch_name:
:param foreign_name:
:param douban_sn_old:
:param imdb_sn_old:
:param score_old:
:param conn:
:param force: Force to update
:return:
"""
content = get_html_content(url)
cate_chn = CATES_ENG_CH.get(cate_eng)
db_value = Douban.get_douban_text_info(
content, cate_eng, cate_chn, enable_log=False)
if db_value == 'continue':
return 'mismatch'
else:
(name1, name2, year, director, screenwriter, actor, mtype,
region, date_show, date, running_time, score_new, othername,
imdb_sn_new, intro) = db_value
score_new = float(score_new)
condition = True if force else (name1 == ch_name and name2 == foreign_name)
if condition:
set_clause = 'SET score="%s"' % score_new
if not douban_sn_old:
douban_sn_new = get_douban_sn(url)
if douban_sn_new:
set_clause += ',douban_sn="%s"' % douban_sn_new
if not imdb_sn_old:
set_clause += ',imdb_sn="%s"' % imdb_sn_new
sql = ('UPDATE movie_%s %s WHERE id=%s' % (cate_eng, set_clause, sid))
try:
_cur = conn.cursor()
_cur.execute(sql)
conn.commit()
_cur.close()
except Exception as e:
LOG.info('%5s FAILED %s %s %s' % (cate_chn, name1, name2, str(e)),
path=UPDATE_SCORE_PATH,
filename='update_score.log')
return 'error'
LOG.info('%5s %3.1f(%3.1f) %s %s' %
(cate_chn, score_old, score_new, name1, name2),
path=UPDATE_SCORE_PATH,
filename='update_score.log')
return 'ok'
else:
return 'mismatch'
def _compare_info(d_url, ch_name_db, foreign_name_db,
director_db, actor_db, year_db, region_db, cate_eng, conn):
"""
比较名字(30),年代(10),国家地区(10),导演(25),演员(25)
:return:
"""
d_content = get_html_content(d_url, url_log=False)
text_info = Douban.get_douban_text_info(d_content, cate_eng)
if not isinstance(text_info, tuple):
return text_info
(name1, name2, year, director, screenwriter, actor, mtype,
region, date_show, date, running_time, score, othername, imdb,
intro) = text_info
director_db = director_db.split('/')[0].strip()
actor_db = actor_db.split('/')[0].strip()
weight = 0
# name
if ch_name_db == name1 and foreign_name_db == name2:
LOG.debug('Name match (30)')
weight += 30
if director_db in director:
LOG.debug('Director match (25)')
weight += 25
if actor_db in actor:
LOG.debug('Actor match (25)')
weight += 25
# TODO
def main():
"""
Update Douban link and score
:return:
"""
if not os.path.exists(UPDATE_SCORE_PATH):
os.makedirs(UPDATE_SCORE_PATH)
conn = connect_db()
driver = get_webdriver()
for cate_eng in ('movie', 'tv', 'anime', 'show'):
sid = 10
while True:
print sid
cur = conn.cursor()
cur.execute('SELECT ch_name, foreign_name, score, director, actor, '
'year, region, douban_sn, imdb_sn FROM movie_%s '
'WHERE id=%d' % (cate_eng, sid))
res = cur.fetchone()
cur.close()
if res:
res = (item.encode('utf-8') for item in res)
(ch_name, foreign_name, score_old, director, actor, year,
region, douban_sn_old, imdb_sn_old) = res
score_old = float(score_old)
if douban_sn_old:
url = get_douban_url(douban_sn_old)
_update(url, sid, cate_eng, ch_name, foreign_name,
douban_sn_old, imdb_sn_old, score_old, conn, force=True)
else:
urls = Douban.get_douban_search_result(ch_name, driver)
for url in urls:
# 对比豆瓣与数据库信息
res = _compare_info(url, ch_name, foreign_name,
director, actor, year,
region, cate_eng, conn)
if res:
result = _update(url, sid, cate_eng, ch_name,
foreign_name, douban_sn_old, imdb_sn_old,
score_old, conn)
if result == 'ok' or result == 'error':
sid += 1
break
elif result == 'mismatch':
continue
LOG.info('%5s FAILED %s %s' % (CATES_ENG_CH.get(cate_eng),
ch_name, foreign_name),
path=UPDATE_SCORE_PATH,
filename='update_score.log')
sid += 1
else:
sid += 1
if __name__ == '__main__':
main() | [
"1225191678@qq.com"
] | 1225191678@qq.com |
7a2d0fe093e4636025d1c89079263aa7eacf5957 | c6fca34b2c9cb973d9d65d23e58e40d4513e173a | /aoc2016/day25.py | f1a75d25732ce30e7e8d149f847be0996fb35b09 | [] | no_license | tomkooij/AdventOfCode | 8ff47c027c887194b0d441f61a8db172c4e260ea | 7890d45a01498dcb48972a7e311888ce6f003bd2 | refs/heads/master | 2021-08-15T19:46:21.869137 | 2021-01-18T06:37:50 | 2021-01-18T06:37:50 | 48,421,868 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # adventofcode assembunny
from collections import defaultdict
def run(len_output, **kwargs):
def int_or_reg(register):
try:
return int(register)
except ValueError:
return regs[register]
regs = defaultdict(int)
output = []
for key, value in kwargs.items():
regs[key] = value
while regs['ip'] < len(instructions):
opcode, *rest = instructions[regs['ip']]
#print('evaluate ', instruction)
if opcode == 'cpy':
register = rest[1]
value = rest[0]
regs[register] = int_or_reg(value)
regs['ip'] += 1
elif opcode == 'inc':
register = rest[0]
regs[register] += 1
regs['ip'] += 1
elif opcode == 'dec':
register = rest[0]
regs[register] -= 1
regs['ip'] += 1
elif opcode == 'jnz':
register = rest[0]
offset = rest[1]
zero = int_or_reg(register)
if zero:
regs['ip'] += int_or_reg(offset)
else:
regs['ip'] += 1
elif opcode == 'out':
value = int_or_reg(rest[0])
output.append(value)
if len(output) >= len_output:
return output
regs['ip'] += 1
else:
assert False, 'unknown instruction.'
return regs['a']
with open('input\input25.txt') as f:
instructions = [line.rstrip('\n').split() for line in f.readlines()]
print('part A: ')
# stupid brute force
for i in range(1000):
if not i % 25:
print(i)
if run(16, a=i)== 8*[0, 1]:
print('part A: ', i)
break
| [
"tomkooij@tomkooij.nl"
] | tomkooij@tomkooij.nl |
eacfea7464bd43192aea85f068759c41257baf5d | ca9e037a6ac24117b7250922c607ce9e5609e82b | /docrec/compatibility/sib18.py | 5298bf7d77cec38ab770661ad3d5f55c27f43caa | [] | no_license | thiagopx/deeprec-cvpr20 | 4d4e1752fa33a17c3da26cc11433531a2e08d40c | 3f04c494574790cbc390930cd60e0e207ec15b10 | refs/heads/master | 2022-06-02T18:31:32.664352 | 2022-05-16T20:50:35 | 2022-05-16T20:50:35 | 248,400,773 | 2 | 3 | null | 2022-05-16T20:55:20 | 2020-03-19T03:21:03 | Python | UTF-8 | Python | false | false | 4,872 | py | import sys
import cv2
import numpy as np
import math
from time import time
from skimage.filters import threshold_sauvola, threshold_otsu
import tensorflow as tf
from .algorithm import Algorithm
from ..models.squeezenet import SqueezeNet
class Sib18(Algorithm):
''' Proposed algorithm. '''
def __init__(
self, arch, weights_path, vshift, input_size, num_classes,
thresh_method='sauvola', seed=None, offset=None, sess=None
):
assert arch in ['sn']
assert thresh_method in ['otsu', 'sauvola']
self.sess = sess
if self.sess is None:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
# preparing model
self.offset = offset
self.vshift = vshift
self.input_size_h, self.input_size_w = input_size
self.images_ph = tf.placeholder(
tf.float32, name='images_ph', shape=(None, self.input_size_h, self.input_size_w, 3) # channels last
)
self.batch = np.ones((2 * vshift + 1, self.input_size_h, self.input_size_w, 3), dtype=np.float32)
# model
model = SqueezeNet(self.images_ph, include_top=True, num_classes=num_classes, mode='test', channels_first=False, sess=self.sess)
logits = model.output
probs = tf.nn.softmax(logits)
self.comp_op = tf.reduce_max(probs[:, 1])
self.disp_op = tf.argmax(probs[:, 1]) - vshift
# result
self.compatibilities = None
self.displacements = None
# init model
self.sess.run(tf.global_variables_initializer())
model.load_weights(weights_path)
self.inference_time = 0
self.preparation_time = 0
self.pairwise_time = 0
self.thresh_method = thresh_method
def _extract_features(self, strip):
''' Extract image around the border. '''
image = cv2.cvtColor(strip.filled_image(), cv2.COLOR_RGB2GRAY)
thresh_func = threshold_sauvola if self.thresh_method == 'sauvola' else threshold_otsu
thresh = thresh_func(image)
thresholded = (image > thresh).astype(np.float32)
image_bin = np.stack(3 * [thresholded]).transpose((1, 2, 0)) # channels last
wl = math.ceil(self.input_size_w / 2)
wr = int(self.input_size_w / 2)
h, w, _ = strip.image.shape
# vertical offset
offset = (h - self.input_size_h) // 2 if self.offset is None else self.offset
# left image
left_border = strip.offsets_l
left = np.ones((self.input_size_h, wl, 3), dtype=np.float32)
for y, x in enumerate(left_border[offset : offset + self.input_size_h]):
w_new = min(wl, w - x)
left[y, : w_new] = image_bin[y + offset, x : x + w_new]
# right image
right_border = strip.offsets_r
right = np.ones((self.input_size_h, wr, 3), dtype=np.float32)
for y, x in enumerate(right_border[offset : offset + self.input_size_h]):
w_new = min(wr, x + 1)
right[y, : w_new] = image_bin[y + offset, x - w_new + 1: x + 1]
return left, right
def run(self, strips, d=0, ignore_pairs=[], verbose=False): # d is not being used at this moment
''' Run algorithm. '''
t0 = time()
N = len(strips.strips)
compatibilities = np.zeros((N, N), dtype=np.float32)
displacements = np.zeros((N, N), dtype=np.int32)
wr = int(self.input_size_w / 2)
# features
features = []
for strip in strips.strips:
left, right = self._extract_features(strip)
features.append((left, right))
self.preparatation_time = time() - t0
t0 = time()
self.inference_time = 0
for i in range(N):
if verbose: print('row {} of {}'.format(i + 1, N))
self.batch[:, :, : wr] = features[i][1]
for j in range(N):
if i == j or (i, j) in ignore_pairs:
continue
feat_j = features[j][0]
self.batch[self.vshift, :, wr : ] = feat_j
for r in range(1, self.vshift + 1):
self.batch[self.vshift - r, : -r, wr :] = feat_j[r :] # slide up
self.batch[self.vshift + r, r : , wr :] = feat_j[: -r] # slide down
t1 = time()
comp, disp = self.sess.run([self.comp_op, self.disp_op], feed_dict={self.images_ph: self.batch})
self.inference_time += time() - t1
compatibilities[i, j] = comp
displacements[i, j] = disp
self.pairwise_time = time() - t0
self.compatibilities = compatibilities
self.displacements = displacements
return self
def name(self):
''' Method name. '''
return 'sib18'
| [
"paixao@gmail.com"
] | paixao@gmail.com |
45921dd008fad6bac984470e8218cf30bc8ac73e | b2afbc68e3900ebd2a673f5152f4f04374792bca | /pm4py/objects/conversion/log/versions/to_trace_log.py | 6a08685e41dbd465c77611512cf40f07a7bc4638 | [] | no_license | indrawaspada/online_process_monitoring_using_incremental_state-space_expansion_an_exact_algorithm | 83d0c2a3dfd7f1f9ce686564bf7f525bb534e973 | 2b23ab20f739e87447af86bb7f63d8816c69c9db | refs/heads/master | 2022-09-18T12:15:33.172540 | 2020-06-04T13:58:07 | 2020-06-04T13:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | import pm4py
from pm4py.objects.log import log as log_instance
from pm4py.objects.log.util import general as log_util
def apply(log, parameters=None):
if isinstance(log, pm4py.objects.log.log.EventLog) and (not isinstance(log, pm4py.objects.log.log.TraceLog)):
parameters = parameters if parameters is not None else dict()
if log_util.PARAMETER_KEY_CASE_GLUE in parameters:
glue = parameters[log_util.PARAMETER_KEY_CASE_GLUE]
else:
glue = log_util.CASE_ATTRIBUTE_GLUE
if log_util.PARAMETER_KEY_CASE_ATTRIBUTE_PRFIX in parameters:
case_pref = parameters[log_util.PARAMETER_KEY_CASE_ATTRIBUTE_PRFIX]
else:
case_pref = log_util.CASE_ATTRIBUTE_PREFIX
return transform_event_log_to_trace_log(log, case_glue=glue, include_case_attributes=False,
case_attribute_prefix=case_pref)
return log
def transform_event_log_to_trace_log(log, case_glue=log_util.CASE_ATTRIBUTE_GLUE, include_case_attributes=True,
case_attribute_prefix=log_util.CASE_ATTRIBUTE_PREFIX):
"""
Converts the event log to a trace log
Parameters
----------
log: :class:`pm4py.log.log.EventLog`
An event Log
case_glue:
Case identifier. Default is 'case:concept:name'
include_case_attributes:
Default is True
case_attribute_prefix:
Default is 'case:'
Returns
-------
log : :class:`pm4py.log.log.TraceLog`
A trace log
"""
traces = {}
for event in log:
glue = event[case_glue]
if glue not in traces:
trace_attr = {}
if include_case_attributes:
for k in event.keys():
if k.startswith(case_attribute_prefix):
trace_attr[k.replace(case_attribute_prefix, '')] = event[k]
traces[glue] = log_instance.Trace(attributes=trace_attr)
if include_case_attributes:
for k in list(event.keys()):
if k.startswith(case_attribute_prefix):
del event[k]
traces[glue].append(event)
return log_instance.TraceLog(traces.values(), attributes=log.attributes, classifiers=log.classifiers,
omni_present=log.omni_present, extensions=log.extensions) | [
"daniel.schuster@fit.fraunhofer.de"
] | daniel.schuster@fit.fraunhofer.de |
2f0d441ea6d652558924b0e2e0149d7843eebddb | 25597909c9fd5ae13eb89cf255f4ae79053a6d80 | /.ipynb_checkpoints/c2_model_selection-checkpoint.py | 8bb516e43fa4fba192fe988c6aea97eb6256ce53 | [] | no_license | FangyangJz/AI_Machine_Learning_Muke | 986038941dd7e4af9d2873369408808d5a75e412 | 751f3b850dad31441fd80ac94e1909adbb43473a | refs/heads/master | 2020-03-22T21:53:28.386637 | 2018-07-30T15:01:11 | 2018-07-30T15:01:11 | 140,719,785 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
'''
created by Fangyang on Time:2018/7/11
'''
__author__ = 'Fangyang'
import numpy as np
def train_test_split(X, y, test_ratio=0.2, seed=None):
'''将数据X和y按照test_ratio分割成X_train, X_test, y_train, y_test'''
assert X.shape[0] == y.shape[0], \
"the size of X must be equal to the size of the y"
assert 0.0 <= test_ratio <= 1.0, \
"test_ratio must be valid"
if seed:
np.random.seed(seed)
shuffled_indexes = np.random.permutation(len(X))
test_size = int(len(X) * test_ratio)
test_indexes = shuffled_indexes[:test_size]
train_indexes = shuffled_indexes[test_size:]
X_train = X[train_indexes]
y_train = y[train_indexes]
X_test = X[test_indexes]
y_test = y[test_indexes]
return X_train, y_train, X_test, y_test | [
"fangyang.jing@hotmail.com"
] | fangyang.jing@hotmail.com |
f0ef214be096340939ed6eb9684488ba392bb112 | 8b45916af90aca42f23eab1fd0f78833dfca5bfa | /tests/test_app.py | 8af16aa31636dc536e6387f9cfcf93d8e4a4dd4d | [
"MIT"
] | permissive | bradh/rio-viz | aeeef8c3b94729092ce21ff44b7ecaf79b43b50c | bc73a06c09e49b19541543f1e758109466ca17f8 | refs/heads/master | 2022-03-01T02:50:23.640625 | 2019-07-15T15:37:31 | 2019-07-15T15:37:31 | 197,893,243 | 0 | 0 | MIT | 2019-07-20T07:07:08 | 2019-07-20T07:04:58 | Python | UTF-8 | Python | false | false | 3,052 | py | """tests rio_viz.server."""
import os
import pytest
from starlette.testclient import TestClient
from rio_viz.raster import RasterTiles
from rio_viz.app import viz
from rio_tiler.errors import TileOutsideBounds
cog_path = os.path.join(os.path.dirname(__file__), "fixtures", "cog.tif")
def test_viz():
"""Should work as expected (create TileServer object)."""
r = RasterTiles(cog_path)
app = viz(r)
assert app.raster == r
assert app.port == 8080
assert app.get_bounds() == r.bounds
assert app.get_center() == r.center
assert app.get_endpoint_url() == "http://127.0.0.1:8080"
assert app.get_template_url() == "http://127.0.0.1:8080/index.html"
client = TestClient(app.app)
response = client.get("/")
assert response.status_code == 404
response = client.get("/tiles/7/64/43.png?rescale=1,10")
assert response.status_code == 200
assert response.headers["content-type"] == "image/png"
response = client.get("/tiles/7/64/43.png?rescale=1,10&indexes=1")
assert response.status_code == 200
assert response.headers["content-type"] == "image/png"
response = client.get("/tiles/7/64/43.png?rescale=1,10&color_map=cfastie")
assert response.status_code == 200
assert response.headers["content-type"] == "image/png"
with pytest.raises(TileOutsideBounds):
client.get("/tiles/18/8624/119094.png")
with pytest.raises(TileOutsideBounds):
client.get("/tiles/18/8624/119094.pbf")
response = client.get("/tiles/7/64/43.pbf")
assert response.status_code == 200
assert response.headers["content-type"] == "application/x-protobuf"
response = client.get("/tiles/7/64/43.pbf?feature_type=polygon")
assert response.status_code == 200
assert response.headers["content-type"] == "application/x-protobuf"
response = client.get("/metadata")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
response = client.get("/tilejson.json?tile_format=png")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
r = response.json()
assert r["bounds"]
assert r["center"]
assert r["minzoom"] == 6
assert r["maxzoom"] == 8
assert r["tiles"][0].endswith("png")
response = client.get("/tilejson.json?tile_format=pbf")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
r = response.json()
assert r["tiles"][0].endswith("pbf")
response = client.get("/tilejson.json?tile_format=pbf&feature_type=polygon")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
r = response.json()
assert r["tiles"][0].endswith("pbf?feature_type=polygon")
response = client.get("/point?coordinates=-2,48")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
assert response.json() == {"coordinates": [-2.0, 48.0], "value": {"band1": 110}}
| [
"vincent.sarago@gmail.com"
] | vincent.sarago@gmail.com |
df9837844c511d1fd489c599062faa029c57ba79 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03681/s154151403.py | efacb540b804061374acf8c118541a6ce367cf52 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | def factorial_mod(n,mod):
ret = 1
for i in range(1,n+1):
ret *= i
ret %= mod
return ret
def comb_mod(n,r,mod):
if r > n or r < 0:
ret = 0
else:
fact_n = factorial_mod(n, mod)
fact_r = factorial_mod(r, mod)
fact_nr = factorial_mod(n-r, mod)
ret = fact_n * pow(fact_r, mod-2, mod) * pow(fact_nr, mod-2, mod) % mod
return ret
n,m = map(int,input().split())
c = abs(n-m)
mod = 10**9+7
if c >= 2:
ans = 0
elif c == 1:
ans = factorial_mod(n,mod)*factorial_mod(m,mod)
else:
ans = 2*factorial_mod(n,mod)*factorial_mod(m,mod)
print(ans%mod) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
776304ba0a3143e65e73466d9f081b897a53d0d9 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /content/test/gpu/gpu_path_util/setup_tools_perf_paths.py | 5d90d6b007b2d134c53bc620913767cf22226e3e | [
"BSD-3-Clause"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 228 | py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gpu_path_util
gpu_path_util.AddDirToPathIfNeeded(gpu_path_util.TOOLS_PERF_DIR)
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com |
7e864297750e34096f4ed3464d0db7370279f277 | 657ac5489186e9e6f9aa7522cbf25ee929f8dde6 | /microchip/microchip/wsgi.py | 34cc8817d3d3337752078af76ec64f2d42776f9c | [] | no_license | Ginkooo/MicroChipServer | ab8f30fb87d89d9cd217b5abe7a8e940c9d3de3f | fe18496f1023f13a0dad2efba17cfa95e1748a5e | refs/heads/master | 2021-01-13T05:28:46.870298 | 2017-05-10T10:34:47 | 2017-05-10T10:34:47 | 86,624,819 | 0 | 1 | null | 2017-04-02T13:16:07 | 2017-03-29T20:09:22 | Python | UTF-8 | Python | false | false | 396 | py | """
WSGI config for microchip project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "microchip.settings")
application = get_wsgi_application()
| [
"piotr_czajka@outlook.com"
] | piotr_czajka@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.