blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a8b98c7f070d0e848c24f545f4bbe15d3ad0aeea | 892dd32ee0be7135cd33c875b06dcc66307dcc99 | /automation/MPTS/sample/TSMOnly.py | e8f67c541b758ced10e2ead3362d47a562c65066 | [] | no_license | cloudbytestorage/devops | 6d21ed0afd752bdde8cefa448d4433b435493ffa | b18193b08ba3d6538277ba48253c29d6a96b0b4a | refs/heads/master | 2020-05-29T08:48:34.489204 | 2018-01-03T09:28:53 | 2018-01-03T09:28:53 | 68,889,307 | 4 | 8 | null | 2017-11-30T08:11:39 | 2016-09-22T05:53:44 | Python | UTF-8 | Python | false | false | 3,749 | py | import json
import requests
import time
#NoofAccounts=_MyValue_
#NoofTSMs=_MyValue_
#NoofNFSVolumes=_MyValue_
#NoofISCSIVolumes=_MyValue_
#### Function(s) Declaration Begins
def sendrequest(url, querystring):
#print url+querystring
response = requests.get(
stdurl+querystring, verify=False
)
return(response);
def filesave(loglocation,permission,content):
f=open(loglocation,permission)
f.write(content.text)
f.close()
return;
def queryAsyncJobResult(jobid):
querycommand = 'command=queryAsyncJobResult&jobId=%s' %(jobid)
check_createTSM = sendrequest(stdurl, querycommand)
data = json.loads(check_createTSM.text)
status = data["queryasyncjobresultresponse"]["jobstatus"]
filesave("logs/queryAsyncJobResult.txt","w",check_createTSM)
if status == 0 :
print "Processing ..."
time.sleep(2);
queryAsyncJobResult(jobid);
else :
#print "status : "
return ;
#### Function(s) Declartion Ends
config = {}
with open('config.txt') as cfg:
config = json.load(cfg)
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
#querycommand = 'command=%s' %(config['command'])
######## To Make A TSM Begins here
print "TSM Creation Begins"
for x in range(1, int(config['Number_of_TSMs'])+1):
#for x in range (1, NoofTSMs+1):
###Stage 1 to 8 ... Prior to that 2 commands are for listing.
#querycommand = 'command=createAccount&name=%s&description=%s' %(config['tsmName%d' %(x)], config['tsmDescription%d' %(x)])
querycommand = 'command=listHAPool'
resp_listHAPool = sendrequest(stdurl,querycommand)
filesave("logs/CurrentHAPoolList.txt","w",resp_listHAPool)
data = json.loads(resp_listHAPool.text)
hapools = data["listHAPoolResponse"]["hapool"]
for hapool in hapools:
if hapool['name'] == "%s" %(config['tsmPoolName%d' %(x)]):
pool_id = hapool['id']
break
#print "Poolid =" ,pool_id
querycommand = 'command=listAccount'
resp_listAccount = sendrequest(stdurl, querycommand)
filesave("logs/CurrentAccountList.txt", "w", resp_listAccount)
data = json.loads(resp_listAccount.text)
accounts = data["listAccountResponse"]["account"]
for account in accounts:
if account['name'] == "%s" %(config['tsmAccountName%d' %(x)]):
account_id = account['id']
break
#print "Accountid =", account_id
#Stage1 Command addTSM
querycommand = 'command=createTsm&accountid=%s&poolid=%s&name=%s&ipaddress=%s&subnet=%s&router=%s&dnsname=%s&dnsserver=%s&tntinterface=%s&gracecontrol=%s&graceallowed=%s&blocksize=%s&latency=%s&iopscontrol=%s&totaliops=%s&tpcontrol=%s&totalthroughput=%s&backuptpcontrol=%s&totalbackupthroughput=%s"asize=%s' %(account_id, pool_id, config['tsmName%d' %(x)], config['tsmIPAddress%d' %(x)], config['tsmSubnet%d' %(x)], config['tsmRouter%d' %(x)], config['tsmDNSName%d' %(x)], config['tsmDNSServer%d' %(x)], config['tsmTntInterface%d' %(x)], config['tsmGraceControl%d' %(x)], config['tsmGraceAllowed%d' %(x)], config['tsmBlocksize%d' %(x)], config['tsmLatency%d' %(x)], config['tsmIopsControl%d' %(x)], config['tsmTotalIops%d' %(x)], config['tsmTpControl%d' %(x)], config['tsmTotalThroughput%d' %(x)], config['tsmBackupTpcontrol%d' %(x)], config['tsmTotalBackupThroughput%d' %(x)], config['tsmQuotasize%d' %(x)])
resp_addTsm = sendrequest(stdurl, querycommand)
filesave("logs/AddTsm.txt", "w", resp_addTsm)
data = json.loads(resp_addTsm.text)
job_id = data["addTsmResponse"]["jobid"]
queryAsyncJobResult(job_id);
print "\nTSM %d Created\n" %(x);
print "TSM Creation Done"
##### TSM Creation ends here
| [
"karthik.s@cloudbyte.com"
] | karthik.s@cloudbyte.com |
8021b7fcb0d5d9321c202c7efa6aa736d098d313 | 2f9c2bb2c8d32368f90ef798c08848cec4ea2ebd | /tests/unit/flow/test_asyncflow.py | 5cf46aaf6c1d714e2dbd80eeda95f8804342fc62 | [
"Apache-2.0"
] | permissive | automation555/jina | 9e0aafd9d894bd5995f091ea0f8566a9ed0f781d | 337526c00265190fc45235b80df10c0a75b51c09 | refs/heads/master | 2023-06-03T04:33:18.460871 | 2021-06-17T08:51:21 | 2021-06-17T08:51:21 | 377,765,051 | 0 | 0 | Apache-2.0 | 2021-06-17T08:55:30 | 2021-06-17T08:50:48 | Python | UTF-8 | Python | false | false | 5,500 | py | import asyncio
import time
import numpy as np
import pytest
from jina import Document, Flow
from jina.flow.asyncio import AsyncFlow
from jina.logging.profile import TimeContext
from jina.types.document.generators import from_ndarray
from jina.types.request import Response
from tests import validate_callback
num_docs = 5
def validate(req):
assert len(req.docs) == num_docs
assert req.docs[0].blob.ndim == 1
# TODO(Deepankar): with `restful: True` few of the asyncio tests are flaky.
# Though it runs fine locally, results in - `RuntimeError - Event loop closed` in CI (Disabling for now)
def documents(start_index, end_index):
for i in range(start_index, end_index):
with Document() as doc:
doc.text = 'this is text'
doc.tags['id'] = 'id in tags'
doc.tags['inner_dict'] = {'id': 'id in inner_dict'}
with Document() as chunk:
chunk.text = 'text in chunk'
chunk.tags['id'] = 'id in chunk tags'
doc.chunks.append(chunk)
yield doc
@pytest.mark.asyncio
@pytest.mark.parametrize('restful', [False])
@pytest.mark.parametrize('flow_cls', [Flow, AsyncFlow])
async def test_run_async_flow(restful, mocker, flow_cls):
r_val = mocker.Mock()
with flow_cls(restful=restful, asyncio=True).add() as f:
async for r in f.index(
from_ndarray(np.random.random([num_docs, 4])), on_done=r_val
):
assert isinstance(r, Response)
validate_callback(r_val, validate)
async def async_input_function():
for _ in range(num_docs):
yield Document(content=np.random.random([4]))
await asyncio.sleep(0.1)
async def async_input_function2():
for _ in range(num_docs):
yield Document(content=np.random.random([4]))
await asyncio.sleep(0.1)
@pytest.mark.asyncio
@pytest.mark.parametrize('restful', [False])
@pytest.mark.parametrize(
'inputs',
[
async_input_function,
async_input_function(),
async_input_function2(),
async_input_function2,
],
)
async def test_run_async_flow_async_input(restful, inputs, mocker):
r_val = mocker.Mock()
with AsyncFlow(asyncio=True).add() as f:
async for r in f.index(inputs, on_done=r_val):
assert isinstance(r, Response)
validate_callback(r_val, validate)
async def run_async_flow_5s(restful):
# WaitDriver pause 5s makes total roundtrip ~5s
from jina import Executor, requests
class Wait5s(Executor):
@requests
def foo(self, **kwargs):
print('im called!')
time.sleep(5)
with Flow(restful=restful, asyncio=True).add(uses=Wait5s) as f:
async for r in f.index(
from_ndarray(np.random.random([num_docs, 4])),
on_done=validate,
):
assert isinstance(r, Response)
async def sleep_print():
# total roundtrip takes ~5s
print('heavylifting other io-bound jobs, e.g. download, upload, file io')
await asyncio.sleep(5)
print('heavylifting done after 5s')
async def concurrent_main(restful):
# about 5s; but some dispatch cost, can't be just 5s, usually at <7s
await asyncio.gather(run_async_flow_5s(restful), sleep_print())
async def sequential_main(restful):
# about 10s; with some dispatch cost , usually at <12s
await run_async_flow_5s(restful)
await sleep_print()
@pytest.mark.asyncio
@pytest.mark.parametrize('restful', [False])
async def test_run_async_flow_other_task_sequential(restful):
with TimeContext('sequential await') as t:
await sequential_main(restful)
assert t.duration >= 10
@pytest.mark.asyncio
@pytest.mark.parametrize('restful', [False])
async def test_run_async_flow_other_task_concurrent(restful):
with TimeContext('concurrent await') as t:
await concurrent_main(restful)
# some dispatch cost, can't be just 5s, usually at 7~8s, but must <10s
assert t.duration < 10
@pytest.mark.asyncio
@pytest.mark.parametrize('return_results', [False])
@pytest.mark.parametrize('restful', [False])
@pytest.mark.parametrize('flow_cls', [Flow, AsyncFlow])
async def test_return_results_async_flow(return_results, restful, flow_cls):
with flow_cls(
restful=restful, asyncio=True, return_results=return_results
).add() as f:
async for r in f.index(from_ndarray(np.random.random([10, 2]))):
assert isinstance(r, Response)
@pytest.mark.asyncio
@pytest.mark.parametrize('return_results', [False, True])
@pytest.mark.parametrize('restful', [False])
@pytest.mark.parametrize('flow_api', ['delete', 'index', 'update', 'search'])
@pytest.mark.parametrize('flow_cls', [Flow, AsyncFlow])
async def test_return_results_async_flow_crud(
return_results, restful, flow_api, flow_cls
):
with flow_cls(
restful=restful, asyncio=True, return_results=return_results
).add() as f:
async for r in getattr(f, flow_api)(documents(0, 10)):
assert isinstance(r, Response)
@pytest.mark.asyncio
@pytest.mark.parametrize('flow_cls', [Flow, AsyncFlow])
async def test_async_flow_empty_data(flow_cls):
from jina import Executor, requests
class MyExec(Executor):
@requests
def foo(self, parameters, **kwargs):
assert parameters['hello'] == 'world'
with flow_cls(asyncio=True).add(uses=MyExec) as f:
async for r in f.post('/hello', parameters={'hello': 'world'}):
assert isinstance(r, Response)
| [
"rajashree.patil@embold.io"
] | rajashree.patil@embold.io |
cd9ba9cd628293fc4ec5050ca228e80392fb1503 | e2992452a3c52f4cbbc64e1686128ad464b71d16 | /libMe/util/EncodeUtil.py | f03ef111ddbb46dd976e0044504b8005c05cc0da | [] | no_license | MaGuiSen/studyScrapy | 6b84605a15027ffc24501d690666f419ebb379fd | 03604bafe19e55db12677a4af388c8a9198ca572 | refs/heads/master | 2021-01-17T04:30:23.858217 | 2017-08-30T01:50:08 | 2017-08-30T01:50:08 | 95,433,695 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | # -*- coding: utf-8 -*-
def getCoding(strInput):
"""
获取编码格式
"""
if isinstance(strInput, unicode):
return "unicode"
try:
strInput.decode("utf8")
return 'utf8'
except:
pass
try:
strInput.decode("gbk")
return 'gbk'
except:
pass
def toUnicode(strInput):
"""
得到unicode
:return:
"""
strCodingFmt = getCoding(strInput)
if strCodingFmt == "utf8":
return strInput.decode('utf8')
elif strCodingFmt == "unicode":
return strInput
elif strCodingFmt == "gbk":
return strInput.decode("gbk")
def tran2UTF8(strInput):
"""
转化为utf8格式
"""
strCodingFmt = getCoding(strInput)
if strCodingFmt == "utf8":
return strInput
elif strCodingFmt == "unicode":
return strInput.encode("utf8")
elif strCodingFmt == "gbk":
return strInput.decode("gbk").encode("utf8")
def tran2GBK(strInput):
"""
转化为gbk格式
"""
strCodingFmt = getCoding(strInput)
if strCodingFmt == "gbk":
return strInput
elif strCodingFmt == "unicode":
return strInput.encode("gbk")
elif strCodingFmt == "utf8":
return strInput.decode("utf8").encode("gbk")
| [
"1059876295@qq.com"
] | 1059876295@qq.com |
a47b02fdaab7172d06df7b342db19356ab519101 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /4_set/滚动替换更新/Distinct Coin Sums.py | a98649e5d5c1c416fb9cc5913814f3c670f65be5 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | class Solution:
def solve(self, coins, quantities):
"""Return the number of distinct coin sum values you can get by using non-empty group of these coins."""
dp = set([0])
for index, coin in enumerate(coins):
ndp = set()
for pre in dp:
for count in range(quantities[index] + 1):
ndp.add(pre + coin * count)
dp = ndp
return len(dp) - 1
def solve2(self, coins, quantities):
dp = 1
for coin, count in zip(coins, quantities):
for _ in range(count):
# 相当于集合并集操作
dp |= dp << coin
return bin(dp).count('1') - 1
print(Solution().solve([4, 2, 1], [1, 2, 1]))
# We can have the following distinct coin sums
# [1]
# [2]
# [1, 2]
# [4]
# [1, 4]
# [2, 4]
# [1, 2, 4]
# [2, 2, 4]
# [1, 2, 2, 4]
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
92fa1609950bf1451a3c35e5f8b6d7ea0e503658 | 67117705720a3e3d81253ba48c1826d36737b126 | /Wk10_STRANDS/evaluate_random.py | 5fd8aa91b959b382d35421fe4925a13425d89ebb | [] | no_license | pyliut/Rokos2021 | 41f0f96bc396b6e8a5e268e31a38a4a4b288c370 | 70753ab29afc45766eb502f91b65cc455e6055e1 | refs/heads/main | 2023-08-13T17:29:30.013829 | 2021-09-26T19:01:35 | 2021-09-26T19:01:35 | 382,092,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 15:15:57 2021
@author: pyliu
"""
import pandas as pd
import numpy as np
import scipy as sp
import time
import random
from random_prior import *
from evaluate_prior import *
def evaluate_random(edge,df_test, df_train, filename_train = "tsc_map.yaml",
metric = "difference", cutoff=100, max_obs = 10,
plot_graph = False, verbose = True,
random_state = None):
"""
Wrapper for random_prior.py & evaluate_prior.py
Max_obs is a scalar INT
"""
mean_test, var_test, prior, t_op_prior, edge_prior = random_prior(df_train,
filename = filename_train,metric = metric,
cutoff=cutoff, max_obs = max_obs,
plot_graph = plot_graph,
random_state = random_state)
ks_random, n_random = evaluate_prior(edge, df_test,
mean_test, var_test, prior, t_op_prior,
metric = metric, verbose = verbose,
random_state = random_state)
return ks_random, n_random
| [
"noreply@github.com"
] | pyliut.noreply@github.com |
cce02dbf8c91ec4ea667196ff84e70b817f7c157 | b08870f8fe7b3cf1bbab3c52a7bacbb36ee1dcc6 | /verp/stock/doctype/quick_stock_balance/quick_stock_balance.py | 34d21c079b2527cb9d590283dc3ba5f86886df3f | [] | no_license | vsadminpk18/verpfinalversion | 7148a64fe6134e2a6371470aceb1b57cc4b5a559 | 93d164b370ad9ca0dd5cda0053082dc3abbd20da | refs/heads/master | 2023-07-13T04:11:59.211046 | 2021-08-27T06:26:48 | 2021-08-27T06:26:48 | 400,410,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from verp.stock.utils import get_stock_balance, get_stock_value_on
class QuickStockBalance(Document):
pass
@frappe.whitelist()
def get_stock_item_details(warehouse, date, item=None, barcode=None):
out = {}
if barcode:
out["item"] = frappe.db.get_value(
"Item Barcode", filters={"barcode": barcode}, fieldname=["parent"])
if not out["item"]:
frappe.throw(
_("Invalid Barcode. There is no Item attached to this barcode."))
else:
out["item"] = item
barcodes = frappe.db.get_values("Item Barcode", filters={"parent": out["item"]},
fieldname=["barcode"])
out["barcodes"] = [x[0] for x in barcodes]
out["qty"] = get_stock_balance(out["item"], warehouse, date)
out["value"] = get_stock_value_on(warehouse, date, out["item"])
out["image"] = frappe.db.get_value("Item",
filters={"name": out["item"]}, fieldname=["image"])
return out
| [
"admin@vespersolutions.tech"
] | admin@vespersolutions.tech |
6f7e66a72531e3e9d15c5337c50890545e6c8b34 | ffc1cc3bb7b68335b115122fdc7924fc4e31d528 | /pro42.py | c6c957f50c694f62e26d9d6bbc341b486cd588f5 | [] | no_license | Rihanashariff/swathi24 | dba1dd3c3d2ff583ae431b432e0ef262bfeb3ac3 | 2b0d21f2febdd2a563e8f0affeebd5ca7a5821b8 | refs/heads/master | 2020-07-02T05:28:32.199982 | 2019-06-29T08:22:10 | 2019-06-29T08:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #split arra
n,k=map(int,input().split())
l=list(map(int,input().split()))
if k==1:
print(min(l))
elif k==2:
print(max(l[0],l[n-1]))
else:
print(max(l))
| [
"noreply@github.com"
] | Rihanashariff.noreply@github.com |
eb754b8caa1a4935595aabf735b2d83766c294f8 | c5f58af61e3577ded52acda210f4f664651b598c | /template/mmdetection/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py | b089c0d4ad86f3f7240624bb3b70c23518049957 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hojihun5516/object-detection-level2-cv-02 | 0a4ee5cea9a77ef5d43fb61a4b37fe3a87cb0eac | bc8a08286935b31b8e7e597c4b1ca2cbbaeb9109 | refs/heads/master | 2023-08-31T09:50:59.150971 | 2021-10-16T15:00:19 | 2021-10-16T15:00:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | _base_ = [
"../_base_/models/mask_rcnn_r50_fpn.py",
"../_base_/datasets/coco_instance.py",
"../_base_/schedules/schedule_1x.py",
"../_base_/default_runtime.py",
]
model = dict(
rpn_head=dict(
anchor_generator=dict(type="LegacyAnchorGenerator", center_offset=0.5),
bbox_coder=dict(type="LegacyDeltaXYWHBBoxCoder"),
loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=1.0),
),
roi_head=dict(
bbox_roi_extractor=dict(
type="SingleRoIExtractor", roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=2, aligned=False)
),
mask_roi_extractor=dict(
type="SingleRoIExtractor", roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=2, aligned=False)
),
bbox_head=dict(
bbox_coder=dict(type="LegacyDeltaXYWHBBoxCoder"),
loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
),
),
# model training and testing settings
train_cfg=dict(rpn_proposal=dict(max_per_img=2000), rcnn=dict(assigner=dict(match_low_quality=True))),
)
| [
"hojihun5516@daum.net"
] | hojihun5516@daum.net |
dc5c63b9b7018214aec3114408ab7a45adf6bb20 | e5cb4f8e6a350b511080e28a4acb4f5fd264f5f9 | /emission/core/wrapper/confirmedtrip.py | 3d6357e93f31c0b398c5fbf82d183913e3bd25db | [
"BSD-3-Clause"
] | permissive | jf87/e-mission-server | cde78e92badc640f307e979bd044da814c31de02 | 2e5fb715b6b9ec88c2c938de08a659d11fac34a6 | refs/heads/master | 2021-08-09T00:08:58.838672 | 2021-06-21T08:31:22 | 2021-06-21T08:31:22 | 163,435,995 | 0 | 0 | BSD-3-Clause | 2020-08-09T13:32:08 | 2018-12-28T17:50:22 | Jupyter Notebook | UTF-8 | Python | false | false | 1,095 | py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import emission.core.wrapper.trip as ecwt
import emission.core.wrapper.wrapperbase as ecwb
class Confirmedtrip(ecwt.Trip):
props = ecwt.Trip.props
props.update({"raw_trip": ecwb.WrapperBase.Access.WORM,
"cleaned_trip": ecwb.WrapperBase.Access.WORM,
# the confirmed section that is the "primary"
# https://github.com/e-mission/e-mission-docs/issues/476#issuecomment-738120752
"primary_section": ecwb.WrapperBase.Access.WORM,
"inferred_primary_mode": ecwb.WrapperBase.Access.WORM,
# the user input will have all `manual/*` entries
# let's make that be somewhat flexible instead of hardcoding into the data model
"user_input": ecwb.WrapperBase.Access.WORM
})
def _populateDependencies(self):
super(Confirmedtrip, self)._populateDependencies()
| [
"shankari@eecs.berkeley.edu"
] | shankari@eecs.berkeley.edu |
5e8021a95005100a27855482ebdecd37028530ca | 15fbf63eedc5a19836ff198bd2b80117e356955a | /stickerfinder/telegram/callback_handlers/report.py | 738bd113d52daf995eb74a28b3ab42892717e2cd | [
"MIT"
] | permissive | drmhdh/sticker-finder | cf2656160bee45d1b51ddda130a766f48fecfe8a | 688656a4b67c1e5057c0d1dc5b21201f0466b7ab | refs/heads/master | 2022-01-26T14:39:45.366537 | 2019-05-02T20:32:11 | 2019-05-02T20:32:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,608 | py | """Module for handling sticker set voting task buttons."""
from stickerfinder.models import Task
from stickerfinder.helper.maintenance import check_maintenance_chat
from stickerfinder.helper.callback import CallbackResult
from stickerfinder.helper.telegram import call_tg_func
from stickerfinder.helper.keyboard import get_report_keyboard
def handle_report_ban(session, action, query, payload, chat, tg_chat):
"""Handle the ban button of voting tasks in maintenance channels."""
task = session.query(Task).get(payload)
if CallbackResult(action).name == 'ban':
task.sticker_set.banned = True
call_tg_func(query, 'answer', ['Set tagged as nsfw'])
else:
task.sticker_set.banned = False
call_tg_func(query, 'answer', ['Set no longer tagged as nsfw'])
session.commit()
keyboard = get_report_keyboard(task)
call_tg_func(query.message, 'edit_reply_markup', [], {'reply_markup': keyboard})
def handle_report_nsfw(session, action, query, payload, chat, tg_chat):
"""Handle the nsfw button of voting tasks in maintenance channels."""
task = session.query(Task).get(payload)
if CallbackResult(action).name == 'ban':
task.sticker_set.nsfw = True
call_tg_func(query, 'answer', ['Set banned'])
else:
task.sticker_set.nsfw = False
call_tg_func(query, 'answer', ['Set unbanned'])
session.commit()
keyboard = get_report_keyboard(task)
call_tg_func(query.message, 'edit_reply_markup', [], {'reply_markup': keyboard})
def handle_report_furry(session, action, query, payload, chat, tg_chat):
"""Handle the furry button of voting tasks in maintenance channels."""
task = session.query(Task).get(payload)
if CallbackResult(action).name == 'ban':
task.sticker_set.furry = True
call_tg_func(query, 'answer', ['Set tagged as furry'])
else:
task.sticker_set.furry = False
call_tg_func(query, 'answer', ['Set tagged as furry'])
session.commit()
keyboard = get_report_keyboard(task)
call_tg_func(query.message, 'edit_reply_markup', [], {'reply_markup': keyboard})
def handle_report_next(session, action, query, payload, chat, tg_chat):
"""Handle the nextbutton of voting tasks in maintenance channels."""
task = session.query(Task).get(payload)
if not task.reviewed:
task.reviewed = True
check_maintenance_chat(session, tg_chat, chat)
try:
keyboard = get_report_keyboard(task)
call_tg_func(query.message, 'edit_reply_markup', [], {'reply_markup': keyboard})
except: # noqa
return
| [
"arne@twobeer.de"
] | arne@twobeer.de |
1928029f64bfc831e014cbfda0d1844428cc4025 | 524591f2c4f760bc01c12fea3061833847a4ff9a | /arm/opt/ros/kinetic/lib/python2.7/dist-packages/shape_msgs/msg/_SolidPrimitive.py | cd7fab91d3d5ac29f8339b1ff910e358710e246d | [
"BSD-3-Clause"
] | permissive | Roboy/roboy_plexus | 6f78d45c52055d97159fd4d0ca8e0f32f1fbd07e | 1f3039edd24c059459563cb81d194326fe824905 | refs/heads/roboy3 | 2023-03-10T15:01:34.703853 | 2021-08-16T13:42:54 | 2021-08-16T13:42:54 | 101,666,005 | 2 | 4 | BSD-3-Clause | 2022-10-22T13:43:45 | 2017-08-28T16:53:52 | C++ | UTF-8 | Python | false | false | 5,767 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from shape_msgs/SolidPrimitive.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SolidPrimitive(genpy.Message):
_md5sum = "d8f8cbc74c5ff283fca29569ccefb45d"
_type = "shape_msgs/SolidPrimitive"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# Define box, sphere, cylinder, cone
# All shapes are defined to have their bounding boxes centered around 0,0,0.
uint8 BOX=1
uint8 SPHERE=2
uint8 CYLINDER=3
uint8 CONE=4
# The type of the shape
uint8 type
# The dimensions of the shape
float64[] dimensions
# The meaning of the shape dimensions: each constant defines the index in the 'dimensions' array
# For the BOX type, the X, Y, and Z dimensions are the length of the corresponding
# sides of the box.
uint8 BOX_X=0
uint8 BOX_Y=1
uint8 BOX_Z=2
# For the SPHERE type, only one component is used, and it gives the radius of
# the sphere.
uint8 SPHERE_RADIUS=0
# For the CYLINDER and CONE types, the center line is oriented along
# the Z axis. Therefore the CYLINDER_HEIGHT (CONE_HEIGHT) component
# of dimensions gives the height of the cylinder (cone). The
# CYLINDER_RADIUS (CONE_RADIUS) component of dimensions gives the
# radius of the base of the cylinder (cone). Cone and cylinder
# primitives are defined to be circular. The tip of the cone is
# pointing up, along +Z axis.
uint8 CYLINDER_HEIGHT=0
uint8 CYLINDER_RADIUS=1
uint8 CONE_HEIGHT=0
uint8 CONE_RADIUS=1
"""
# Pseudo-constants
BOX = 1
SPHERE = 2
CYLINDER = 3
CONE = 4
BOX_X = 0
BOX_Y = 1
BOX_Z = 2
SPHERE_RADIUS = 0
CYLINDER_HEIGHT = 0
CYLINDER_RADIUS = 1
CONE_HEIGHT = 0
CONE_RADIUS = 1
__slots__ = ['type','dimensions']
_slot_types = ['uint8','float64[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
type,dimensions
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SolidPrimitive, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.type is None:
self.type = 0
if self.dimensions is None:
self.dimensions = []
else:
self.type = 0
self.dimensions = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.type))
length = len(self.dimensions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *self.dimensions))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.type,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.dimensions = struct.unpack(pattern, str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.type))
length = len(self.dimensions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(self.dimensions.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.type,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
| [
"simon.trendel@tum.de"
] | simon.trendel@tum.de |
752b3698e3219d1ca10929b4519afa6a755b629c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_210/139.py | f73e8657befbc6ae924e000ab345415d1206ec8b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | from math import pi
f = open('ans.txt', 'w')
c = int(input())
for i in range(1,c+1):
c, j = map(int, input().split())
if (c == 1 and j == 0) or (j == 1 and c == 0):
a, b = map(int, input().split())
f.write(f"Case #{i}: 2\n")
elif c == 2 or j == 2:
a, b = map(int, input().split())
a2, b2 = map(int, input().split())
if (0 < b2 +1440 - a <= 720) or (0 < b+1440-a2 <= 720)or (0 < b2-a <= 720) or (0 < b-a2 <= 720):
f.write(f"Case #{i}: 2\n")
else:
f.write(f"Case #{i}: 4\n")
else:
a, b = map(int, input().split())
a2, b2 = map(int, input().split())
f.write(f"Case #{i}: 2\n")
f.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
28759365923d8f64d0f27f0cc86e033caa637517 | ebc7607785e8bcd6825df9e8daccd38adc26ba7b | /python/baekjoon/2.algorithm/implementation/백준_청소년_상어.py | ec642c5b7e07bf253b3c70c6b6aca33a2a3575c5 | [] | no_license | galid1/Algorithm | 18d1b72b0d5225f99b193e8892d8b513a853d53a | 5bd69e73332f4dd61656ccdecd59c40a2fedb4b2 | refs/heads/master | 2022-02-12T07:38:14.032073 | 2022-02-05T08:34:46 | 2022-02-05T08:34:46 | 179,923,655 | 3 | 0 | null | 2019-06-14T07:18:14 | 2019-04-07T05:49:06 | Python | UTF-8 | Python | false | false | 2,421 | py | import sys, copy
def get_fish_idx(x, y, fish_list):
for idx, fish in enumerate(fish_list):
if fish[2][0] == x and fish[2][1] == y:
return idx
# 해당 좌표의 물고기가 없는 경우
return -1
def is_valid(x, y):
return 0 <= x < 4 and 0 <= y < 4
def move_fishes(sx, sy, fish_list):
global ds
for idx, fish in enumerate(fish_list):
cur_d = fish[1]
cx, cy = fish[2]
for _ in range(8):
nx, ny = cx+ds[cur_d][0], cy + ds[cur_d][1]
# 이동 가능
if (nx != sx or ny != sy) and is_valid(nx, ny):
will_fish_idx = get_fish_idx(nx, ny, fish_list)
# 이동하려는 칸에 물고기 존재
if will_fish_idx != -1:
fish_list[will_fish_idx][2] = [cx, cy]
fish_list[idx][1] = cur_d
fish_list[idx][2] = [nx, ny]
break
cur_d = (cur_d + 1) % 8
def solve(sx, sy, sd, eat_fish_sum, fish_list):
global ds, ans
move_fishes(sx, sy, fish_list)
cx, cy = sx, sy
while True:
nsx, nsy = cx+ds[sd][0], cy+ds[sd][1]
# 더이상 이동 불가
if not is_valid(nsx, nsy) or not fish_list:
ans = max(ans, eat_fish_sum)
return
will_eat_fish_idx = get_fish_idx(nsx, nsy, fish_list)
# 이동하려는 칸에 물고기 존재
if will_eat_fish_idx != -1:
will_eat_fish_num = fish_list[will_eat_fish_idx][0]
will_eat_fish_dir = fish_list[will_eat_fish_idx][1]
n_fish_list = copy.deepcopy(fish_list)
del n_fish_list[will_eat_fish_idx]
solve(nsx, nsy, will_eat_fish_dir, eat_fish_sum + will_eat_fish_num, n_fish_list)
cx, cy = nsx, nsy
# 입력
ds = [[-1, 0], [-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1]]
fishes = []
for i in range(4):
infos = list(map(int, sys.stdin.readline().strip().split(" ")))
for j in range(0, 8, 2):
# 번호, 방향 위치
fishes.append([infos[j], infos[j + 1] - 1, [i, j // 2]])
fishes.sort(key=lambda fish: fish[0])
zero_zero_fish_idx = get_fish_idx(0, 0, fishes)
eat_fish_sum = fishes[zero_zero_fish_idx][0]
sd = fishes[zero_zero_fish_idx][1]
del fishes[zero_zero_fish_idx]
ans = eat_fish_sum
solve(0, 0, sd, eat_fish_sum, fishes)
print(ans)
| [
"galid1@naver.com"
] | galid1@naver.com |
a247dd35bfeefdfd7695bf5a39e69db77ff084f0 | fc61c9d899ef9c8ddadac0875ab738df28be272e | /python/python-online-lec/ch4/python_advanced/ch04-01.py | cee48df2e2ff9a08a9daa072f373c86f7ad177cc | [
"MIT"
] | permissive | hbyyy/TIL | 4908b91d880df268f5f7b8a43703f673183f2e8b | e89ae2913a8a38eb7f480a9ec2324c3ac11e309e | refs/heads/master | 2022-12-21T04:34:13.896078 | 2021-05-24T13:19:57 | 2021-05-24T13:19:57 | 219,916,943 | 0 | 0 | MIT | 2022-12-12T05:15:04 | 2019-11-06T05:12:55 | Jupyter Notebook | UTF-8 | Python | false | false | 215 | py | # first class function
print(set(dir(iter([1,2,3,4,5]))) - set(dir(range(10))) )
def factorial(n: int) -> int:
if n == 1:
return n
return n * factorial(n-1)
print([*map(factorial, range(1, 6))])
| [
"qjaduddl94@gmail.com"
] | qjaduddl94@gmail.com |
d0b6e06ab67e1a7dec9b3228f774cbce4c4df7db | e59e711e81536f027b10f7033a698ab9e39d489c | /Geometry/CaloEventSetup/python/HFNoseTopology_cfi.py | 031fe99595ff33182b0da4158489904226849541 | [
"Apache-2.0"
] | permissive | AndreaBellora/cmssw | 73e5f668dfd188bfedcb532b2bd364c486561b03 | 26cf1918ad4133f8178e303a68d97fc58fbeac9f | refs/heads/master | 2023-07-20T11:44:57.079220 | 2018-07-31T12:08:32 | 2018-07-31T12:08:32 | 143,017,412 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | import FWCore.ParameterSet.Config as cms
#
# This cfi should be included to build the HGCal Topologies
#
HFNoseTopologyBuilder = cms.ESProducer("HGCalTopologyBuilder",
Name = cms.string("HFNoseSensitive"),
Type = cms.int32(6)
)
| [
"sunanda.banerjee@cern.ch"
] | sunanda.banerjee@cern.ch |
a802683d34b658d3d0748f9a476e3ecdc62a1c39 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /carbon/common/lib/markdown/extensions/def_list.py | 4f0de89912416b74ca84996e379a5edc82dfedb1 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\markdown\extensions\def_list.py
import re
import markdown
from markdown.util import etree
class DefListProcessor(markdown.blockprocessors.BlockProcessor):
RE = re.compile('(^|\\n)[ ]{0,3}:[ ]{1,3}(.*?)(\\n|$)')
NO_INDENT_RE = re.compile('^[ ]{0,3}[^ :]')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
terms = [ l.strip() for l in block[:m.start()].split('\n') if l.strip() ]
block = block[m.end():]
no_indent = self.NO_INDENT_RE.match(block)
if no_indent:
d, theRest = block, None
else:
d, theRest = self.detab(block)
if d:
d = '%s\n%s' % (m.group(2), d)
else:
d = m.group(2)
sibling = self.lastChild(parent)
if not terms and sibling.tag == 'p':
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling and sibling.tag == 'dl':
dl = sibling
if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
dl = etree.SubElement(parent, 'dl')
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
ITEM_TYPES = ['dd']
LIST_TYPES = ['dl']
def create_item(self, parent, block):
dd = markdown.etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
md.parser.blockprocessors.add('defindent', DefListIndentProcessor(md.parser), '>indent')
md.parser.blockprocessors.add('deflist', DefListProcessor(md.parser), '>ulist')
def makeExtension(configs = {}):
return DefListExtension(configs=configs)
| [
"le02005@163.com"
] | le02005@163.com |
7c87c227337384c92c3b5bf13a083449e40b2b7e | a982d8f9fd99c4af077dcafc1092a0f3779d9b39 | /web_pjt/web_pjt/articles/views.py | 1390a35c247ee3cc9ccd0f2dabc9c68159866dd9 | [] | no_license | do-park/DDDJ | cbe4586a583618b616eaab19014ba16e69b38f06 | 22b53bb4822d7e0b61f5a8226bbadb9b9cb20ea0 | refs/heads/master | 2023-01-12T14:30:16.303107 | 2020-11-07T16:09:11 | 2020-11-07T16:09:11 | 273,131,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,077 | py | from django.shortcuts import render, redirect, get_object_or_404
from .models import Article, Comment
from movies.models import Movie
from .forms import ArticleForm, CommentForm
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from datetime import timedelta
import datetime
from django.utils import timezone
def index(request):
articles = Article.objects.order_by('-pk')
check_now = timezone.now() # 현재 시간 기준으로 6시간 이내에 작성한 글에는 new를 띄우기 위한 timedelta값
check_delta = timezone.now() - timedelta(hours=6) # 이 값을 index.html로 넘겨 html단에서 처리한다.
paginator = Paginator(articles, 15) # 숫자만 변경하면 한 페이지에 들어갈 글 수를 변경할 수 있음
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'articles': articles,
'page_obj': page_obj,
'check_delta' : check_delta,
'check_now' : check_now,
}
return render(request, 'articles/index.html', context)
@login_required
def create(request):
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
article = form.save(commit=False)
article.user = request.user
article.save()
return redirect('articles:detail', article.pk)
else:
form = ArticleForm()
context = {
'form': form,
}
return render(request, 'articles/form.html', context)
@login_required
def detail(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
movie = get_object_or_404(Movie, pk=article.movie_title.pk)
form = CommentForm()
context = {
'article': article,
'movie': movie,
'form': form,
}
return render(request, 'articles/detail.html', context)
@login_required
def update(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.user == article.user:
if request.method == "POST":
form = ArticleForm(request.POST, instance=article)
if form.is_valid():
updated = form.save()
return redirect('articles:detail', updated.pk)
else:
form = ArticleForm(instance=article)
context = {
'form': form,
}
return render(request, 'articles/form.html', context)
else:
return redirect('articles:detail', article_pk)
@login_required
def delete(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.user == article.user:
article.delete()
return redirect('articles:index')
else:
return redirect('articles:detail', article_pk)
@login_required
def comment_create(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.article = article
comment.user = request.user
comment.save()
return redirect('articles:detail', article.pk)
@login_required
def comment_delete(request, article_pk, comment_pk):
comment = get_object_or_404(Comment, pk=comment_pk)
if request.user == comment.user:
comment.delete()
return redirect('articles:detail', article_pk)
@login_required
def comment_update(request, article_pk, comment_pk):
comment = get_object_or_404(Comment, pk=comment_pk)
if request.user == comment.user:
form = CommentForm(request.POST, instance=comment)
if request.method == 'POST':
if form.is_valid():
form.save()
return redirect('articles:detail', article_pk)
else:
form = CommentForm(instance=comment)
context = {
'form': form,
}
return render(request, 'articles/form.html', context)
else:
return redirect('articles:detail', article_pk)
@login_required
def search(request):
kwd = request.COOKIES['kwd'] # articles/index.html에서 저장한 키워드를 쿠키에서 꺼낸다.
articles = Article.objects.filter(title__contains=kwd).order_by('-pk') # 키워드를 제목에 포함하는 글 검색해 pk 역순으로 정렬
paginator = Paginator(articles, 15)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'kwd': kwd,
'articles': articles,
'page_obj': page_obj,
}
return render(request, 'articles/index.html', context)
@login_required
def best(request):
articles = Article.objects.filter(rank=10).order_by('-pk')
paginator = Paginator(articles, 15)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'articles': articles,
'page_obj': page_obj,
}
return render(request, 'articles/index.html', context)
@login_required
def worst(request):
articles = Article.objects.filter(rank=0).order_by('-pk')
paginator = Paginator(articles, 15)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {
'articles': articles,
'page_obj': page_obj,
}
return render(request, 'articles/index.html', context) | [
"dohee.pa@gmail.com"
] | dohee.pa@gmail.com |
e61c31f37c9964c7ffd39869a8d2ecb2b8a7ced8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_alibis.py | d0171b2a3add283c5bec1d670d6db8781a0e1ca8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.nouns._alibi import _ALIBI
#calss header
class _ALIBIS(_ALIBI, ):
def __init__(self,):
_ALIBI.__init__(self)
self.name = "ALIBIS"
self.specie = 'nouns'
self.basic = "alibi"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5f7d39150e878a66ec23e062dd7d70b9bcd804e6 | 2af1e6357f51d0d08b1a991e2bd922b7bdc8c0b6 | /baekjoon/not_accepted/1167 트리의 지름_floyd-warshall_2.py | 40e4929db54865cabac4b0a5397d10b91363bfe1 | [] | no_license | grasshopperTrainer/coding_practice | 530e9912b10952c866d35d69f12c99b96959a22d | d1e5e6d6fa3f71f1a0105940fff1785068aec8b0 | refs/heads/master | 2023-06-01T13:30:15.362657 | 2021-06-08T08:40:15 | 2021-06-08T08:40:15 | 267,359,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | # reducing memory usage
# time out
from sys import stdin
def solution(N, routes):
MAX = 10_000
tree = {}
for r in routes:
depart = r[0]
it = iter(r[1:])
for n in it:
tree[(depart, n)] = next(it)
for m in range(1, N+1):
for i in range(1, N+1):
for j in range(1, N+1):
if i != j and (i, m) in tree and (m, j) in tree:
t = tree[(i, m)] + tree[(m, j)]
if tree.setdefault((i, j), MAX) > t:
tree[(i, j)] = t
return max(tree.values())
N, routes = 0, []
for i, row in enumerate(stdin.readlines()):
if i == 0:
N = int(row)
else:
routes.append([int(c) for c in row.strip().split(' ')[:-1]])
print(solution(N, routes)) | [
"grasshoppertrainer@gmail.com"
] | grasshoppertrainer@gmail.com |
6a03f62b4905e3c7d1cdbf69926f721ac85d51c6 | 3344e1489e1c8616181314a6524ff593f642c2c9 | /dl/chapter2/or_func.py | 6a2fcb88fe740c410fecab9cb1a821c2f0694d73 | [] | no_license | 21nkyu/dl | 2f0fb8405f7e10bd02e31efa9334921b8df97f97 | cdcbd3a2bedaa4542d7dbacbf027396fc70ce3f4 | refs/heads/main | 2023-08-31T16:38:36.615165 | 2021-11-03T15:20:22 | 2021-11-03T15:20:22 | 422,246,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | import numpy as np
def or_func(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.2
tmp = np.sum(x*w)+b
if tmp <= 0:
return 0
return 1 | [
"adele7178@naver.com"
] | adele7178@naver.com |
eb9292b595bef60e166068873d143b42ad54a2d8 | e86dbbe3f0650b4d1f4039211e4702859b6b5bfa | /pyscf/delta_scf/scf_s.py | aff04e8034bf02661bdd32a2210175d2181c7620 | [] | no_license | sapatha2/cuo | 07a568b021b69e6448763d232b5f63857f9e2932 | 006b190ae29de0af24c0fd905186ccda0c9ade94 | refs/heads/master | 2020-03-24T02:01:50.269432 | 2019-06-14T17:52:53 | 2019-06-14T17:52:53 | 142,359,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,932 | py | #PySCF input file for CuO calculations
import json
from pyscf import gto,scf,mcscf, fci,lo,ci,cc
from pyscf.scf import ROHF,ROKS,UHF,UKS, addons
import numpy as np
import pandas as pd
df=json.load(open("trail.json"))
charge=0
#Additional states
S=[1,1,1,1,1]
symm_dict=[
{'A1':(5,5),'E1x':(3,3),'E1y':(3,2),'E2x':(1,1),'E2y':(1,1)},
{'A1':(6,5),'E1x':(3,3),'E1y':(2,2),'E2x':(1,1),'E2y':(1,1)}, #(pi -> s)
{'A1':(6,6),'E1x':(3,2),'E1y':(2,2),'E2x':(1,1),'E2y':(1,1)}, #(2pi -> 2s)
{'A1':(5,4),'E1x':(3,3),'E1y':(3,3),'E2x':(1,1),'E2y':(1,1)}, #(z -> pi)
{'A1':(5,5),'E1x':(3,3),'E1y':(3,3),'E2x':(1,1),'E2y':(1,0)}, #(dd -> pi)
]
datacsv={}
datacsv={}
for nm in['run','method','basis','pseudopotential','bond-length','S','E','conv']:
datacsv[nm]=[]
for run in range(len(S)):
for r in [1.725]:
for method in ['B3LYP']:
for basis in ['vdz','vtz']:
for el in ['Cu']:
if(S[run]>0):
molname=el+'O'
mol=gto.Mole()
mol.ecp={}
mol.basis={}
for e in [el,'O']:
mol.ecp[e]=gto.basis.parse_ecp(df[e]['ecp'])
mol.basis[e]=gto.basis.parse(df[e][basis])
mol.charge=charge
mol.spin=S[run]
mol.build(atom="%s 0. 0. 0.; O 0. 0. %g"%(el,r),verbose=4,symmetry=True)
if basis=='vdz':
#These are the orbitals for which we want to read-in an initial DM guess
TM_3s_orbitals = []
TM_4s_orbitals = []
TM_3p_orbitals = []
TM_3d_orbitals = []
O_2s_orbitals = []
O_2p_orbitals = []
aos=mol.ao_labels()
print('')
print('AO labels')
print(aos)
print('')
for i,x in enumerate(aos):
#Find the TM 3s labels
if (('3s' in x) and (el in x)):
TM_3s_orbitals.append(i)
#Find the TM 4s labels
if (('4s' in x) and (el in x)):
TM_4s_orbitals.append(i)
#Find the TM 3p labels
if (('3p' in x) and (el in x)):
TM_3p_orbitals.append(i)
#Find the TM 3d labels
if (('3d' in x) and (el in x)):
TM_3d_orbitals.append(i)
#Find the O 2s labels
if (('2s' in x) and ('O' in x)):
O_2s_orbitals.append(i)
#Find the O 2p labels
if (('2p' in x) and ('O' in x)):
O_2p_orbitals.append(i)
#There should be 5 3d TM orbitals. Let's check this!
assert len(TM_3d_orbitals)==5
##############################################################################################
if("U" in method):
if("HF" in method):
m=UHF(mol)
else:
m=UKS(mol)
m.xc=method[1:]
else:
if(method=="ROHF"):
m=ROHF(mol)
else:
m=ROKS(mol)
m.xc=method
##############################################################################################
dm=np.zeros(m.init_guess_by_minao().shape)
#The 3s is always doubly-occupied for the TM atom
for s in TM_3s_orbitals:
for spin in [0,1]:
dm[spin,s,s]=1
#The 4s is always at least singly-occupied for the TM atom
for s in TM_4s_orbitals:
dm[0,s,s]=1
#Control the 4s double-occupancy
if (el=='Cr'):
for s in TM_4s_orbitals:
print('We are singly filling this 4s-orbital: '+np.str(aos[s]) )
dm[1,s,s]=0
#Always doubly-occupy the 3p orbitals for the TM atom
for p in TM_3p_orbitals:
for s in [0,1]:
dm[s,p,p]=1
#Control the 3d occupancy for CrO...
if (el=='Cr'):
for i,d in enumerate(TM_3d_orbitals):
#These are the 3d orbitals we want to fill to get the correct symmetry
if ( ('xy' in aos[d]) or ('yz' in aos[d]) or ('z^2' in aos[d]) or ('x2-y2' in aos[d]) ):
print('We are singly filling this d-orbital: '+np.str(aos[d]) )
dm[0,d,d]=1
m.chkfile=el+basis+"_r"+str(r)+"_s"+str(S[run])+"_"+method+"_"+str(run)+".chk"
m.irrep_nelec = symm_dict[run]
m.max_cycle=100
m = addons.remove_linear_dep_(m)
m.conv_tol=1e-6
#Only need an initial guess for CrO and CuO...
if (el=='Cr' or el=='Cu'):
total_energy=m.kernel(dm)
else:
total_energy=m.kernel()
#Compute the Mulliken orbital occupancies...
m.analyze()
assert(np.sum(m.mo_occ)==25)
#Once we get past the vdz basis, just read-in the existing chk file...
else:
##############################################################################################
if("U" in method):
if("HF" in method):
m=UHF(mol)
else:
m=UKS(mol)
m.xc=method[1:]
else:
if(method=="ROHF"):
m=ROHF(mol)
else:
m=ROKS(mol)
m.xc=method
##############################################################################################
dm=m.from_chk(el+'vdz'+"_r"+str(r)+"_s"+str(S[run])+"_"+method+"_"+str(run)+".chk")
m.chkfile=el+basis+"_r"+str(r)+"_s"+str(S[run])+"_"+method+"_"+str(run)+".chk"
m.irrep_nelec = symm_dict[run]
m.max_cycle=100
m = addons.remove_linear_dep_(m)
m.conv_tol=1e-6
total_energy=m.kernel(dm)
m.analyze()
assert(np.sum(m.mo_occ)==25)
datacsv['run'].append(run)
datacsv['bond-length'].append(r)
datacsv['S'].append(S[run])
datacsv['method'].append(method)
datacsv['basis'].append(basis)
datacsv['pseudopotential'].append('trail')
datacsv['E'].append(total_energy)
datacsv['conv'].append(m.converged)
pd.DataFrame(datacsv).to_csv("cuo.csv",index=False)
| [
"sapatha2@illinois.edu"
] | sapatha2@illinois.edu |
c4cdc5f902e454c3555367530422548c79313419 | 319d66c48f51e3d98e9df953d406a6f545b72363 | /Python/TwoStrings.py | 93cdab9452eab2fbcc476f7b088f5e8746fc5d76 | [
"Apache-2.0"
] | permissive | WinrichSy/HackerRank-Solutions | 291bc7a32dc4d9569d7028d6d665e86869fbf952 | ed928de50cbbbdf0aee471630f6c04f9a0f69a1f | refs/heads/master | 2022-07-18T15:43:48.865714 | 2020-05-16T00:21:56 | 2020-05-16T00:21:56 | 255,453,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | #Two Strings
#https://www.hackerrank.com/challenges/two-strings/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the twoStrings function below.
def twoStrings(s1, s2):
s1_set = list(set(s1))
s2_set = list(set(s2))
for i in s1_set:
if i in s2_set:
return 'YES'
return 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
s1 = input()
s2 = input()
result = twoStrings(s1, s2)
fptr.write(result + '\n')
fptr.close()
| [
"winrichsy@gmail.com"
] | winrichsy@gmail.com |
970a27ad5a6f180d6f567b10a24309540a2f364f | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/secondaires/familier/types/barre_attache.py | 697fc617228ad33fb007e25b34c48b074ec5d852 | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type barre d'attache."""
from primaires.interpreteur.editeur.entier import Entier
from primaires.objet.types.base import BaseType
class BarreAttache(BaseType):
"""Type d'objet: barre d'attache."""
nom_type = "barre d'attache"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
BaseType.__init__(self, cle)
self.nb_places = 1
self.etendre_editeur("b", "nombre de places", Entier,
self, "nb_places")
def travailler_enveloppes(self, enveloppes):
"""Travail sur les enveloppes"""
nb = enveloppes["b"]
nb.apercu = "{objet.nb_places}"
nb.prompt = "Nombre de places disponibles sur cette barre " \
"d'attache : "
nb.aide_courte = \
"Entrez le |ent|nombre de places|ff| disponibles sur cette " \
"barre d'attache\nou |cmd|/|ff| pour revenir à la fenêtre " \
"parente.\n\nNombre de places actuel : {objet.nb_places}"
| [
"kredh@free.fr"
] | kredh@free.fr |
7be2abeab2df7f1d5bc04459eee30d4129b87489 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-fractionDigits-1-1.py | 8d454b0ee6aa32933dfcdcb7ea28358723d58032 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 298 | py | from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_fraction_digits_1_xsd.nistschema_sv_iv_atomic_integer_fraction_digits_1 import NistschemaSvIvAtomicIntegerFractionDigits1
obj = NistschemaSvIvAtomicIntegerFractionDigits1(
value=-999999999999999999
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
28c4653058cf8e268d6f9df167b0f7f1436718d3 | 5f9ebed60f6f2568b7c4a34505ff9e36b77968f3 | /figure_paper_xpol_composites.py | 7ef77c6a90cb4f682d27bf7c45b258a63c37460c | [] | no_license | rvalenzuelar/xpol_vis | 3497b990694f033c711b7a4e2f0c199d46567efd | 181fd831afaafa7e7018a2425b0ee9b2a820f649 | refs/heads/master | 2020-04-05T14:09:37.028300 | 2018-04-26T20:20:32 | 2018-04-26T20:20:32 | 50,193,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,449 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 22 11:50:10 2016
@author: raul
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import xpol_tta_analysis as xta
import numpy as np
import mpl_toolkits.axisartist as AA
import matplotlib as mpl
from matplotlib.gridspec import GridSpecFromSubplotSpec as gssp
from rvtools import add_colorbar
mpl.rcParams['font.size'] = 15
'''
use:
%run -i figure_paper_xpol_composites.py
if instances do not exist in iPython namespace
then create them
'''
# params = dict(wdir_surf=130,wdir_wprof=170,
# rain_czd=0.25,nhours=2)
params = dict(wdir_thres=150,
rain_czd=0.25,
nhours=2
)
try:
xall
except NameError:
''' 02Feb04 (12) is excluded for RHIs only internally in
the process method
'''
xall = xta.process(case=[8, 9, 10, 11, 12, 13, 14], params=params)
scale = 1.2
fig = plt.figure(figsize=(7.5*scale, 11*scale))
gs0 = gridspec.GridSpec(1, 2,
wspace=0.01)
height_ratios = [2.5, 1, 2.5, 1]
gs00 = gssp(4, 1,
subplot_spec=gs0[0],
height_ratios=height_ratios,
hspace=0)
gs01 = gssp(4, 1,
subplot_spec=gs0[1],
height_ratios=height_ratios,
hspace=0)
ax0 = plt.subplot(gs00[0],gid='(a)')
ax1 = plt.subplot(gs01[0],gid='(b)')
ax2 = plt.subplot(gs00[1],gid='(c)')
ax3 = plt.subplot(gs01[1],gid='(d)')
ax4 = plt.subplot(gs00[2],gid='(e)')
ax5 = plt.subplot(gs01[2],gid='(f)')
ax6 = plt.subplot(gs00[3],gid='(g)')
ax7 = plt.subplot(gs01[3],gid='(h)')
axes = [ax0, ax1, ax2, ax3, ax4, ax5, ax6, ax7]
cvalues1 = range(-30,34,4)
cvalues2 = range(0,32,2)
ax0.text(0.5, 1.05, 'TTA',transform=ax0.transAxes,
fontsize=15,weight='bold')
ax1.text(0.5, 1.05, 'NO-TTA',transform=ax1.transAxes,
ha='center',fontsize=15,weight='bold')
xall.plot(ax=ax0, name='contourf',mode='ppi',target='vr',
cbar=dict(loc='right',invisible=True),
terrain=True,bmap=True,qc=True,
cvalues=cvalues1)
xall.plot(ax=ax1, name='contourf',mode='ppi',target='vr',
cbar=dict(loc='right',label='[m/s]'),
cvalues=cvalues1,
terrain=True,bmap=True,qc=True,
tta=False)
xall.plot(ax=ax2, name='contourf',mode='rhi',target='vr',
cbar=dict(loc='right',invisible=True),
cvalues=cvalues2,
qc=True,
xticklabs=False)
xall.plot(ax=ax3, name='contourf',mode='rhi',target='vr',
cbar=dict(loc='right',label='[m/s]',labelpad=13),
cvalues=cvalues2,
xticklabs=False,
yticklabs=False,
qc=True,
tta=False)
xall.plot(ax=ax4, name='contourf',mode='ppi',target='z',
cbar=dict(loc='right',invisible=True),
terrain=True,bmap=True,qc=True,
sector=range(135,180),
cvalues=cvalues1)
hdtm = xall.plot(ax=ax5, name='contourf',mode='ppi',target='z',
cbar=dict(loc='right',label='[%]'),
cvalues=cvalues1,
terrain=True,bmap=True,qc=True,
sector=range(135,180),
tta=False)
xall.plot(ax=ax6, name='contourf',mode='rhi',target='z',
cbar=dict(loc='right',invisible=True),
qc=True,
cvalues=cvalues2)
xall.plot(ax=ax7, name='contourf',mode='rhi',target='z',
cbar=dict(loc='right',invisible=True),
cvalues=cvalues2,
yticklabs=False,
qc=True,
tta=False)
''' add axis id '''
for ax in axes:
gid = ax.get_gid()
if gid in ['(a)','(b)','(e)','(f)']:
ax.text(0.9,0.93,gid,size=14,
weight='bold',
transform=ax.transAxes,
color='w')
else:
ax.text(0.9,0.82,gid,size=14,
weight='bold',
transform=ax.transAxes)
if gid in ['(c)','(d)']:
ax.set_xlabel('')
''' make floating axis colorbar for terrain '''
# [left, bott, wid, hgt]
axaa = AA.Axes(fig,[-0.38,0.74,0.5,0.1])
axaa.tick_params(labelsize=25)
add_colorbar(axaa,hdtm,label='',
ticks=range(0,1001,1000),
ticklabels=['0','1.0'])
fig.add_axes(axaa)
axaa.remove() # leave only colorbar
ax0.text(-0.15, 0.93,'[km]',transform=ax0.transAxes)
''' add PPI arrows '''
def arrow_end(st_co,r,az):
en_co=[st_co[0],st_co[1]]
en_co[0]+=r*np.sin(np.radians(az))
en_co[1]+=r*np.cos(np.radians(az))
return (en_co[0],en_co[1])
tta_arrows={'arrow1':{'c0':(140,115),'az':300},
'arrow2':{'c0':(120,98),'az':325},
'arrow3':{'c0':(90,93),'az':340},
'arrow4':{'c0':(60,98),'az':345},
'arrow5':{'c0':(35,105),'az':350},
'arrow6':{'c0':(15,115),'az':355},
}
ntta_arrows={'arrow1':{'c0':(130,115),'az':335},
'arrow2':{'c0':(105,112),'az':350},
'arrow3':{'c0':(80,115),'az':0},
'arrow4':{'c0':(55,123),'az':5},
'arrow5':{'c0':(35,130),'az':5},
'arrow6':{'c0':(15,140),'az':10},
}
# scale = 4.1 # use for output figure
# #scale = 1.0 # use for iPython figure
# length = 30
# arrows=[tta_arrows,ntta_arrows]
# axes = [axes[0],axes[1]]
# for ax,arrow in zip(axes,arrows):
# for _,arr in arrow.iteritems():
# c0 = tuple(v*scale for v in arr['c0'])
# az = arr['az']
# ax.annotate("",
# xy = arrow_end(c0,length*scale,az),
# xytext = c0,
# xycoords = 'axes pixels',
# textcoords = 'axes pixels',
# zorder = 1,
# arrowprops = dict(shrinkA=6,
# shrinkB=6,
# fc='w',
# ec='k',
# lw=1))
#
# ''' single arrows '''
# c0 = tuple(v*scale for v in (145,34))
# axes[0].annotate("",
# xy = arrow_end(c0,length*scale,355),
# xytext = c0,
# xycoords = 'axes pixels',
# textcoords = 'axes pixels',
# zorder = 1,
# arrowprops = dict(shrinkA=6,
# shrinkB=6,
# fc='w',
# ec='k',
# lw=1))
# c0 = tuple(v*scale for v in (140,67))
# axes[1].annotate("",
# xy = arrow_end(c0,length*scale,0),
# xytext = c0,
# xycoords = 'axes pixels',
# textcoords = 'axes pixels',
# zorder = 1,
# arrowprops = dict(shrinkA=6,
# shrinkB=6,
# fc='w',
# ec='k',
# lw=1))
#
# ''' add RHI arrow '''
# ax2.annotate("",
# xy = (150*scale, 25*scale),
# xytext = (25*scale,3*scale),
# xycoords='axes pixels',
# textcoords='axes pixels',
# arrowprops=dict(shrinkA=5,
# shrinkB=5,
# fc="w", ec="k",
# connectionstyle="arc3,rad=-0.1"))
plt.show()
# fname='/home/raul/Desktop/fig_xpol_composite.png'
# plt.savefig(fname, dpi=300, format='png',papertype='letter',
# bbox_inches='tight')
| [
"raul.valenzuela@colorado.edu"
] | raul.valenzuela@colorado.edu |
14ae6c673eef938af1474c912a457a9e27db17f9 | 2cb3447b55b3a298ba744f2fe67aaae16938c66d | /xmldirector/plonecore/demo/xmldocument.py | 36c34ddcd099b1a6dd349501b5b31e08ddfeddd3 | [] | no_license | tecumsehmaverick/xmldirector.plonecore | e2bc8fe0c065f73bd27438184cf4552c2df668e8 | 30f28878d6de1ffd1baf2fa0e6d903a7da204c7b | refs/heads/master | 2020-12-28T20:30:29.720830 | 2015-01-23T17:01:34 | 2015-01-23T17:01:34 | 29,760,631 | 0 | 1 | null | 2015-01-24T01:01:02 | 2015-01-24T01:01:02 | null | UTF-8 | Python | false | false | 1,337 | py | # -*- coding: utf-8 -*-
################################################################
# xmldirector.plonecore
# (C) 2014, Andreas Jung, www.zopyx.com, Tuebingen, Germany
################################################################
"""
A sample Dexterity content-type implementation using
all XML field types.
"""
from zope.interface import implements
from plone.dexterity.content import Item
from plone.supermodel import model
from xmldirector.plonecore.i18n import MessageFactory as _
from xmldirector.plonecore.dx import dexterity_base
from xmldirector.plonecore.dx.xml_binary import XMLBinary
from xmldirector.plonecore.dx.xml_image import XMLImage
from xmldirector.plonecore.dx.xml_field import XMLText
from xmldirector.plonecore.dx.xpath_field import XMLXPath
class IXMLDocument(model.Schema):
xml_content = XMLText(
title=_(u'XML Content'),
required=False
)
xml_xpath = XMLXPath(
title=_(u'XML XPath expression'),
description=_(u'Format: field=<fieldname>,xpath=<xpath expression>'),
required=False
)
xml_binary = XMLBinary(
title=_(u'XML Binary'),
required=False
)
xml_image = XMLImage(
title=_(u'XML Image'),
required=False
)
class XMLDocument(Item, dexterity_base.Mixin):
implements(IXMLDocument)
| [
"yet@gmx.de"
] | yet@gmx.de |
8dc8c99c234c8352a74622536467e1147f3e3197 | 474c7eab287cb3ebd3788a1cac72a6dffa9941bc | /Leetcode/ana.py | 5747471217ef057955d52e3a9d2c7d38e609badf | [] | no_license | shenlant314/Reptile | bf85b7d8e19defa65a6a30140732bf37222e98da | 42f5ea8681f8c477de9db109e9a0d5dba2dfccae | refs/heads/master | 2023-04-17T06:07:52.455713 | 2021-04-19T23:39:02 | 2021-04-19T23:39:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 4 20:54:36 2020
@author: Lee
"""
from pymongo import MongoClient
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Geo,Line,WordCloud,Pie,Parallel,PictorialBar,Bar,Polar
from pyecharts.globals import ChartType, SymbolType
# Show at most 20 rows but every column when printing DataFrames.
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', None)
# Scraped problems live in the local MongoDB database 'leetcode',
# collection '2'.
db=MongoClient().leetcode
data=pd.DataFrame(list(db['2'].find()))
# Drop Mongo's internal _id column.
data=data.drop(columns='_id',axis=0)
# 'acRate' arrives as a string like "45.6%"; strip the '%' and parse.
data['acRate']=data['acRate'].apply(lambda x:float(x[:-1]))
# Most-liked problems (original comment in Chinese).
like=data.sort_values(by="likes",ascending=False)
print(like)
# Bar chart of the ten most-liked problems; title/filename strings are
# user-facing Chinese text and are kept verbatim.
bar1=(
    Bar()
    .add_xaxis(list(like['title'])[:10])
    .add_yaxis("", list(like['likes'])[:10], category_gap="50%")
    .set_global_opts(title_opts=opts.TitleOpts(title="Leetcode点赞最多的十道题"),
                     xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=-15,font_weight='bold')))
)
bar1.render('Leetcode点赞最多的十道题.html')
# Highest acceptance rate
print(data.sort_values(by="acRate",ascending=False))
# Most submissions
print(data.sort_values(by="totalSubmissionRaw",ascending=False))
# Statistics grouped by difficulty
data_diff=data.groupby('difficulty').mean()
print(data_diff)
data_diff=data.groupby('difficulty').count()
print(data_diff)
# Per-difficulty rankings by likes, acceptance rate and submissions.
data_diff=data.groupby('difficulty')
for a,b in data_diff:
    print(b.sort_values(by="likes",ascending=False))
    print(b.sort_values(by="acRate",ascending=False))
    print(b.sort_values(by="totalSubmissionRaw",ascending=False))
"870407139@qq.com"
] | 870407139@qq.com |
e95e5d6df88dae462e0f7709393296bff7e29fa5 | 9d131148c2189f73e63b76c7a097cd4cd64ee18f | /analyze.py | 1e3e75212c4eaa4f94c68423f94ae41c15889a9d | [] | no_license | osmmaz/Twitter_Analytics | 2e6f51b5f6be58d4dd0d6474c9a1b309b85cb099 | 829a27a038a5ff8270378cace4bfaa4b6a778bb9 | refs/heads/master | 2020-04-15T00:43:18.497042 | 2017-08-26T14:55:46 | 2017-08-26T14:55:46 | 164,250,306 | 1 | 0 | null | 2019-01-05T20:51:56 | 2019-01-05T20:51:56 | null | UTF-8 | Python | false | false | 5,441 | py | '''
Author: Adil Moujahid
Description: Script for analyzing tweets to compare the popularity of 3 programming languages: Python, Javascript and ruby
Reference: http://adilmoujahid.com/posts/2014/07/twitter-analytics/
'''
import json
import pandas as pd
import matplotlib.pyplot as plt
import re
def word_in_text(word, text):
    """Return True if *word* occurs in *text*, case-insensitively.

    The word is matched as literal text: regex metacharacters are escaped
    first.  The original passed the raw word straight to ``re.search`` as
    a pattern, so a query like ``"c++"`` raised ``re.error``.
    """
    pattern = re.escape(word.lower())
    return re.search(pattern, text.lower()) is not None
def extract_link(text):
    """Return the first http(s) or www link found in *text*, or ''."""
    found = re.search(r'https?://[^\s<>"]+|www\.[^\s<>"]+', text)
    return found.group() if found else ''
def main():
    """Run the full tweet-analysis pipeline.

    Loads raw tweets from 'output.txt', charts tweet counts by language
    and country, compares mentions of python/javascript/ruby (first over
    all tweets, then restricted to programming-related ones), saves each
    chart as a PNG and prints a sample of links extracted from relevant
    Python tweets.

    Fixes over the original: removes a leftover ``IPython.embed()`` debug
    stop that halted the script, corrects the "Second attempt" log line
    (it printed "First attempt"), closes the input file, narrows the bare
    ``except``, and makes the raw ranking tolerant of languages with zero
    matching tweets (``value_counts()[True]`` raised KeyError).
    """
    def count_true(frame, column):
        # value_counts() has no True bucket when nothing matched; report 0
        # instead of letting the KeyError escape.
        try:
            return frame[column].value_counts()[True]
        except KeyError:
            return 0

    def plot_top5(counts, xlabel, title, color, filename):
        # Bar chart of the five largest buckets in `counts`.
        fig, ax = plt.subplots()
        ax.tick_params(axis='x', labelsize=15)
        ax.tick_params(axis='y', labelsize=10)
        ax.set_xlabel(xlabel, fontsize=15)
        ax.set_ylabel('Number of tweets', fontsize=15)
        ax.set_title(title, fontsize=15, fontweight='bold')
        counts[:5].plot(ax=ax, kind='bar', color=color)
        plt.savefig(filename, format='png')

    def plot_ranking(counts, title, filename):
        # Bar chart comparing the three programming languages.
        x_pos = list(range(len(prg_langs)))
        width = 0.8
        fig, ax = plt.subplots()
        plt.bar(x_pos, counts, width, alpha=1, color='g')
        ax.set_ylabel('Number of tweets', fontsize=15)
        ax.set_title(title, fontsize=10, fontweight='bold')
        ax.set_xticks([p + 0.4 * width for p in x_pos])
        ax.set_xticklabels(prg_langs)
        plt.grid()
        plt.savefig(filename, format='png')

    # Reading Tweets: one JSON document per line; skip lines that do not
    # parse (keep-alives, truncated writes).
    print('Reading Tweets\n')
    tweets_data_path = 'output.txt'
    tweets_data = []
    with open(tweets_data_path, "r") as tweets_file:
        for line in tweets_file:
            try:
                tweets_data.append(json.loads(line))
            except ValueError:
                continue

    # Structuring Tweets
    print('Structuring Tweets\n')
    tweets = pd.DataFrame()
    tweets['text'] = [tweet['text'] for tweet in tweets_data]
    tweets['lang'] = [tweet['lang'] for tweet in tweets_data]
    tweets['country'] = [tweet['place']['country'] if tweet['place'] is not None else None
                         for tweet in tweets_data]

    # Analyzing Tweets by Language
    print('Analyzing tweets by language\n')
    plot_top5(tweets['lang'].value_counts(), 'Languages', 'Top 5 languages',
              'red', 'tweet_by_lang')

    # Analyzing Tweets by Country
    print('Analyzing tweets by country\n')
    plot_top5(tweets['country'].value_counts(), 'Countries', 'Top 5 countries',
              'blue', 'tweet_by_country')

    # Adding programming languages columns to the tweets DataFrame
    print('Adding programming languages tags to the data\n')
    prg_langs = ['python', 'javascript', 'ruby']
    tweets['python'] = tweets['text'].apply(lambda tweet: word_in_text('python', tweet))
    tweets['javascript'] = tweets['text'].apply(lambda tweet: word_in_text('javascript', tweet))
    tweets['ruby'] = tweets['text'].apply(lambda tweet: word_in_text('ruby', tweet))

    # Analyzing Tweets by programming language: First attempt (raw data)
    print('Analyzing tweets by programming language: First attempt\n')
    plot_ranking([count_true(tweets, lang) for lang in prg_langs],
                 'Ranking: python vs. javascript vs. ruby (Raw data)',
                 'tweet_by_prg_language_1')

    # Targeting relevant tweets
    print('Targeting relevant tweets\n')
    tweets['programming'] = tweets['text'].apply(lambda tweet: word_in_text('programming', tweet))
    tweets['tutorial'] = tweets['text'].apply(lambda tweet: word_in_text('tutorial', tweet))
    tweets['relevant'] = tweets['text'].apply(
        lambda tweet: word_in_text('programming', tweet) or word_in_text('tutorial', tweet))

    # Analyzing Tweets by programming language: Second attempt
    # (restricted to programming/tutorial tweets)
    print('Analyzing tweets by programming language: Second attempt\n')
    relevant_only = tweets[tweets['relevant'] == True]
    plot_ranking([count_true(relevant_only, lang) for lang in prg_langs],
                 'Ranking: python vs. javascript vs. ruby (Relevant data)',
                 'tweet_by_prg_language_2')

    # Extracting Links
    tweets['link'] = tweets['text'].apply(lambda tweet: extract_link(tweet))
    tweets_relevant = tweets[tweets['relevant'] == True]
    tweets_relevant_with_link = tweets_relevant[tweets_relevant['link'] != '']
    print('\nBelow are some Python links that we extracted\n')
    print(tweets_relevant_with_link[tweets_relevant_with_link['python'] == True]['link'].head())
if __name__ == '__main__':
    # Script entry point: run the whole analysis pipeline.
    main()
| [
"bossi.ernestog@gmail.com"
] | bossi.ernestog@gmail.com |
32f0ebd68a4cf905217afbd34ce240f7c6c03b8e | 5a42ce780721294d113335712d45c62a88725109 | /project/graphdata/module/yiyiyuan/model/yi_loan_flows.py | df4e31090b6b58f8024d0997f2403261bc3c5356 | [] | no_license | P79N6A/project_code | d2a933d53deb0b4e0bcba97834de009e7bb78ad0 | 1b0e863ff3977471f5a94ef7d990796a9e9669c4 | refs/heads/master | 2020-04-16T02:06:57.317540 | 2019-01-11T07:02:05 | 2019-01-11T07:02:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | # -*- coding: utf-8 -*-
# 注意这里使用了阿里云本地库的通讯录
# sqlacodegen mysql://root:123!@#@127.0.0.1/xhh_test --outfile yyy.py --flask
from lib.application import db
from .base_model import BaseModel
class YiUserLoanFlow(db.Model, BaseModel):
    """ORM model for the `yi_user_loan_flows` table: one row per status
    change an administrator records against a loan."""
    # Rows live in the separate 'own_yiyiyuan' database bind.
    __bind_key__ = 'own_yiyiyuan'
    __tablename__ = 'yi_user_loan_flows'
    id = db.Column(db.Integer, primary_key=True)
    # Related loan and the administrator who made the change.
    loan_id = db.Column(db.Integer, nullable=False)
    admin_id = db.Column(db.Integer, nullable=False)
    # Loan status after this flow entry was recorded.
    loan_status = db.Column(db.Integer)
    relative = db.Column(db.String(1024))
    reason = db.Column(db.String(1024))
    create_time = db.Column(db.DateTime)
    admin_name = db.Column(db.String(64))
    # NOTE(review): `type` shadows the Python builtin; kept because the
    # attribute name maps directly onto the database column.
    type = db.Column(db.Integer, server_default=db.FetchedValue())
"wangyongqiang@ihsmf.com"
] | wangyongqiang@ihsmf.com |
b706a298261f243da96e72be47215ae11ce4a86f | 4a9dada02c749e9e5277fe1e35357d7b2b28ad5c | /顾天媛2018010980/操作系统实验/作业2.py | 780d1e608f2afcdbef5a6f093500ede8eb25ba41 | [] | no_license | wanghan79/2020_Option_System | 631cc80f52829390a128a86677de527472470348 | f37b870614edf7d85320da197d932df2f25a5720 | refs/heads/master | 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 | Python | UTF-8 | Python | false | false | 308 | py | # !/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
Author: Ty.Gu
Purpose: os.system
Created: 24/6/2020
"""
# Assignment 2: invoke the Windows command line from Python.
# Hint: use the built-in os.system helper.
import os
# Run two chained shell commands: change into D:\QQ and create a 'test'
# directory there.  NOTE(review): Windows-specific ('&' chaining, drive
# path); will not behave the same on a POSIX shell.
os.system('cd D:\\QQ & mkdir test' )
# os.system returns the command's exit status, printed below.
a = os.system('python 作业1.py')
print(a)
| [
"noreply@github.com"
] | wanghan79.noreply@github.com |
111386fe4bfdf0ed187f70cc4815af53d87d2cf4 | 0274c84e6bf546a325ba2a981426a9cad33cdcfc | /pycoin/ecdsa/Curve.py | c022efb03779d2035b240e74c8c42f2d5b4d640b | [
"MIT"
] | permissive | jaschadub/pycoin | 2cc646461415d68b69ae5a0c4496c6a6b37740e2 | 1e8d0d9fe20ce0347b97847bb529cd1bd84c7442 | refs/heads/master | 2021-07-05T05:31:47.734966 | 2019-03-16T18:33:47 | 2019-03-16T18:33:47 | 140,237,273 | 0 | 0 | MIT | 2019-03-16T18:41:54 | 2018-07-09T05:51:01 | Python | UTF-8 | Python | false | false | 5,076 | py | # Adapted from code written in 2005 by Peter Pearson and placed in the public domain.
from .Point import Point
def _leftmost_bit(x):
# this is closer to constant time than bit-twiddling hacks like those in
# https://graphics.stanford.edu/~seander/bithacks.html
assert x > 0
result = 1
while result <= x:
result <<= 1
return result >> 1
class Curve(object):
    """
    This class implements an `Elliptic curve <https://en.wikipedia.org/wiki/Elliptic_curve>`_ intended
    for use in `Elliptic curve cryptography <https://en.wikipedia.org/wiki/Elliptic-curve_cryptography>`_
    An elliptic curve ``EC<p, a, b>`` for a (usually large) prime p and integers a and b is a
    `group <https://en.wikipedia.org/wiki/Group_(mathematics)>`_. The members of the group are
    (x, y) points (where x and y are integers over the field of integers modulo p) that satisfy the relation
    ``y**2 = x**3 + a*x + b (mod p)``. There is a group operation ``+`` and an extra point known
    as the "point at infinity" thrown in to act as the identity for the group.
    The group operation is a marvelous thing but unfortunately this margin is too narrow to contain
    a description of it, so please refer to the links above for more information.
    :param p: a prime
    :param a: an integer coefficient
    :param b: an integer constant
    :param order: (optional) the order of the group made up by the points on the
        curve. Any point on the curve times the order is the identity for this
        group (the point at infinity). Although this is optional, it's required
        for some operations.
    """
    def __init__(self, p, a, b, order=None):
        """
        Store the curve parameters and build the point at infinity,
        represented as the (None, None) point on this curve.
        """
        self._p = p
        self._a = a
        self._b = b
        self._order = order
        self._infinity = Point(None, None, self)
    def p(self):
        """
        :returns: the prime modulus of the curve.
        """
        return self._p
    def order(self):
        """
        :returns: the order of the curve.
        """
        return self._order
    def infinity(self):
        """:returns: the "point at infinity" (also known as 0, or the identity)."""
        return self._infinity
    def contains_point(self, x, y):
        """
        :param x: x coordinate of a point
        :param y: y coordinate of a point
        :returns: True if the point (x, y) is on the curve, False otherwise
        """
        # (None, None) encodes the point at infinity, which belongs to
        # every curve by definition.
        if x is None and y is None:
            return True
        return (y * y - (x * x * x + self._a * x + self._b)) % self._p == 0
    def add(self, p0, p1):
        """
        :param p0: a point
        :param p1: a point
        :returns: the sum of the two points
        """
        p = self._p
        infinity = self._infinity
        # The point at infinity is the group identity.
        if p0 == infinity:
            return p1
        if p1 == infinity:
            return p0
        x0, y0 = p0
        x1, y1 = p1
        if (x0 - x1) % p == 0:
            if (y0 + y1) % p == 0:
                # p1 is the inverse of p0: their sum is the identity.
                return infinity
            else:
                # Doubling: slope of the tangent line at p0.
                slope = ((3 * x0 * x0 + self._a) * self.inverse_mod(2 * y0, p)) % p
        else:
            # Distinct points: slope of the chord through p0 and p1.
            slope = ((y1 - y0) * self.inverse_mod(x1 - x0, p)) % p
        x3 = (slope * slope - x0 - x1) % p
        y3 = (slope * (x0 - x3) - y0) % p
        return self.Point(x3, y3)
    def multiply(self, p, e):
        """
        multiply a point by an integer.
        :param p: a point
        :param e: an integer
        :returns: the result, equivalent to adding p to itself e times
        """
        if self._order:
            e %= self._order
        if p == self._infinity or e == 0:
            return self._infinity
        # Classic double-and-add variant scanning the bits of 3e against
        # the bits of e: where they differ, p is added or subtracted.
        e3 = 3 * e
        i = _leftmost_bit(e3) >> 1
        result = p
        while i > 1:
            result += result
            # NOTE(review): both candidate values are computed before one
            # is selected - presumably to keep the per-bit work uniform
            # (side-channel hygiene); confirm before "simplifying" this
            # into a plain branch.
            if (e3 & i):
                v = [result, result+p]
            else:
                v = [result-p, result]
            result = v[0 if (e & i) else 1]
            i >>= 1
        return result
    def inverse_mod(self, a, m):
        """
        :param a: an integer
        :param m: another integer
        :returns: the value ``b`` such that ``a * b == 1 (mod m)``
        """
        if a < 0 or m <= a:
            a = a % m
        # Extended Euclidean algorithm.
        # From Ferguson and Schneier, roughly:
        c, d = a, m
        uc, vc, ud, vd = 1, 0, 0, 1
        while c != 0:
            q, c, d = divmod(d, c) + (c,)
            uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc
        # At this point, d is the GCD, and ud*a+vd*m = d.
        # If d == 1, this means that ud is a inverse.
        assert d == 1
        if ud > 0:
            return ud
        else:
            return ud + m
    def Point(self, x, y):
        """
        :returns: a :class:`Point <.Point>` object with coordinates ``(x, y)``
        """
        return Point(x, y, self)
    def __repr__(self):
        return '{}({!r},{!r},{!r})'.format(self.__class__.__name__, self._p, self._a, self._b)
    def __str__(self):
        return 'y^2 = x^3 + {}*x + {} (mod {})'.format(self._a, self._b, self._p)
| [
"him@richardkiss.com"
] | him@richardkiss.com |
67e7af372c2ef8efcd69e1cb3b3609ff36a04e9b | ed491b7539193c30ffefcc52af276f77fc98f979 | /train_LSTM.py | bf356af65321d25c10d22c36291f2a455380ec77 | [
"Apache-2.0"
] | permissive | FredHuang16/cnn-lstm-bilstm-deepcnn-clstm-in-pytorch | fb5dcb345379deadc632a43f73a53c436a02ea42 | 8ef5b1321cf79063ee4b146c7502a31815e9f33b | refs/heads/master | 2021-01-16T00:46:33.781766 | 2017-08-10T06:51:21 | 2017-08-10T06:51:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,640 | py | import os
import sys
import torch
import torch.autograd as autograd
import torch.nn.functional as F
import torch.nn.utils as utils
import random
import shutil
# Fix RNG seeds so runs are repeatable (Python RNG and torch RNG).
random.seed(336)
torch.manual_seed(1234)
def train(train_iter, dev_iter, test_iter, model, args):
    """Run the full training loop.

    Iterates ``args.epochs`` times over ``train_iter``, logging batch
    accuracy every ``args.log_interval`` steps, evaluating on
    ``dev_iter`` every ``args.test_interval`` steps and saving a model
    snapshot (scored on ``test_iter``) every ``args.save_interval``
    steps.  Returns the number of snapshots written.
    """
    if args.cuda:
        model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
    steps = 0
    model_count = 0
    model.train()
    for epoch in range(1, args.epochs+1):
        print("## 第{} 轮迭代,共计迭代 {} 次 !##".format(epoch, args.epochs))
        # train_iter was created with shuffle=True by data.Iterator.splits().
        for batch in train_iter:
            # Labels are 1-based in the dataset; sub_(1) shifts them in
            # place to the 0-based range cross_entropy expects.
            feature, target = batch.text, batch.label.data.sub_(1)
            target =autograd.Variable(target)
            if args.cuda:
                feature, target = feature.cuda(), target.cuda()
            optimizer.zero_grad()
            # model.zero_grad()
            # Re-initialise the LSTM hidden state each batch; the last
            # batch of an epoch may be smaller than args.batch_size.
            model.hidden = model.init_hidden(args.lstm_num_layers, args.batch_size)
            if feature.size(1) != args.batch_size:
                model.hidden = model.init_hidden(args.lstm_num_layers, feature.size(1))
            logit = model(feature)
            # target values >=0 <=C - 1 (C = args.class_num)
            loss = F.cross_entropy(logit, target)
            loss.backward()
            # Clip gradients to prevent them from exploding.  Per the
            # original author's note, clipping with args.max_norm sped up
            # overfitting, so a small fixed max_norm is used instead.
            utils.clip_grad_norm(model.parameters(), max_norm=1e-4)
            optimizer.step()
            steps += 1
            if steps % args.log_interval == 0:
                train_size = len(train_iter.dataset)
                corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
                accuracy = float(corrects)/batch.batch_size * 100.0
                sys.stdout.write(
                    '\rBatch[{}/{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(steps,
                                                                               train_size,
                                                                               loss.data[0],
                                                                               accuracy,
                                                                               corrects,
                                                                               batch.batch_size))
            if steps % args.test_interval == 0:
                # eval here is the sibling evaluation helper defined below
                # (it shadows the Python builtin of the same name).
                eval(dev_iter, model, args)
            if steps % args.save_interval == 0:
                if not os.path.isdir(args.save_dir): os.makedirs(args.save_dir)
                save_prefix = os.path.join(args.save_dir, 'snapshot')
                save_path = '{}_steps{}.pt'.format(save_prefix, steps)
                torch.save(model.state_dict(), save_path)
                test_eval(test_iter, model, save_path, args)
                model_count += 1
    print("model_count \n", model_count)
    return model_count
def eval(data_iter, model, args):
    """Score `model` on `data_iter` and print average loss and accuracy.

    Switches the model to eval mode for the pass and back to train mode
    before returning.  NOTE(review): the name shadows the Python builtin
    ``eval``; renaming it would require updating the call in train().
    """
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        # Shift 1-based labels in place to the 0-based range expected by
        # cross_entropy.
        feature, target = batch.text, batch.label.data.sub_(1)
        target = autograd.Variable(target)
        if args.cuda:
            feature, target = feature.cuda(), target.cuda()
        # Fresh hidden state sized to this batch.
        model.hidden = model.init_hidden(args.lstm_num_layers, batch.batch_size)
        logit = model(feature)
        # size_average=False accumulates the summed loss; averaged below.
        loss = F.cross_entropy(logit, target, size_average=False)
        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)
                     [1].view(target.size()).data == target.data).sum()
    size = len(data_iter.dataset)
    # avg_loss = loss.data[0]/size
    avg_loss = float(avg_loss)/size
    accuracy = float(corrects)/size * 100.0
    model.train()
    print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                      accuracy,
                                                                      corrects,
                                                                      size))
def test_eval(data_iter, model, save_path, args):
    """Evaluate `model` on the test iterator and append the scores to
    ./Test_Result.txt, which is then copied into the snapshot directory.

    Mirrors eval() but additionally records `save_path` and the scores on
    disk.  Leaves the model back in training mode.
    """
    model.eval()
    corrects, avg_loss = 0, 0
    for batch in data_iter:
        # Shift 1-based labels in place to the 0-based range expected by
        # cross_entropy.
        feature, target = batch.text, batch.label.data.sub_(1)
        target = autograd.Variable(target)
        if args.cuda:
            feature, target = feature.cuda(), target.cuda()
        # Fresh hidden state sized to this batch.
        model.hidden = model.init_hidden(args.lstm_num_layers, batch.batch_size)
        logit = model(feature)
        loss = F.cross_entropy(logit, target, size_average=False)
        avg_loss += loss.data[0]
        corrects += (torch.max(logit, 1)
                     [1].view(target.size()).data == target.data).sum()
    size = len(data_iter.dataset)
    avg_loss = float(avg_loss) / size
    accuracy = float(corrects)/size * 100.0
    model.train()
    print('\nEvaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n'.format(avg_loss,
                                                                      accuracy,
                                                                      corrects,
                                                                      size))
    # Append this run's result to the running report.  Mode "a" creates
    # the file when missing, so the original exists()/"w" branch was
    # redundant; the context manager also guarantees the handle is closed
    # (the original leaked it if a write failed).
    with open("./Test_Result.txt", "a") as result_file:
        result_file.write("model " + save_path + "\n")
        result_file.write("Evaluation - loss: {:.6f} acc: {:.4f}%({}/{}) \n".format(avg_loss,
                                                                                    accuracy,
                                                                                    corrects,
                                                                                    size))
        result_file.write("\n")
    shutil.copy("./Test_Result.txt", "./snapshot/" + args.mulu + "/Test_Result.txt")
def predict(text, model, text_field, label_feild):
    """Classify a single raw string and return its label text.

    NOTE(review): the `label_feild` parameter name is misspelled; fixing
    it would break any caller that passes it by keyword.
    """
    assert isinstance(text, str)
    model.eval()
    # Tokenise/normalise, then map tokens to vocabulary ids (batch of 1).
    text = text_field.tokenize(text)
    text = text_field.preprocess(text)
    text = [[text_field.vocab.stoi[x] for x in text]]
    x = text_field.tensor_type(text)
    # NOTE(review): volatile=True is the pre-0.4 PyTorch way to disable
    # autograd - confirm the installed torch version still supports it.
    x = autograd.Variable(x, volatile=True)
    print(x)  # leftover debug output
    output = model(x)
    _, predicted = torch.max(output, 1)
    # +1 undoes the label shift applied by sub_(1) during training.
    return label_feild.vocab.itos[predicted.data[0][0]+1]
| [
"bamtercelboo@163.com"
] | bamtercelboo@163.com |
da2e3cb7bc9bcca1f4ca59c793fb86ef425b338d | 0b2833b1b129d72ff805e3d775df00f79f421bfb | /parse_noise.py | 53bbdcb73f13007c68694e2313de58134e3b8308 | [] | no_license | xssChauhan/torino-noise | 95f2d8b8c9937c3135221f53da990716c010be38 | 10d58bdea11dc0a716e22ea325b901a4b6b59191 | refs/heads/master | 2023-05-31T16:52:34.217809 | 2021-06-27T19:56:43 | 2021-06-27T19:56:43 | 380,284,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | import pandas as pd
import numpy as np
from datetime import datetime
from datetime import time
def read_noise_csv(file):
    """Load a noise-measurement CSV: ';'-separated, first 8 lines skipped."""
    return pd.read_csv(file, delimiter=";", skiprows=list(range(8)))
def combine_date_hour(row):
    """Merge the 'Data' (dd-mm-yyyy) and 'Ora' (HH:MM) fields of *row*
    into a single datetime."""
    stamp = "{} {}".format(row["Data"], row["Ora"])
    return datetime.strptime(stamp, "%d-%m-%Y %H:%M")
def convert_float(num):
    """Parse a decimal-comma string (e.g. "3,14") into a float.

    Non-string values (already-numeric cells, NaN, ...) are returned
    unchanged.  The original implementation had no else branch and so
    silently returned None for them, discarding the data.
    """
    if isinstance(num, str):
        return float(num.replace(",", "."))
    return num
"shikhar.chauhan@live.com"
] | shikhar.chauhan@live.com |
90734113d3bb75eb87aa511a4584a9e3234f7918 | 625d113273cf9bd6474e8ac12cd8afe2e4f8d50b | /Disposable Teleports.py | 9147fab6c0a205e64c33d3d93de55a5efabb3f8e | [] | no_license | hrvach/CheckiO | 9e4b04cf93c8f4da7bd61b55a3cf9bdcaea0836f | d25bbdb3b33441f45f4b587fd2a651c5c086484a | refs/heads/master | 2020-03-25T02:25:57.273381 | 2018-08-02T12:13:28 | 2018-08-02T12:13:28 | 143,289,238 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | def checkio(teleports_string):
    # Each "xy" pair in the input becomes a frozenset of two station
    # labels (teleports are undirected); `paths` collects complete tours.
    teleports, paths = {frozenset(i) for i in teleports_string.split(',')}, []
    def walk(station, links, path):
        # A tour is complete once all 8 stations have been visited and we
        # are back at station '1'.
        if len(set(path)) == 8 and station == '1':
            paths.append(path+station)
        # Recurse over every unused teleport touching the current station;
        # each teleport is consumed (removed from `links`) when taken.
        for link in filter(lambda x: station in x, links):
            walk(next(iter(link-{station})), links-{link}, path+station)
    walk('1', teleports, '')
    # NOTE(review): min() raises ValueError if no full tour exists -
    # presumably every checkio input admits one; confirm before reuse.
    return min(paths, key=len)
"github@hrvoje.org"
] | github@hrvoje.org |
60fe823701b35b1c4581a56117b8f31fd2dd832e | 7d6bc1812e5ab4f55c53ae168a6f2317b35e037c | /l10n_cl_hr_payroll/model/hr_bonus.py | 2485352eca5af3bda8f1f15699506ec45a91e6c5 | [] | no_license | suningwz/ODOO13-3 | f172c2e16d2d3310f7c0cf88ff5b0fb5f1e1d15c | 527732e22807be0d6a692ab808b74794cb027914 | refs/heads/master | 2022-12-15T10:19:51.328186 | 2020-09-17T22:16:14 | 2020-09-17T22:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,491 | py | from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
RO_STATES = {'draft': [('readonly', False)]}
class HrBonusSheet(models.Model):
    """HR bonus sheet: a batch of bonus lines that goes through a
    draft -> pending -> approve -> done approval workflow and, once
    approved, is pushed onto each employee's salary-rule lines."""
    _name = 'hr.bonus.sheet'
    _description = 'Hoja de Bonos RRHH'
    _inherit = ['mail.thread']
    _order = 'date_issue desc, name desc, id desc'
    # Sequence-generated reference, assigned in create().
    name = fields.Char('Ref', copy=False, default='Nuevo')
    date_issue = fields.Date('Fecha solicitud', default=fields.Date.today, copy=False, required=True, readonly=True, states=RO_STATES)
    state = fields.Selection([
        ('draft', 'Borrador'),
        ('pending', 'Por aprobar'),
        ('approve', 'Aprobado'),
        ('cancel', 'Cancelado'),
        ('done', 'En nómina')
    ], 'Estado', default='draft', copy=False, tracking=True)
    description = fields.Text('Descripción', compute='_compute_description')
    line_ids = fields.One2many('hr.bonus.line', 'sheet_id', 'Bonos', readonly=True, states=RO_STATES)
    currency_id = fields.Many2one('res.currency', default=lambda self: self.env.company.currency_id)
    bonus_count = fields.Integer('Cant. bonos', compute='_compute_total')
    amount_total = fields.Monetary('Monto Total', compute='_compute_total')
    @api.depends('line_ids')
    def _compute_description(self):
        """Join the descriptions of all referenced salary rules."""
        for record in self:
            record.description = ', '.join(record.mapped('line_ids.balance_id.desc'))
    @api.depends('line_ids')
    def _compute_total(self):
        """Total only lines whose unit is currency ('$'); count all lines."""
        for record in self:
            record.amount_total = sum(record.line_ids.filtered(lambda l: l.balance_id.um == '$').mapped('amount'))
            record.bonus_count = len(record.line_ids)
    def back_draft(self):
        """Return the sheet to draft; approved/done/cancelled sheets may
        only be reverted by an HR manager."""
        for record in self:
            if record.state in ['approve', 'done', 'cancel'] and not self.env.user.has_group('hr.group_hr_manager'):
                raise ValidationError(_('No tiene permisos para devolver a borrador el documento %s.') % record.name)
        self.write({'state': 'draft'})
    def confirm(self):
        """Submit the sheet for approval."""
        self.write({'state': 'pending'})
    def approve(self):
        """Mark the sheet as approved."""
        self.write({'state': 'approve'})
    def cancel(self):
        """Cancel the sheet."""
        self.write({'state': 'cancel'})
    @api.model
    def create(self, vals):
        """Assign the next sequence number as the record reference."""
        vals['name'] = self.env['ir.sequence'].next_by_code('hr.bonus.sheet')
        return super(HrBonusSheet, self).create(vals)
    def unlink(self):
        """Only draft sheets may be deleted."""
        for record in self:
            if record.state != 'draft':
                raise ValidationError(_('%s debe estar en estado Borrador para poder borrarlo.') % record.name)
        return super(HrBonusSheet, self).unlink()
    def create_haberesydesc(self):
        """Push every bonus line onto the employees' salary-rule lines
        (balance_ids), updating an overlapping existing line when one is
        found and creating a new one otherwise; then mark the sheet done."""
        employees = {}
        for line in self.mapped('line_ids'):
            # Overlap cases to evaluate against an existing employee line
            # (originally documented in Spanish):
            # 1. New line has no end date and the existing one doesn't
            #    either: it always matches.
            # 2. New line has an end date, existing one doesn't: the new
            #    end date must be after the existing start date.
            # 3. New line has no end date, existing one does: the new
            #    start date must be before the existing end date.
            # 4. Both have end dates: the exact date ranges are compared.
            if line.date_to:
                hyd_ids = line.employee_id.balance_ids.filtered(lambda hd: hd.balance_id == line.balance_id and line.date_to >= hd.fecha_desde and (line.date_from <= hd.fecha_hasta or not hd.fecha_hasta))
            else:
                hyd_ids = line.employee_id.balance_ids.filtered(lambda hd: hd.balance_id == line.balance_id and (line.date_from < hd.fecha_hasta or not hd.fecha_hasta))
            if hyd_ids:
                # Overlapping line exists: just update its amount.
                hyd_ids.monto = line.amount
                vals = False
            else:
                # No overlap: queue a create command (0, 0, values).
                vals = (0, 0, {
                    'balance_id': line.balance_id.id,
                    'date_from': line.date_from,
                    'date_to': line.date_to,
                    'amount': line.amount
                })
            if vals:
                if line.employee_id in employees:
                    employees[line.employee_id].append(vals)
                else:
                    employees[line.employee_id] = [vals]
        for employee, values in employees.items():
            employee.balance_ids = values
        self.write({'state': 'done'})
class HrBonusLine(models.Model):
    """A single bonus: a salary rule, an employee, an amount and the
    date range during which it applies."""
    _name = 'hr.bonus.line'
    _description = 'Línea de bonos RRHH'
    _rec_name = 'balance_id'
    balance_id = fields.Many2one('hr.balance', 'Haber/Descuento', required=True, ondelete='cascade')
    employee_id = fields.Many2one('hr.employee', 'Empleado', required=True, ondelete='cascade')
    date_from = fields.Date('Fecha desde', required=True, default=fields.Date.today)
    # Optional: an empty end date means the bonus is open-ended.
    date_to = fields.Date('Fecha hasta')
    amount = fields.Float('Monto', required=True)
    sheet_id = fields.Many2one('hr.bonus.sheet', 'Hoja de bonos', ondelete='cascade')
    # Unit of measure, mirrored (read-only) from the salary rule.
    um = fields.Selection([
        ('$', '$'),
        ('u', 'u'),
        ('%', '%')], 'UM', related='balance_id.um', readonly=True)
    @api.constrains('date_from', 'date_to')
    def _check_dates(self):
        """Reject lines whose start date falls after their end date."""
        for record in self:
            if record.date_to and record.date_from > record.date_to:
                raise ValidationError(_('Fecha desde no puede ser mayor a Fecha hasta.'))
    @api.constrains('amount')
    def _check_amount(self):
        """Amounts must be strictly positive."""
        for record in self:
            if record.amount <= 0:
                raise ValidationError(_('Monto debe ser mayor a 0.'))
| [
"adrt271988@gmail.com"
] | adrt271988@gmail.com |
7fda55dbef1e0a4b55a7c58ca917e5e021f7952d | 5d28c38dfdd185875ba0edaf77281e684c81da0c | /dev/update_ml_package_versions.py | 4d3487199c6883792b944628ee0461737a34b53a | [
"Apache-2.0"
] | permissive | imrehg/mlflow | 3a68acc1730b3ee6326c1366760d6ddc7e66099c | 5ddfe9a1b48e065540094d83125040d3273c48fa | refs/heads/master | 2022-09-24T05:39:02.767657 | 2022-09-20T00:14:07 | 2022-09-20T00:14:07 | 244,945,486 | 1 | 0 | Apache-2.0 | 2020-03-04T16:11:54 | 2020-03-04T16:11:53 | null | UTF-8 | Python | false | false | 3,569 | py | """
A script to update the maximum package versions in 'mlflow/ml-package-versions.yml'.
# Prerequisites:
$ pip install packaging pyyaml
# How to run (make sure you're in the repository root):
$ python dev/update_ml_package_versions.py
"""
import argparse
import json
from packaging.version import Version
import re
import sys
import urllib.request
import yaml
def read_file(path):
    """Return the entire contents of the text file at *path*."""
    with open(path) as handle:
        contents = handle.read()
    return contents
def save_file(src, path):
    """Write the string *src* to *path*, replacing any existing contents."""
    with open(path, "w") as handle:
        handle.write(src)
def get_package_versions(package_name):
    """Fetch from PyPI the stable, downloadable release versions of a package."""
    endpoint = "https://pypi.python.org/pypi/{}/json".format(package_name)
    with urllib.request.urlopen(endpoint) as response:
        payload = json.load(response)

    def is_stable(version_str):
        parsed = Version(version_str)
        return not (parsed.is_devrelease or parsed.is_prerelease)

    results = []
    for version, dist_files in payload["releases"].items():
        # Keep only versions that actually have distribution files and
        # are neither dev nor pre-releases.
        if len(dist_files) > 0 and is_stable(version):
            results.append(version)
    return results
def get_latest_version(candidates):
    """Return the highest version string among *candidates*."""
    ranked = sorted(candidates, key=Version, reverse=True)
    return ranked[0]
def update_max_version(src, key, new_max_version, category):
    """Rewrite the quoted ``maximum`` value under ``<key>.<category>``.

    *src* is the text of the ML package versions YAML; the value on the
    first ``maximum:`` line that follows ``<key>:`` ... ``<category>:``
    is replaced with *new_max_version* and the updated text is returned.

    >>> src = '''
    ... sklearn:
    ...   models:
    ...     maximum: "0.0.0"
    ... '''.strip()
    >>> print(update_max_version(src, "sklearn", "0.1.0", "models"))
    sklearn:
      models:
        maximum: "0.1.0"
    """
    # Matches:
    #
    # <key>:
    #   ...
    #   <category>:
    #     ...
    #     maximum: "1.2.3"
    matcher = r"({key}:.+?{category}:.+?maximum: )\".+?\"".format(
        key=re.escape(key), category=category
    )
    replacement = r'\g<1>"{}"'.format(new_max_version)
    # DOTALL lets the lazy ".+?" span the lines between the key and the
    # maximum entry it owns.
    return re.sub(matcher, replacement, src, flags=re.DOTALL)
def parse_args(args):
    """Parse the command-line arguments given in *args*."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-p",
        "--path",
        required=False,
        default="mlflow/ml-package-versions.yml",
        help="Path to the ML package versions yaml (default: mlflow/ml-package-versions.yml)",
    )
    return arg_parser.parse_args(args)
def main(args):
    """Bump every non-pinned `maximum` version in the YAML to the latest
    stable PyPI release and write the file back in place."""
    args = parse_args(args)
    yml_path = args.path
    old_src = read_file(yml_path)
    new_src = old_src
    config_dict = yaml.load(old_src, Loader=yaml.SafeLoader)
    for flavor_key, config in config_dict.items():
        for category in ["autologging", "models"]:
            # Skip absent categories and those explicitly pinned in the YAML.
            if (category not in config) or config[category].get("pin_maximum", False):
                continue
            print("Processing", flavor_key, category)
            package_name = config["package_info"]["pip_release"]
            max_ver = config[category]["maximum"]
            versions = get_package_versions(package_name)
            unsupported = config[category].get("unsupported", [])
            versions = set(versions).difference(unsupported)  # exclude unsupported versions
            latest_version = get_latest_version(versions)
            # Only rewrite when the recorded maximum is out of date.
            if max_ver == latest_version:
                continue
            new_src = update_max_version(new_src, flavor_key, latest_version, category)
    save_file(new_src, yml_path)
save_file(new_src, yml_path)
if __name__ == "__main__":
    # Forward the CLI arguments (minus the program name) to main().
    main(sys.argv[1:])
| [
"noreply@github.com"
] | imrehg.noreply@github.com |
0184f7b0157794daaa04d557ec2d252d71771506 | 13625dd7375297b066ccd69d6c229e9a1535c9b2 | /payment/migrations/0002_auto_20201222_1654.py | 0b3a4704493f451e693f9d894903346df0675eeb | [] | no_license | rajman01/investfy | 9d5fa3ed7593ec13db575016fc839664630318af | a4c8bf16ba7a1ce38d1370e4779284a4d6426733 | refs/heads/main | 2023-09-01T19:10:18.411861 | 2023-08-28T02:30:23 | 2023-08-28T02:30:23 | 320,408,218 | 0 | 1 | null | 2023-08-28T02:30:24 | 2020-12-10T22:46:03 | null | UTF-8 | Python | false | false | 592 | py | # Generated by Django 3.1.3 on 2020-12-22 15:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: redeclare Account.user as a
    cascading FK to the user model with related_name='accounts'.
    Avoid editing by hand - Django's migration state depends on the
    recorded operations."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('payment', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='account',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accounts', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"alameenraji31@gmail.com"
] | alameenraji31@gmail.com |
b0b869cbd233806fd2ad95371f63f04ff9c5a250 | 634514a9c10e32051964b179cc807d089d31124e | /S2l/Thesis_Ch3/Exp1_reach3dof/Scripts/gym_test_random.py | 4aef80b71d849bb60fdb335eafc4e754e3b2312f | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | leopauly/Observation-Learning-Simulations | 34009872a1f453ffc4ae7ddced7447a74ff704c4 | 462c04a87c45aae51537b8ea5b44646afa31d3a5 | refs/heads/master | 2021-08-04T10:55:42.900015 | 2021-07-05T13:41:09 | 2021-07-05T13:41:09 | 129,761,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,872 | py | #### Random agent in gym env
##Imports
import numpy as np
import gym
import matplotlib.pyplot as plt
## Defining env
# NOTE(review): 'Pusher3DOFReal-v1' is a custom environment id - it must
# already be registered with gym for this script to run.
env = gym.make('Pusher3DOFReal-v1')
env.switch=-5
env.initialize_env()
#env = gym.make('Pusher7DOF-v1')
# Print observation/action space sizes as a sanity check.
print(env.observation_space.shape[0])
print(env.action_space.shape[0])
## Defining vars
# NOTE(review): these four constants are never read below - presumably
# leftovers from the tutorial template this script was adapted from.
LR = 1e-3
goal_steps = 500
score_requirement = 50
initial_games = 10000
def some_random_games_first():
    """Drive the environment with a fixed action while rendering.

    WARNING: the inner ``while(True)`` loop never exits - the ``done``
    check below is commented out - so the first episode runs forever.
    """
    # Each of these is its own game.
    for episode in range(200):
        env.reset()
        #env.render(mode='human')
        #img=env.render(mode='rgb_array') # Get the observation
        #plt.imshow(img)
        #plt.show()
        # this is each frame, up to 200...but we wont make it that far.
        while(True):
            # Sample a random action.  NOTE(review): `action` is unused -
            # the env.step() call below passes a hard-coded action instead.
            action = env.action_space.sample()
            # This will display the environment
            # Only display if you really want to see it.
            # Takes much longer to display it.
            #env.render(mode='human')
            #img=env.render(mode='human') # Get the observation
            #plt.imshow(img)
            #plt.show()
            print(env.action_space.shape,np.array(env.action_space.high).tolist())
            # this executes the environment with an action,
            # and returns the observation of the environment,
            # the reward, if the env is over, and other info.
            observation, reward, done, info = env.step([1,-2,-1])
            env.render(mode='human')
            #observation, reward, done, info = env.step([0,1,0,0])
            #print(env.get_eval())
            #if done:
            #    break
some_random_games_first()
| [
"meetleopauly@yahoo.com"
] | meetleopauly@yahoo.com |
70dde7f0aba2392ea2ae24df204e883b3e0f9b2a | 1f177b5e7bdaca49076c6ff806f5e2be9a86e834 | /algorithm/190121_array/practice_01.py | e802a0b6dc1111ee6056bb6ebc4cea381642c8ca | [] | no_license | silverlyjoo/TIL | 9e19ba407a9dc82c231e66e352f1c7783e767782 | 98a139770a6d19598d787674bcf20d2fe744ced0 | refs/heads/master | 2021-08-17T02:10:35.101212 | 2019-08-26T08:21:32 | 2019-08-26T08:21:32 | 162,099,046 | 6 | 1 | null | 2021-06-10T21:20:36 | 2018-12-17T08:32:39 | Jupyter Notebook | UTF-8 | Python | false | false | 530 | py |
arr = [[1,3,4,5,9],[10,11,2,13,14],[15,6,7,8,16],[17,18,12,19,20],[21,22,23,24,25]]
# arr.append(map(int, input().split()))
arr = [[0 for ze in range(5)] for ro in range(5)]
for i in range(5):
arr[i] = list(map(int, input().split()))
def iswall(testX, testY):
    """Return True when (testX, testY) lies inside the 5x5 grid."""
    return 0 <= testX < 5 and 0 <= testY < 5
def my_abs(num):
    """Return the absolute value of ``num``.

    BUG FIX: the original ``return num *= -1`` was a SyntaxError (an
    augmented assignment is a statement, not an expression), and a
    non-negative ``num`` fell off the end, implicitly returning None.
    """
    if num < 0:
        return -num
    return num
ans_list = []
for i in range(len(arr)):
for j in range(len(arr[i])):
| [
"silverlyjoo@gmail.com"
] | silverlyjoo@gmail.com |
9322317b4bb5487ab61113643c8fa3661466d872 | 491dd5a8f7f53ff05e4efc599bd6f32e840c8806 | /scripts/compose_api.py | 222b4da5e4e41e1d6039090fbf5b875e0c36ec6e | [
"CC0-1.0"
] | permissive | forgeRW/WALKOFF | 9b50ffa4eff42286d709c1f13069f836a3af59b5 | 52b688f427e5360f7acab6cdae701fe67f7a0712 | refs/heads/master | 2021-01-17T04:25:21.154843 | 2018-02-06T19:39:27 | 2018-02-06T19:39:27 | 82,941,065 | 0 | 0 | null | 2017-02-23T15:29:03 | 2017-02-23T15:29:03 | null | UTF-8 | Python | false | false | 1,390 | py | import logging
import os
import sys
sys.path.append(os.path.abspath('.'))
from walkoff.config import paths
logger = logging.getLogger(__name__)
def read_and_indent(filename, indent):
    """Return the lines of *filename*, each prefixed with *indent* spaces."""
    prefix = ' ' * indent
    with open(filename, 'r') as source:
        return [prefix + line for line in source]
def compose_api():
    """Inline every ``$ref:`` include of api.yaml into composed_api.yaml.

    Reads api.yaml line by line; whenever a line is ``$ref: <file>`` the
    referenced YAML file is spliced in at the same indentation level, and
    all other lines are copied through unchanged.
    """
    with open(os.path.join(paths.api_path, 'api.yaml'), 'r') as api_yaml:
        final_yaml = []
        for line_num, line in enumerate(api_yaml):
            if line.lstrip().startswith('$ref:'):
                split_line = line.split('$ref:')
                reference = split_line[1].strip()
                # The indentation of the `$ref` line (everything before the
                # marker) determines how deep the included lines are indented.
                indentation = split_line[0].count(' ')
                try:
                    final_yaml.extend(read_and_indent(os.path.join(paths.api_path, reference), indentation))
                    final_yaml.append(os.linesep)
                except (IOError, OSError):
                    logger.error('Could not find or open referenced YAML file {0} in line {1}'.format(reference,
                                                                                                      line_num))
            else:
                final_yaml.append(line)
    # Write the fully expanded document next to the source file.
    with open(os.path.join(paths.api_path, 'composed_api.yaml'), 'w') as composed_yaml:
        composed_yaml.writelines(final_yaml)
if __name__ == '__main__':
compose_api()
| [
"Tervala_Justin@bah.com"
] | Tervala_Justin@bah.com |
045662700cb8d10239ea775433abc0ace3cb9c57 | c71b00b530efe7e8fe4e666b9156ff30976bd72d | /_draft/coordtransform/coordtransform.py | e917e065e6bf08cbb18cf2aff5008487a6d0c4a0 | [
"MIT",
"BSD-3-Clause"
] | permissive | IfeanyiEkperi/autopew | af714537d3438100d01d3c988930fa3e8d270581 | 964c379f7e3a7b15259672df37629baee46e158c | refs/heads/master | 2020-08-15T12:14:01.165061 | 2019-10-15T16:08:27 | 2019-10-15T16:08:27 | 215,340,368 | 0 | 0 | NOASSERTION | 2019-10-15T16:02:54 | 2019-10-15T16:02:53 | null | UTF-8 | Python | false | false | 3,094 | py | import logging
import itertools
from autopew.transform import (
affine_from_AB,
transform_from_affine,
inverse_affine_transform,
)
class CoordinateTransform(object):
    """An affine transform linking a source and a destination coordinate system.

    Every instance registers itself in the class-level ``library``; an inverse
    transform (dest -> source) is created automatically so the library always
    contains both directions of a link.
    """

    # Registry of every transform created so far (shared across instances).
    library = []
    # BUG FIX: the original module referenced an undefined module-level
    # ``logger`` (NameError on every log call); use a class-owned one.
    _logger = logging.getLogger(__name__)

    def __init__(self, source, dest, *args, **kwargs):
        self.forward = None  # callable mapping source -> dest, set by calibrate()
        self.reverse = None  # callable mapping dest -> source, set by calibrate()
        self.source = source
        self.dest = dest
        # todo: methods for dealing with maximum dimensionality of the transform
        # if you create a 3D-3D transform you can keep all dims, but dims will be lost
        # for 3D-2D, and any subsequent transforms.
        self._register()
        if (self.dest, self.source) not in self._links:
            self._invert  # property access registers the inverse transform
        self._iter_library()

    @property
    def _links(self):
        """Set of (source, dest) pairs currently present in the library."""
        return set(
            zip([i.source for i in self.library], [i.dest for i in self.library])
        )

    @property
    def _domains(self):
        """Set of all coordinate systems referenced by the library."""
        return set([i.dest for i in self.library] + [i.source for i in self.library])

    def _register(self):
        """
        Register the Coordinate Transform in the Transform Library (once).
        """
        if self not in self.library:
            self.library.append(self)
        else:
            self._logger.warning("Transform Already Exists in Library")

    def _iter_library(self):
        """
        Report (source, dest) pairs not yet covered by the library, i.e.
        transforms that would still need to be calibrated.
        """
        self._logger.debug("Iterating over transform library.")
        # identify all coordinate reference systems
        crs = self._domains
        present = set([(c.source, c.dest) for c in self.library])
        possible = itertools.product(crs, repeat=2)
        for a, b in possible:
            if (a != b) and ((a, b) not in present):
                print("Need to add ({}, {})".format(a, b))

    @property
    def _invert(self):
        """Create, register and cache the inverse (dest -> source) transform."""
        self._logger.debug("Creating inverse for {}".format(str(self)))
        self.inverse = CoordinateTransform(self.dest, self.source)
        self.inverse.inverse = self
        self.inverse.forward, self.inverse.reverse = self.reverse, self.forward
        return self.inverse

    def calibrate(self, sourcepoints, destpoints):
        """Fit the affine transform mapping *sourcepoints* onto *destpoints*.

        Also updates the cached inverse transform with the inverted matrix.
        """
        self._logger.debug("Calibrating {}".format(str(self)))
        # BUG FIX: the original referenced the undefined names ``pixelpoints``
        # / ``transformpoints`` and called the unimported ``affine_transform``
        # instead of the imported ``transform_from_affine``.
        self.affine = affine_from_AB(sourcepoints, destpoints)
        self.forward = transform_from_affine(self.affine)
        self.reverse = inverse_affine_transform(self.affine)
        self.inverse.forward, self.inverse.reverse, self.inverse.affine = (
            self.reverse,
            self.forward,
            np.linalg.inv(self.affine),
        )

    def __eq__(self, other):
        if other.__class__ == self.__class__:
            return (self.source == other.source) and (self.dest == other.dest)
        else:
            return False

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.source, self.dest)

    def __str__(self):
        return "{} from {} to {}".format(
            self.__class__.__name__, self.source, self.dest
        )
| [
"morgan.j.williams@hotmail.com"
] | morgan.j.williams@hotmail.com |
e7f8f5e7cca6d1c74426ec28482859f35ce8c31f | e68a59efcf3591a7efedb2f66d26d0f01607e288 | /simulator/main.py | 07df44ae9e227d6cdcb934e3c3462ced3d185252 | [] | no_license | fagan2888/core-1 | efc0fb9e4a3139c1174d7caf539163f34b966898 | 97930712b71ebdb6ad587a2dee2bf6b8ac0dbac7 | refs/heads/master | 2020-12-01T22:46:18.836996 | 2017-12-15T16:43:30 | 2017-12-15T16:43:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,633 | py | from flask import Flask, url_for
from flask import jsonify
from flask import request
from connectivity.bitstamp_api import BitstampAPI
from constants import SIMULATOR_REPLAYER_DATA_FILE
from constants import SIMULATOR_USE_REPLAYER, API_URL_V2_TICKER
from constants import TRADING_DEFAULT_CURRENCY_PAIR
from simulator.logic import send_order, market_order, UserAccount
from simulator.replayer import Replayer
app = Flask(__name__)
user = UserAccount()
if SIMULATOR_USE_REPLAYER:
replayer = Replayer(data_file=SIMULATOR_REPLAYER_DATA_FILE)
else:
replayer = None
@app.errorhandler(404)
def page_not_found(err):
return 'Endpoint not found. Browse / to list all the endpoints.'
@app.route('/reset/', methods=['GET', 'POST'], strict_slashes=False)
def reset():
global user, replayer
user = UserAccount()
if replayer is not None:
replayer.reset()
return 'Reset.'
@app.route('/', methods=['GET', 'POST'], strict_slashes=False)
def list_all_end_points():
def has_no_empty_params(rule_):
defaults = rule_.defaults if rule_.defaults is not None else ()
arguments = rule_.arguments if rule_.arguments is not None else ()
return len(defaults) >= len(arguments)
links = []
for rule in app.url_map.iter_rules():
if 'GET' in rule.methods and has_no_empty_params(rule):
url = url_for(rule.endpoint, **(rule.defaults or {}))
links.append((url, rule.endpoint))
return '<b>' + '<br/><br/>'.join(
sorted(['<a href="{0}">{0}</a> -> {1}()'.format(l, v) for (l, v) in links])) + '</b>'
@app.route('/ticker/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/ticker/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def ticker():
if SIMULATOR_USE_REPLAYER:
return jsonify(replayer.next())
else:
tick = BitstampAPI.ticker(API_URL_V2_TICKER)
tick.update({k: str(v) for (k, v) in tick.items()})
return jsonify(tick)
@app.route('/balance/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/balance/', methods=['GET', 'POST'], strict_slashes=False)
def balance():
return jsonify(user.balance())
# if request.method == 'GET':
# # for the web interface.
# return json.dumps(user.balance(), indent=4).replace('\n', '<br/>')
# # post. returns a JSON.
# return user.balance()
@app.route('/buy/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/buy/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def buy():
return jsonify(send_order(user, is_buy=True))
@app.route('/sell/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/sell/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def sell():
return jsonify(send_order(user, is_buy=False))
@app.route('/buy/market/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/buy/market/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def buy_market():
return jsonify(market_order(user, is_buy=True))
@app.route('/sell/market/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/sell/market/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def sell_market():
return jsonify(market_order(user, is_buy=False))
@app.route('/cancel_order/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/cancel_order/', methods=['GET', 'POST'], strict_slashes=False)
def cancel_order():
if request.method == 'POST':
try:
data = request.form
print(data)
order_id = data['id']
del user.open_orders[order_id]
return 'Order canceled.'
except:
return jsonify({'error': 'Invalid order id'})
else:
return 'Only available through POST.'
# {'id': '320464858', 'key': '***', 'signature': '***', 'nonce': '1506251383558164'}
@app.route('/order_status/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/order_status/', methods=['GET', 'POST'], strict_slashes=False)
def order_status():
    """Return the status of a single order (POST with an ``id`` form field)."""
    if request.method == 'POST':
        data = request.form
        print(data)
        order_id = data['id']
        return user.order_statuses[order_id]
    else:
        # BUG FIX: the GET branch previously fell through and returned None,
        # which makes Flask raise a 500; mirror cancel_order()'s behaviour.
        return 'Only available through POST.'
@app.route('/open_orders/all/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/open_orders/all/', methods=['GET', 'POST'], strict_slashes=False)
def open_orders_all():
return b'[]'
@app.route('/transactions/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/transactions/{}/'.format(TRADING_DEFAULT_CURRENCY_PAIR), methods=['GET', 'POST'], strict_slashes=False)
def transactions():
    """Return the simulated account's transaction history."""
    # BUG FIX: the original `raise user.transactions` raised a TypeError
    # (a list is not an exception); the handler clearly meant to return it,
    # matching the neighbouring user_transactions() endpoint.
    return user.transactions
@app.route('/user_transactions/', methods=['GET', 'POST'], strict_slashes=False)
@app.route('/v2/user_transactions/', methods=['GET', 'POST'], strict_slashes=False)
def user_transactions():
return user.transactions[::-1]
if __name__ == '__main__':
"""
curl -X POST http://127.0.0.1:5000/buy/btceur/ -d "{'amount': 0.003, 'key': '***', 'signature': '***', 'nonce': '1506251375795815'}"
curl -X POST http://127.0.0.1:5000/buy/market/btceur/ -d "amount=0.03&nounce=033"
"""
app.run()
# export PYTHONPATH=../:$PYTHONPATH; python3 main.py
| [
"premy@reactive.co.jp"
] | premy@reactive.co.jp |
4a4a7d6aa42fc38b95683641c7ce780278223e45 | fff94a56c2992b6930d9964f63dba1ddb32c3193 | /setup.py | 5c6206248384bc36faeeb5f551001680e81a22bd | [
"MIT"
] | permissive | veltzer/pylogconf | fb94ee0bf16168ab7d169b10038fa28f72df5f3b | b1a965fa25199768ad1432990ac6ec8d9bb9ac5e | refs/heads/master | 2023-07-06T11:12:30.656069 | 2023-07-01T14:40:27 | 2023-07-01T14:40:27 | 78,429,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,519 | py | import setuptools
def get_readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as readme_file:
        return readme_file.read()
setuptools.setup(
    # the first three fields are a must according to the documentation
    name="pylogconf",
    version="0.0.37",
    packages=[
        "pylogconf",
    ],
    # from here all is optional
    description="correctly configure python logging",
    long_description=get_readme(),
    long_description_content_type="text/x-rst",
    author="Mark Veltzer",
    author_email="mark.veltzer@gmail.com",
    maintainer="Mark Veltzer",
    maintainer_email="mark.veltzer@gmail.com",
    keywords=[
        "python",
        "logging",
        "configuration",
        "easy",
        "yaml",
        "json",
        "debug",
    ],
    url="https://veltzer.github.io/pylogconf",
    download_url="https://github.com/veltzer/pylogconf",
    license="MIT",
    platforms=[
        "python3",
    ],
    # runtime dependencies, installed automatically with the package
    install_requires=[
        "pyfakeuse",
        "logging_tree",
        "pyyaml",
    ],
    # PyPI trove classifiers (https://pypi.org/classifiers/)
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
)
| [
"mark.veltzer@gmail.com"
] | mark.veltzer@gmail.com |
1e45b04e9cbebb6d6b50aa7b99ef5585e8af6788 | 23bc70263cc5355a247dd242d9dc35fb64d1ffbc | /portfoliyo/tests/view/users/test_forms.py | 12ec9945d65a75e37e0b901ac5174ffc64df7832 | [] | no_license | sdevani/portfoliyo | c8abd2c7328a4a7b75d630db5ff74f2e20bbd749 | e85ae37ccbc404a26751539ea756fce484b2db62 | refs/heads/master | 2021-01-16T23:01:27.906389 | 2013-04-26T19:29:34 | 2013-04-26T19:29:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,808 | py | """Tests for user-related forms."""
from django.test.utils import override_settings
from portfoliyo.tests import factories, utils
from portfoliyo.view.users import forms
class TestRegistrationForm(object):
"""Tests for RegistrationForm."""
base_data = {
'name': 'Some Body',
'email': 'some@example.com',
'password': 'sekrit',
'password_confirm': 'sekrit',
'role': 'Some Role',
'country_code': 'us',
}
def test_register(self, db):
"""Registration creates active school_staff w/ unconfirmed email."""
form = forms.RegistrationForm(self.base_data.copy())
assert form.is_valid()
profile = form.save()
assert not profile.email_confirmed
assert profile.school_staff
assert profile.user.is_active
assert profile.country_code == 'us'
def test_source_phone(self, db):
"""Source phone is set according to country code."""
data = self.base_data.copy()
data['country_code'] = 'ca'
ca_phone = '+13216543987'
with override_settings(PORTFOLIYO_NUMBERS={'ca': ca_phone}):
form = forms.RegistrationForm(data)
assert form.is_valid()
profile = form.save()
assert profile.country_code == 'ca'
assert profile.source_phone == ca_phone
def test_unmatched_passwords(self, db):
"""Registration form not valid if passwords don't match."""
data = self.base_data.copy()
data['password'] = 'other-sekrit'
form = forms.RegistrationForm(data)
assert not form.is_valid()
assert form.errors['__all__'] == [u"The passwords didn't match."]
def test_dupe_email(self, db):
"""Registration form not valid if email already in use."""
factories.UserFactory.create(email='some@example.com')
form = forms.RegistrationForm(self.base_data.copy())
assert not form.is_valid()
assert form.errors['email'] == [
u"This email address is already in use. "
u"Please supply a different email address."
]
def test_add_school(self, db):
"""If addschool is True, create a new school and use it."""
data = self.base_data.copy()
data['addschool'] = '1'
data['addschool-name'] = "New School"
data['addschool-postcode'] = "12345"
form = forms.RegistrationForm(data)
assert form.is_valid()
profile = form.save()
school = profile.school
assert school.name == u"New School"
assert school.postcode == u"12345"
def test_add_school_takes_user_country(self, db):
"""New school takes country of new user."""
data = self.base_data.copy()
data['country_code'] = 'ca'
data['addschool'] = '1'
data['addschool-name'] = "New School"
data['addschool-postcode'] = "12345"
form = forms.RegistrationForm(data)
assert form.is_valid()
profile = form.save()
school = profile.school
assert school.country_code == 'ca'
def test_add_school_validation_error(self, db):
"""If addschool is True but fields not complete, validation error."""
data = self.base_data.copy()
data['addschool'] = 'True'
data['addschool-name'] = "New School"
data['addschool-postcode'] = ""
form = forms.RegistrationForm(data)
assert not form.is_valid()
assert form.errors['__all__'] == [u"Could not add a school."]
assert form.addschool_form.errors['postcode'] == [
u"This field is required."]
def test_no_addschool_validation_error_if_addschool_false(self, db):
"""If addschool is False, addschool form not bound."""
data = self.base_data.copy()
data['addschool'] = 'False'
data['email'] = 'not a valid email'
form = forms.RegistrationForm(data)
assert not form.is_valid()
assert not form.addschool_form.is_bound
def test_no_school(self, db):
"""If no school selected, create one."""
form = forms.RegistrationForm(self.base_data.copy())
assert form.is_valid()
profile = form.save()
school = profile.school
assert school.auto
assert not school.postcode
def test_add_dupe_school(self, db):
"""No integrity error on school-creation race condition."""
data = self.base_data.copy()
data['addschool'] = '1'
data['addschool-name'] = "My School"
data['addschool-postcode'] = "12345"
form = forms.RegistrationForm(data)
assert form.is_valid()
school = factories.SchoolFactory.create(
name="My School",
postcode="12345",
)
profile = form.save()
assert profile.school == school
class TestEditProfileForm(object):
    """Tests for EditProfileForm."""

    def test_update_relationships(self, db):
        """
        Updating role updates matching relationship descriptions to empty.

        If I have my role set in my profile as 'foo' and I change it to 'bar',
        any relationships where I am the elder and the relationship description
        is 'foo' will be updated to '' (which falls back to profile role).
        """
        rel1 = factories.RelationshipFactory.create(
            description='foo', from_profile__role='foo')
        # A second relationship from the same elder with a different
        # description; it must be left untouched by the role change.
        rel2 = factories.RelationshipFactory.create(
            description='bar', from_profile=rel1.elder)
        form = forms.EditProfileForm(
            {'name': 'New', 'role': 'new'}, instance=rel1.elder)
        assert form.is_valid()
        form.save()
        # Re-fetch from the database to observe the post-save state.
        rel1 = utils.refresh(rel1)
        rel2 = utils.refresh(rel2)
        assert rel1.description == ''
        assert rel2.description == 'bar'
| [
"carl@oddbird.net"
] | carl@oddbird.net |
4bc4e9d63a5d83d6aa54557c80f837a7946c8875 | a97f60aaa261a0e54f674c2bd1587694c41fd50d | /bitsofpluto.py | 89c242e1333178b1686d146641168d9a19c5a9ac | [] | no_license | asears/bitsofpluto | 7c4bef986740d4518972e3c1c8418db97e173600 | 1cb6a80a9863e429693640b36608e6f2b360895e | refs/heads/master | 2020-11-26T10:34:50.752896 | 2019-09-16T07:15:00 | 2019-09-16T07:15:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,617 | py | #!/usr/bin/env python
# encoding: utf-8
"""
Twitter bot. Tweeting a different bit of Pluto every six hours.
Photo by NASA's New Horizons spacecraft.
https://www.nasa.gov/image-feature/the-rich-color-variations-of-pluto/…
"""
from __future__ import print_function
import argparse
import os.path
import random
import sys
import tempfile
import webbrowser
import twitter # pip install twitter
import yaml # pip install PyYAML
import closest_point_to_pluto
from PIL import Image # pip install pillow
WIDTHS = [600, 800, 1000, 1200, 2000]
def load_yaml(filename):
    """
    Load Twitter API credentials from a YAML file and validate them.

    Exits the program if any of the four keys is missing.

    File should contain:
    consumer_key: TODO_ENTER_YOURS
    consumer_secret: TODO_ENTER_YOURS
    access_token: TODO_ENTER_YOURS
    access_token_secret: TODO_ENTER_YOURS
    """
    with open(filename) as f:
        data = yaml.safe_load(f)
    # viewkeys() is the Python 2 spelling of the keys view object; the `>=`
    # set comparison below checks that all required keys are present.
    keys = data.viewkeys() if sys.version_info.major == 2 else data.keys()
    if not keys >= {
        "access_token",
        "access_token_secret",
        "consumer_key",
        "consumer_secret",
    }:
        sys.exit("Twitter credentials missing from YAML: " + filename)
    return data
def tweet_it(string, credentials, image=None):
""" Tweet string using credentials """
if len(string) <= 0:
return
# Create and authorise an app with (read and) write access at:
# https://dev.twitter.com/apps/new
# Store credentials in YAML file
auth = twitter.OAuth(
credentials["access_token"],
credentials["access_token_secret"],
credentials["consumer_key"],
credentials["consumer_secret"],
)
t = twitter.Twitter(auth=auth)
print("TWEETING THIS:\n", string)
if args.test:
print("(Test mode, not actually tweeting)")
else:
if image:
print("Upload image")
# Send images along with your tweets.
# First just read images from the web or from files the regular way
with open(image, "rb") as imagefile:
imagedata = imagefile.read()
t_up = twitter.Twitter(domain="upload.twitter.com", auth=auth)
id_img = t_up.media.upload(media=imagedata)["media_id_string"]
else:
id_img = None # Does t.statuses.update work with this?
lat, long = closest_point_to_pluto.closest_point_to_pluto()
result = t.statuses.update(
status=string,
media_ids=id_img,
lat=lat,
long=long,
display_coordinates=True,
)
url = (
"http://twitter.com/"
+ result["user"]["screen_name"]
+ "/status/"
+ result["id_str"]
)
print("Tweeted:\n" + url)
if not args.no_web:
webbrowser.open(url, new=2) # 2 = open in a new tab, if possible
def bitsofpluto(pluto_filename):
""" Get a bit of Pluto """
pluto = Image.open(pluto_filename)
print(pluto.size)
while True:
width = random.choice(WIDTHS)
height = width * 3 / 4
print("width, height:", width, height)
x = random.randrange(0, pluto.width - width + 1)
y = random.randrange(0, pluto.height - height + 1)
print("x, y: ", x, y)
print("x + width, y + height: ", x + width, y + height)
bit_of_pluto = pluto.crop((x, y, x + width, y + height))
top = 0
left = 0
bottom = bit_of_pluto.height - 1
right = bit_of_pluto.width - 1
points = [
(left, top),
(right, top),
(right / 2, top),
(left, bottom / 2),
(right, bottom / 2),
(right / 2, bottom / 2),
(left, bottom),
(right, bottom),
(right / 2, bottom),
]
total_brightness = 0
total_dark_points = 0
for point in points:
r, g, b = bit_of_pluto.getpixel(point)
brightness = sum([r, g, b]) / 3 # 0 is black and 255 is white
print("r, g, b, brightness: ", r, g, b, brightness)
total_brightness += brightness
if brightness < 10:
total_dark_points += 1
print("total_brightness: ", total_brightness)
print("total_dark_points: ", total_dark_points)
if total_dark_points <= 6:
# bit_of_pluto.show()
break
outfile = os.path.join(tempfile.gettempdir(), "bitofpluto.jpg")
print("outfile: " + outfile)
bit_of_pluto.save(outfile, quality=95)
return outfile
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Tweeting a different bit of Pluto every six hours.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-y",
"--yaml",
default="M:/bin/data/bitsofpluto.yaml",
help="YAML file location containing Twitter keys and secrets",
)
parser.add_argument(
"-nw",
"--no-web",
action="store_true",
help="Don't open a web browser to show the tweeted tweet",
)
parser.add_argument(
"-x",
"--test",
action="store_true",
help="Test mode: go through the motions but don't tweet anything",
)
parser.add_argument(
"-p",
"--pluto",
default="M:/bin/data/pluto/crop_p_color2_enhanced_release.7000x7000.png",
help="Path to a big photo of Pluto",
)
args = parser.parse_args()
credentials = load_yaml(args.yaml)
image = bitsofpluto(args.pluto)
tweet = "A bit of Pluto"
tweet_it(tweet, credentials, image)
# End of file
| [
"hugovk@users.noreply.github.com"
] | hugovk@users.noreply.github.com |
6c3f7cf99c6ff475862e67b3741ac051d547d733 | 0db97db08743783019efe022190f409d22ff95bd | /aliyun/api/rest/Rds20130528DescribeSecurityIpsRequest.py | 8fba72f95fde069ab59928950cd9bfc12d5138d4 | [
"Apache-2.0"
] | permissive | snowyxx/aliyun-python-demo | 8052e2a165f1b869affe632dda484d6ca203bd9b | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | refs/heads/master | 2021-01-10T03:37:31.657793 | 2016-01-21T02:03:14 | 2016-01-21T02:03:14 | 49,921,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | '''
Created by auto_sdk on 2014.10.21
'''
from aliyun.api.base import RestApi
class Rds20130528DescribeSecurityIpsRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.DBInstanceId = None
def getapiname(self):
return 'rds.aliyuncs.com.DescribeSecurityIps.2013-05-28'
| [
"snowyxx@126.com"
] | snowyxx@126.com |
b3f8a8af7f853575ba3e3f9c063337ec7292bf2e | 2f91251d41f32346f2f6eb5f0a6e957f253f005f | /bit manipulation/python/leetcode289_Game_of_Life.py | 02b9f64191bcfdb7a15dd241bf098483a1844fea | [
"Apache-2.0"
] | permissive | wenxinjie/leetcode | 8a5f666a1548d0d205cea09cb87fc2c65aec2b58 | c459a01040c8fe0783e15a16b8d7cca4baf4612a | refs/heads/master | 2020-03-24T21:56:00.662969 | 2018-09-06T21:25:58 | 2018-09-06T21:25:58 | 143,058,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,228 | py |
# According to the Wikipedia's article: "The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970."
# Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):
# Any live cell with fewer than two live neighbors dies, as if caused by under-population.
# Any live cell with two or three live neighbors lives on to the next generation.
# Any live cell with more than three live neighbors dies, as if by over-population..
# Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
# Write a function to compute the next state (after one update) of the board given its current state. The next state is created by applying the above rules simultaneously to every cell in the current state, where births and deaths occur simultaneously.
# Example:
# Input:
# [
# [0,1,0],
# [0,0,1],
# [1,1,1],
# [0,0,0]
# ]
# Output:
# [
# [0,0,0],
# [1,0,1],
# [0,1,1],
# [0,1,0]
# ]
class Solution(object):
    def gameOfLife(self, board):
        """
        Advance the Game of Life board by one generation, in place.

        Each cell is updated using two bits: bit 0 holds the current state
        and bit 1 holds the next state, so neighbour counts can still read
        the original values during the first pass.

        :type board: List[List[int]]
        :rtype: void Do not return anything, modify board in-place instead.
        """
        if not board or len(board) == 0:
            return
        m, n = len(board), len(board[0])
        # First pass: encode the next state into bit 1 of each cell.
        for i in range(m):
            for j in range(n):
                live = self.countLive(board, m, n, i, j)
                if board[i][j] == 1 and live in (2, 3):
                    board[i][j] = 3  # 0b11: alive now, alive next
                if board[i][j] == 0 and live == 3:
                    board[i][j] = 2  # 0b10: dead now, alive next
        # Second pass: shift the next state down into bit 0.
        for i in range(m):
            for j in range(n):
                board[i][j] = board[i][j] >> 1

    def countLive(self, board, m, n, i, j):
        """Return the number of live neighbours of cell (i, j)."""
        live = 0
        # Sum bit 0 (the original state) over the 3x3 window clipped to the
        # board, then exclude the centre cell itself.
        for x in range(max(i - 1, 0), min(i + 2, m)):
            for y in range(max(j - 1, 0), min(j + 2, n)):
                live += board[x][y] & 1
        # BUG FIX: the original line `live -= board[i][j] &` was a
        # SyntaxError (missing right operand); it must mask with 1.
        live -= board[i][j] & 1
        return live
# Time: O(n^2)
# Space: O(1)
# Difficulty: medium
| [
"wenxinjieinnyc@gmail.com"
] | wenxinjieinnyc@gmail.com |
fae288441e708509afa743d412bbf58f5f5f1d63 | 6c37d1d2437a08e43b13d621d4a8da4da7135b3a | /yt_dlp/extractor/gotostage.py | 112293bef56c46a7ec238dacf85905690703b07e | [
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] | permissive | yt-dlp/yt-dlp | be040bde10cc40258c879c75ab30215686352824 | d3d81cc98f554d0adb87d24bfd6fabaaa803944d | refs/heads/master | 2023-09-05T21:15:21.050538 | 2023-09-05T20:35:23 | 2023-09-05T20:35:23 | 307,260,205 | 52,742 | 5,376 | Unlicense | 2023-09-14T05:22:08 | 2020-10-26T04:22:55 | Python | UTF-8 | Python | false | false | 2,727 | py | from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
try_get,
url_or_none
)
import json
class GoToStageIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gotostage\.com/channel/[a-z0-9]+/recording/(?P<id>[a-z0-9]+)/watch'
_TESTS = [{
'url': 'https://www.gotostage.com/channel/8901680603948959494/recording/60bb55548d434f21b9ce4f0e225c4895/watch',
'md5': 'ca72ce990cdcd7a2bd152f7217e319a2',
'info_dict': {
'id': '60bb55548d434f21b9ce4f0e225c4895',
'ext': 'mp4',
'title': 'What is GoToStage?',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 93.924711
}
}, {
'url': 'https://www.gotostage.com/channel/bacc3d3535b34bafacc3f4ef8d4df78a/recording/831e74cd3e0042be96defba627b6f676/watch?source=HOMEPAGE',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
metadata = self._download_json(
'https://api.gotostage.com/contents?ids=%s' % video_id,
video_id,
note='Downloading video metadata',
errnote='Unable to download video metadata')[0]
registration_data = {
'product': metadata['product'],
'resourceType': metadata['contentType'],
'productReferenceKey': metadata['productRefKey'],
'firstName': 'foo',
'lastName': 'bar',
'email': 'foobar@example.com'
}
registration_response = self._download_json(
'https://api-registrations.logmeininc.com/registrations',
video_id,
data=json.dumps(registration_data).encode(),
expected_status=409,
headers={'Content-Type': 'application/json'},
note='Register user',
errnote='Unable to register user')
content_response = self._download_json(
'https://api.gotostage.com/contents/%s/asset' % video_id,
video_id,
headers={'x-registrantkey': registration_response['registrationKey']},
note='Get download url',
errnote='Unable to get download url')
return {
'id': video_id,
'title': try_get(metadata, lambda x: x['title'], compat_str),
'url': try_get(content_response, lambda x: x['cdnLocation'], compat_str),
'ext': 'mp4',
'thumbnail': url_or_none(try_get(metadata, lambda x: x['thumbnail']['location'])),
'duration': try_get(metadata, lambda x: x['duration'], float),
'categories': [try_get(metadata, lambda x: x['category'], compat_str)],
'is_live': False
}
| [
"noreply@github.com"
] | yt-dlp.noreply@github.com |
ea7239225feecb34fa333d4d532077816030969f | 312a86122cd08be0a8b7d0aacf87c8445aa6dead | /cmsplugin_blog/migrations/0007_auto__del_pygmentsplugin__chg_field_entry_tags.py | 19ebbd3218c864309e4b6d9f986286c701b081bb | [] | no_license | arnaudbenard/fle-website | f4cbaaa10d96de6eca9a18a5ba558480892d268d | d90d6be9909cd962ca22c72b3af8c43966a33e71 | refs/heads/master | 2021-01-21T01:33:37.534567 | 2013-05-21T22:16:12 | 2013-05-21T22:16:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_blog.entry': {
'Meta': {'object_name': 'Entry'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {'default': "''"})
},
'cmsplugin_blog.entrytitle': {
'Meta': {'object_name': 'EntryTitle'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_blog.Entry']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cmsplugin_blog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', 'db_table': "'cmsplugin_latestentriesplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'current_language_only': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'limit': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['cmsplugin_blog']
| [
"jamalex@gmail.com"
] | jamalex@gmail.com |
cf4a5ac19de8240a8e03404df64ae4461030be7d | b6ff7b98079bcc8e7d8ad248e2de1f61bfabc0db | /multiples_sum_average.py | 296bbc63022fdf2e0973adcce247a959ee3cfd83 | [] | no_license | bakker4444/multiples_sum_average | 4b55808276dcd853d8688e255581c363b6c4cabb | efbc13dda3cc3459b44e0eb853dcb1f11ddca767 | refs/heads/master | 2020-03-09T08:35:48.384936 | 2018-04-09T00:17:45 | 2018-04-09T00:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Multiples
# Part 1 - print odd numbers from 1 to 1000
for i in range(1, 1001, 2):
print i
# Multiples
# Part 2 - print all the multiples of 5 from 5 to 1,000,000
for i in range(5, 1000001, 5):
print i
# Sum List
# Create a program that prints the sum of all the values in the list
a = [1, 2, 5, 10, 255, 3]
print sum(a)
# Average Lists
# Create a program that prints the average of the values in the list
b = [1, 2, 5, 10, 255, 3]
print sum(b)/len(b)
| [
"bakker4444@gmail.com"
] | bakker4444@gmail.com |
f8aaa4290c282f8bce6f11f9e721a671c7ceb6bc | 696dec6a8d1eba189d36049afedec36da47c08f3 | /dataset_utilities/_core_utils/_unet_attn.py | e87f1eef42bece2a971fe9aa3e96cd671e454917 | [] | no_license | JoelRaymann/polyp-segmentation | d99079f56bb3ae0886fb4c610c4abcc420137781 | 38da6c8bf47df2d2382d31f04faf63649b7d8ab0 | refs/heads/master | 2023-04-10T06:17:34.720237 | 2021-04-14T22:04:36 | 2021-04-14T22:04:36 | 358,053,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | """
Script Implementing U-Net Attention dice's data generator core functionality and all its helper functions
"""
# Import necessary packages
import tensorflow as tf
# Handling U-Net Attention dice
def _load_data_unet_attn(img, seg):
"""
Function to prepare the images and return the data in accordance with the U-Net Attention dice model for training.
Parameters
----------
img : tf.Tensor
The image input tensor.
seg : tf.Tensor
The segmentation input tensor
Returns
-------
tuple
The tuple output (image, segmentation)
"""
# Get the numpy version
img = img.numpy().copy()
seg = seg.numpy().copy()
# Normalize
img = img / 255.0
seg = seg / 255.0
return img, tf.expand_dims(seg, axis=-1)
| [
"joelraymann@gmail.com"
] | joelraymann@gmail.com |
a5636d5656eb4187372f3d8f24142e7df9c7fa89 | 74473f650f36ad78d8eee801dcbdea8a4825d4a2 | /voltron/remote_debugger.py | 49b88fea297b75670aad50a41c662f1e7ac3c45f | [] | no_license | buttslol/voltron | f2a20d808ca165feaee80d3f6350695a4b7334fc | 1b00674276acd465ca69b78d6ea91aa9467ed92f | refs/heads/master | 2021-01-16T21:40:23.455224 | 2013-10-12T11:36:38 | 2013-10-12T11:36:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import pdb
import socket
import sys
# Trying to debug a quirk in some code that gets called async by {ll,d}db?
#
# from .remote_debugger import Rdb
# Rdb().set_trace()
#
# Then: telnet localhost 4444
socks = {}
# Only bind the socket once
def _sock(port):
if port in socks:
return socks[port]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", port))
socks[port] = s
return s
class Rdb(pdb.Pdb):
def __init__(self, port=4444):
self.old_stdout = sys.stdout
self.old_stdin = sys.stdin
self.skt = _sock(port)
self.skt.listen(1)
(clientsocket, address) = self.skt.accept()
handle = clientsocket.makefile('rw')
pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
sys.stdout = sys.stdin = handle
| [
"richo@psych0tik.net"
] | richo@psych0tik.net |
f64a3a28daa398d4d3500352ba4349bb2d82e861 | a404f504febdc835b69b72c4ac28b153885fc119 | /DungeonsKitgard/KithgardBrawl/KithgardBrawl3.py | edd41caf028501808f7009ea6568177cec161633 | [] | no_license | Chaboi45/CodeCombat | 6093a2eae29ef00c0c277653c4ffd075c9e2ac4c | 6e008a94e65bb72ca9292e303d391a4142de16f5 | refs/heads/master | 2021-07-02T23:24:52.050154 | 2017-09-21T07:42:21 | 2017-09-21T07:42:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | while True:
enemys = hero.findEnemies()
index = 0
closest_soldier = None
soldier_dist = 999
closest_archer = None
archer_dist = 999
closest = None
dist = 999
close_count = 0;
priority = None
while (index < len(enemys)):
distance = hero.distanceTo(enemys[index])
shield = False;
if (enemys[index].type == 'shaman' and distance < 20):
priority = enemys[index];
if (enemys[index].type == 'Ogre' and distance < 10):
shield = True
if (distance < 10):
close_count += 1
if (enemys[index].health > 0):
if (enemys[index].type == 'archer' and distance < archer_dist):
archer_dist = distance
closest_archer = enemys[index]
if (enemys[index].type == 'soldier' and distance < soldier_dist):
soldier_dist = distance
closest_soldier = enemys[index]
if (distance < dist):
soldier_dist = dist
closest = enemys[index]
index += 1
if (priority):
enemy = priority
# elif(closest_archer and archer_dist<15):
# enemy = closest_archer
# elif(closest_soldier and soldier_dist<10):
# enemy = closest_soldier
else:
enemy = closest
if (hero.health < hero.maxHealth / 3):
item = hero.findNearest(hero.findItems())
if (item):
if (hero.isReady("jump")):
hero.jumpTo(item.pos)
else:
hero.move(item.pos)
elif (enemy):
if (hero.isReady("jump") and hero.distanceTo > 10):
hero.jumpTo(enemy.pos)
elif (hero.isReady("bash")):
hero.bash(enemy)
elif (hero.isReady("power-up")):
hero.powerUp()
hero.attack(enemy)
elif (hero.isReady("cleave") and close_count >= 7):
hero.cleave(enemy)
elif (shield):
hero.shield()
elif (close_count < 10 or priority):
hero.attack(enemy)
else:
hero.shield()
| [
"vadim-job-hg@yandex.ru"
] | vadim-job-hg@yandex.ru |
1970ad0f37eea983e19e8edfa26c8d95bbb9dd45 | 70450f0c551adf47b450468e424f4f90bebfb58d | /dataclasses/resources/test/test_I3RecoPulseSeriesMapMask_pybindings.py | ef13165679e0c4ac5ba061bf58050c184596e4c9 | [
"MIT"
] | permissive | hschwane/offline_production | ebd878c5ac45221b0631a78d9e996dea3909bacb | e14a6493782f613b8bbe64217559765d5213dc1e | refs/heads/master | 2023-03-23T11:22:43.118222 | 2021-03-16T13:11:22 | 2021-03-16T13:11:22 | 280,381,714 | 0 | 0 | MIT | 2020-07-17T09:20:29 | 2020-07-17T09:20:29 | null | UTF-8 | Python | false | false | 2,626 | py | #!/usr/bin/env python
import unittest
import sys
from icecube import icetray,dataclasses
class I3RecoPulseSeriesMapMaskTest(unittest.TestCase):
    """Exercise I3RecoPulseSeriesMapMask: apply, combine, query, equality.

    Uses ``assertEqual``/``assertNotEqual``; the ``assertEquals`` aliases
    are deprecated and removed in Python 3.12.
    """

    def setUp(self):
        """Build a frame with a two-DOM pulse map and two masks, each
        hiding one pulse on a different DOM."""
        self.frame = icetray.I3Frame(icetray.I3Frame.Physics)
        pulses = dataclasses.I3RecoPulseSeriesMap()
        key1 = icetray.OMKey(42, 7)
        vec = dataclasses.I3RecoPulseSeries()
        pulse = dataclasses.I3RecoPulse()
        pulse.time = 1.0
        pulse.charge = 2.3
        vec.append(pulse)
        pulse.time = 2.0
        vec.append(pulse)
        pulse.time = 15.0
        vec.append(pulse)
        pulses[key1] = vec
        key2 = icetray.OMKey(7,7)
        vec = dataclasses.I3RecoPulseSeries()
        pulse.time = 1.0
        pulse.charge = 2.3
        vec.append(pulse)
        pulse.time = 2.0
        vec.append(pulse)
        pulse.time = 15.0
        vec.append(pulse)
        pulses[key2] = vec
        self.frame['Pulses'] = pulses

        # Mask1 hides pulse 1 on DOM(42,7); Mask2 hides pulse 1 on DOM(7,7).
        mask1 = dataclasses.I3RecoPulseSeriesMapMask(self.frame, 'Pulses')
        mask1.set(key1, 1, False)
        self.frame['Mask1'] = mask1
        mask2 = dataclasses.I3RecoPulseSeriesMapMask(self.frame, 'Pulses')
        mask2.set(key2, 1, False)
        self.frame['Mask2'] = mask2

    def testApply(self):
        """Each mask drops exactly one pulse from its DOM."""
        mask = self.frame['Mask1']
        pulses = mask.apply(self.frame)
        self.assertEqual(len(pulses), 2)
        self.assertEqual(len(pulses.values()[0]), 3)
        self.assertEqual(len(pulses.values()[1]), 2)

        mask = self.frame['Mask2']
        pulses = mask.apply(self.frame)
        self.assertEqual(len(pulses), 2)
        self.assertEqual(len(pulses.values()[0]), 2)
        self.assertEqual(len(pulses.values()[1]), 3)

    def testCombine(self):
        """AND keeps the intersection of pulses, OR keeps the union."""
        mask1 = self.frame['Mask1']
        mask2 = self.frame['Mask2']

        combined = mask1 & mask2
        pulses = combined.apply(self.frame)
        self.assertEqual(len(pulses), 2)
        self.assertEqual(len(pulses.values()[0]), 2)
        self.assertEqual(len(pulses.values()[1]), 2)

        combined = mask1 | mask2
        pulses = combined.apply(self.frame)
        self.assertEqual(len(pulses), 2)
        self.assertEqual(len(pulses.values()[0]), 3)
        self.assertEqual(len(pulses.values()[1]), 3)

    def testQuery(self):
        """any/all/sum reflect the set bits; set_none clears them."""
        mask1 = self.frame['Mask1']
        self.assertEqual(mask1.any(), True)
        self.assertEqual(mask1.all(), False)
        self.assertEqual(mask1.sum(), 5)
        mask1.set_none()
        self.assertEqual(mask1.any(), False)
        self.assertEqual(mask1.sum(), 0)

    def testEqual(self):
        """A copied mask compares equal to its source; distinct masks differ."""
        mask1 = self.frame['Mask1']
        mask2 = self.frame['Mask2']
        mask3 = dataclasses.I3RecoPulseSeriesMapMask(mask1)
        self.assertNotEqual(mask1,mask2)
        self.assertEqual(mask1.source,mask3.source)
        self.assertEqual(mask1.bits,mask3.bits)
        self.assertEqual(mask1,mask3)
        self.assertEqual(mask1 != mask3,False)
# Run the suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"aolivas@umd.edu"
] | aolivas@umd.edu |
d405f7741c1355404c6409d8713c69c2ea95bd5b | d873f3e6c322c930068711e795bfc635ed98fd6a | /mainapp/dao/Product/ProductImageDao.py | 2b71855d6d277ecf129805bf13f3abe5a28ce74d | [] | no_license | trunganhvu/personalweb | 3d912366045448b20ec9b7722e9190197cef4f61 | b7afc1a32665e578cbd546b1d2c375597b060914 | refs/heads/master | 2023-08-18T04:30:40.302843 | 2021-09-30T15:46:24 | 2021-09-30T15:46:24 | 401,062,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | from mainapp.model.ProductImage import ProductImage
from datetime import datetime
from django.utils import timezone
def get_all_image_in_product(product_id):
    """
    Return a queryset of every image attached to the given product.
    """
    return ProductImage.objects.filter(product_id=product_id)
def get_one_image_in_product(product_id):
    """
    Return the first image of the given product, or None when it has none.
    """
    return ProductImage.objects.filter(product_id=product_id).first()
def get_product_image_by_image_id(product_image_id):
    """
    Fetch a single image row by primary key (raises DoesNotExist if missing).
    """
    return ProductImage.objects.get(pk=product_image_id)
def insert_image(product_image):
    """
    Persist a new image row copied from the given (unsaved) instance.

    The creation timestamp is stamped here, in UTC.
    """
    new_row = ProductImage(
        product_image_name=product_image.product_image_name,
        product_image_path=product_image.product_image_path,
        product_id=product_image.product_id,
        created_at=datetime.now(tz=timezone.utc),
    )
    new_row.save()
    return new_row
def update_image(product_image):
    """
    Copy name and path onto the stored row with the same pk and save it.
    """
    stored = ProductImage.objects.get(pk=product_image.product_image_id)
    stored.product_image_name = product_image.product_image_name
    stored.product_image_path = product_image.product_image_path
    stored.save()
    return stored
def delete_image_by_id(product_image_id):
    """
    Delete the image row with the given primary key.
    """
    ProductImage.objects.get(pk=product_image_id).delete()
def delete_all_product_image_by_product_id(product_id):
    """
    Delete every image row belonging to the given product.
    """
    ProductImage.objects.filter(product_id=product_id).delete()
"vutrunganh1@gmail.com"
] | vutrunganh1@gmail.com |
e36ef19e8a22954393d4d6945028df91e13e2086 | 2432996ac1615cd36d61f0feeff8a359d2b438d8 | /env/lib/python3.8/site-packages/PyInstaller/hooks/rthooks/pyi_rth_glib.py | c8a30ed042ca186466fa26d6db022c72d8a049d1 | [
"Apache-2.0"
] | permissive | Parveshdhull/AutoTyper | dd65d53ece7c13fbc1ead7ce372947483e05e2e3 | 7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c | refs/heads/main | 2023-05-08T14:10:35.404160 | 2023-05-07T20:43:15 | 2023-05-07T20:43:15 | 315,415,751 | 26 | 18 | Apache-2.0 | 2023-05-07T20:43:16 | 2020-11-23T19:13:05 | Python | UTF-8 | Python | false | false | 560 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2015-2020, PyInstaller Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
import os
import sys

# Point glib at the data files PyInstaller unpacked into the bundle's
# extraction directory (sys._MEIPASS) so bundled resources resolve at runtime.
os.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')
| [
"parvesh.dhullmonu@gmail.com"
] | parvesh.dhullmonu@gmail.com |
a899d1f17d27c6ae466745f8cc745c25c5c96fe5 | dbeb1e145eba012a200073038d8a8965ae0c6f5d | /MachineLearning/MLaPP/MixtureModels/mixGaussMLvsMAP.py | 29000b8f1da18391afd40be017477335f1fe0a4f | [] | no_license | hellJane/Python_DataAnalysis | b7027cb9d8e75a98b5626a58ee85b64f62c54c9c | 966ee5d732e074e9d124333f13d3e3e23ade1edc | refs/heads/master | 2021-05-17T01:57:24.092791 | 2017-12-01T15:32:32 | 2017-12-01T15:32:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,409 | py | import numpy as np
import scipy.stats as ss
import scipy.linalg as sl
import sklearn.preprocessing as sp
import matplotlib.pyplot as plt
from mixGaussFit import *
np.random.seed(0)
# Generate Data, fixed K = 3
def MakeCov(origMat, D):
    """Embed a 2x2 covariance block into a D x D matrix.

    origMat must be 2x2; the remaining D-2 dimensions are independent
    with unit variance (identity block).
    """
    corner = np.asarray(origMat)
    upper_right = np.zeros((2, D - 2))
    lower_left = np.zeros((D - 2, 2))
    identity = np.eye(D - 2)
    return np.block([[corner, upper_right], [lower_left, identity]])
def GetInitial(D, K=3):
    """Return random initial means (K x D, uniform in [0, 1)) and uniform
    mixing weights (each 1/K)."""
    initial_means = np.random.rand(K, D)
    uniform_weights = np.full(K, 1 / K)
    return initial_means, uniform_weights
def Sample(D, N=100):
    """Draw N samples from a fixed 3-component Gaussian mixture in D dims.

    Components have hard-coded 2-D correlated blocks embedded in D dims,
    with sample fractions 0.5 / 0.3 / 0.2.  Returns (samples, sigma_init)
    where sigma_init holds the true component covariances, used as fixed
    initial values for EM.
    """
    means = [
        np.r_[-1, 1, np.zeros(D - 2)],
        np.r_[1, -1, np.zeros(D - 2)],
        np.r_[3, -1, np.zeros(D - 2)],
    ]
    corners = [
        [[1, -0.7], [-0.7, 1]],
        [[1, 0.7], [0.7, 1]],
        [[1, 0.9], [0.9, 1]],
    ]
    covariances = [MakeCov(corner, D) for corner in corners]
    fractions = [0.5, 0.3, 0.2]  # per-component sample proportions

    # Draw the components in the same order as before to keep the RNG
    # stream identical.
    draws = [
        ss.multivariate_normal(mean, cov).rvs(int(frac * N))
        for mean, cov, frac in zip(means, covariances, fractions)
    ]
    samples = np.r_[draws[0], draws[1], draws[2]]
    sigma_init = np.array(covariances)
    return samples, sigma_init  # data set plus the fixed covariance inits
# Fit model with MLE or MAP
def Fit(x, pi, mu, cov, isMAP=False):
    """Run EM (or MAP-EM) on data x from the given initial parameters.

    :param x: (N, D) data matrix
    :param pi: initial mixing weights
    :param mu: initial component means
    :param cov: initial component covariances
    :param isMAP: use EM_MAP with an NIW prior instead of plain EM
    :return: True if EM completed without raising, False otherwise
        (an exception — e.g. a singular covariance — counts as a failure)
    """
    success = True
    try:
        maxIter = 30
        cov_old = cov
        pi_old = pi
        mu_old = mu
        prior = MakeNIWPrior(x)
        for i in range(maxIter):
            if isMAP:
                r, pi_new, mu_new, cov_new = EM_MAP(x, pi_old, mu_old, cov_old, prior)
            else:
                r, pi_new, mu_new, cov_new = EM(x, pi_old, mu_old, cov_old)
            #print('{0:-^60}'.format('Iteration: ' + str(i + 1)))
            #print('pi: ', pi_new)
            # FIX: compare against the previous iterate (pi_old), not the
            # initial weights (pi) — the old check only fired if the weights
            # happened to return to their starting values.
            if np.allclose(pi_new, pi_old):
                print('converged')
                break
            pi_old = pi_new
            mu_old = mu_new
            cov_old = cov_new
    except Exception as e:
        print(e)
        success = False
    return success
# Fit with several trials
def GetFailRatio(D, trials=10):
    """Return [MLE failure rate, MAP failure rate] over several EM trials
    on standardized D-dimensional mixture data."""
    print('D = ', D)
    x, cov = Sample(D)
    # Standardize features; the true covariances stay as EM initial values.
    x = sp.StandardScaler().fit_transform(x)
    MLE_fail, MAP_fail = 0, 0
    for i in range(trials):
        mu, pi = GetInitial(D)  # different random initial values each trial
        if not Fit(x, pi, mu, cov, True):
            MAP_fail += 1
        if not Fit(x, pi, mu, cov, False):
            MLE_fail += 1
    print('MLE_fail, MAP_fail: ', MLE_fail, MAP_fail)
    return [MLE_fail / trials, MAP_fail / trials]
# Sweep the dimensionality and record EM failure fractions for MLE vs MAP.
D = np.arange(10, 101, 10)
ratios = []
for i in range(len(D)):
    Di = D[i]
    ratios.append(GetFailRatio(Di))
ratios = np.array(ratios)
print('ratios: \n', ratios)

# plots: failure fraction vs dimensionality, one curve per estimator
fig = plt.figure()
fig.canvas.set_window_title("mixGaussMLvsMAP")
plt.subplot()
plt.axis([5, 105, -0.04, 1.04])
plt.xticks(np.arange(10, 101, 10))
plt.yticks(np.arange(0, 1.01, 0.1))
plt.xlabel('dimensionality')
plt.ylabel('fraction of times EM for GMM fails')
plt.plot(D, ratios[:, 0], 'r-', marker='o', fillstyle='none', label='MLE')
plt.plot(D, ratios[:, 1], 'k:', marker='s', fillstyle='none', label='MAP')
plt.legend()
plt.show()
| [
"noreply@github.com"
] | hellJane.noreply@github.com |
994b6e8a30b8b059f3ada223aba127b030d365e9 | fafb89a3552e4dbb47d134966462ef5f3f37f576 | /KEMP/v0.7_pml/fdtd3d/naive/pml.py | f6409279223ecf31ef52a0051bacb14a914258ca | [] | no_license | EMinsight/fdtd_accelerate | 78fa1546df5264550d12fba3cf964838b560711d | a566c60753932eeb646c4a3dea7ed25c7b059256 | refs/heads/master | 2021-12-14T03:26:52.070069 | 2012-07-25T08:25:21 | 2012-07-25T08:25:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,491 | py | from __future__ import division
import numpy as np
from kemp.fdtd3d.util import common
from fields import Fields
class Pml:
def __init__(self, fields, directions, npml=50, sigma_max=0.5, kappa_max=1, alpha_max=0, m_sigma=3, m_alpha=1):
common.check_type('fields', fields, Fields)
common.check_type('directions', directions, (list, tuple), str)
assert len(directions) == 3
for axis in directions:
assert axis in ['+', '-', '+-', '']
# local variables
dt = fields.dt
nx, ny, nz = fields.ns
dtype = fields.dtype
# allocations
psi_xs = [np.zeros((2*npml + 2, ny, nz), dtype) for i in range(4)]
psi_ys = [np.zeros((nx, 2*npml + 2, nz), dtype) for i in range(4)]
psi_zs = [np.zeros((nx, 2*npml + 2, nz), dtype) for i in range(4)]
i_e = np.arange(0.5, npml)
i_h = np.arange(1, npml+1)
sigma_e = sigma_max# * (i_e / npml) ** m_sigma
sigma_h = sigma_max# * (i_h / npml) ** m_sigma
print 'sigma_e', sigma_e
print 'sigma_h', sigma_h
kappa_e = 1 + (kappa_max - 1) * (i_e / npml) ** m_sigma
kappa_h = 1 + (kappa_max - 1) * (i_h / npml) ** m_sigma
alpha_e = alpha_max * ((npml - i_e) / npml) ** m_alpha
alpha_h = alpha_max * ((npml - i_h) / npml) ** m_alpha
com_e = (kappa_e * alpha_e + sigma_e) * dt + 2 * kappa_e
com_h = (kappa_h * alpha_h + sigma_h) * dt + 2 * kappa_h
pca_e = 4 * kappa_e / com_e - 1
pca_h = 4 * kappa_h / com_h - 1
pcb_e = (alpha_e * dt - 2 + 4 * kappa_e) / com_e - 1
pcb_h = (alpha_h * dt - 2 + 4 * kappa_h) / com_h - 1
pcc_e = (alpha_e * dt + 2) / com_e - 1
pcc_h = (alpha_h * dt + 2) / com_h - 1
# global variables
self.mainf = fields
self.directions = directions
self.npml = npml
self.psi_xs = psi_xs
self.psi_ys = psi_ys
self.psi_zs = psi_zs
self.pcs_e = [pca_e, pcb_e, pcc_e]
self.pcs_h = [pca_h, pcb_h, pcc_h]
# append to the update list
self.priority_type = 'pml'
fields.append_instance(self)
def update(self, sl, sls, slc, f1, f2, f3, f4, psi1, psi2, psi3, psi4, pca, pcb, pcc, c1, c2):
'''
print 'sl', sl
print 'sls', sls
print 'f1', f1[sl].shape
print 'c1', c1[sl].shape
print 'psi4', psi4[sls].shape
print 'psi4', psi4[sl].shape
'''
f1[sl] -= c1[sl] * (psi4[sls] - psi4[sl])
f2[sl] += c2[sl] * (psi3[sls] - psi3[sl])
psi1[sl] += pcc[slc] * f1[sl]
psi2[sl] += pcc[slc] * f2[sl]
psi3[sls] = pca[slc] * psi3[sls] + pcb[slc] * f3[sls]
psi4[sls] = pca[slc] * psi4[sls] + pcb[slc] * f4[sls]
def update_e(self):
npml = self.npml
ex, ey, ez, hx, hy, hz = self.mainf.ehs
cex, cey, cez = self.mainf.ces
psi_eyx, psi_ezx, psi_hyx, psi_hzx = self.psi_xs
psi_ezy, psi_exy, psi_hzy, psi_hxy = self.psi_ys
psi_exz, psi_eyz, psi_hxz, psi_hyz = self.psi_zs
pca_h, pcb_h = self.pcs_h[:2]
pcc_e = self.pcs_e[2]
directions = self.directions
sln = slice(None, None)
nax = np.newaxis
if '+' in directions[0]:
sl = (slice(-npml-1, -1), sln, sln)
sls = (slice(-npml, None), sln, sln)
slc = (sln, nax, nax)
self.update(sl, sls, slc, ey, ez, hy, hz, psi_eyx, psi_ezx, psi_hyx, psi_hzx, pca_h, pcb_h, pcc_e, cey, cez)
'''
pca, pcb, pcc = pcs
ey[sl] -= cey[sl] * (psi_hzx[sls] - psi_hzx[sl])
ez[sl] += cez[sl] * (psi_hyx[sls] - psi_hyx[sl])
psi_eyx[sl] += pcc[slc] * ey[sl]
psi_ezx[sl] += pcc[slc] * ez[sl]
psi_hyx[sls] = pca[slc] * psi_hyx[sls] + pcb[slc] * hy[sls]
psi_hzx[sls] = pca[slc] * psi_hzx[sls] + pcb[slc] * hz[sls]
'''
'''
if '-' in directions[0]:
sl = (slice(None, npml), sln, sln)
sls = (slice(1, npml+1), sln, sln)
slc = (slice(None, None, -1), nax, nax)
self.update(sl, sls, slc, pcs, ey, ez, hy, hz, psi_eyx, psi_ezx, psi_hyx, psi_hzx, cey, cez)
if '+' in directions[1]:
sl = (sln, slice(-npml-1, -1), sln)
sls = (sln, slice(-npml, None), sln)
slc = (sln, nax)
self.update(sl, sls, slc, pcs, ez, ex, hz, hx, psi_ezy, psi_exy, psi_hzy, psi_hxy, cez, cex)
if '-' in directions[1]:
sl = (sln, slice(None, npml), sln)
sls = (sln, slice(1, npml+1), sln)
slc = (slice(None, None, -1), nax)
self.update(sl, sls, slc, pcs, ez, ex, hz, hx, psi_ezy, psi_exy, psi_hzy, psi_hxy, cez, cex)
if '+' in directions[2]:
sl = (sln, sln, slice(-npml-1, -1))
sls = (sln, sln, slice(-npml, None))
slc = sln
self.update(sl, sls, slc, pcs, ex, ey, hx, hy, psi_exz, psi_eyz, psi_hxz, psi_hyz, cex, cey)
if '-' in directions[2]:
sl = (sln, sln, slice(None, npml))
sls = (sln, sln, slice(1, npml+1))
slc = slice(None, None, -1)
self.update(sl, sls, slc, pcs, ex, ey, hx, hy, psi_exz, psi_eyz, psi_hxz, psi_hyz, cex, cey)
'''
def update_h(self):
npml = self.npml
ex, ey, ez, hx, hy, hz = self.mainf.ehs
chx, chy, chz = self.mainf.chs
psi_eyx, psi_ezx, psi_hyx, psi_hzx = self.psi_xs
psi_ezy, psi_exy, psi_hzy, psi_hxy = self.psi_ys
psi_exz, psi_eyz, psi_hxz, psi_hyz = self.psi_zs
pca_e, pcb_e = self.pcs_e[:2]
pcc_h = self.pcs_h[2]
directions = self.directions
sln = slice(None, None)
nax = np.newaxis
if '+' in directions[0]:
sl = (slice(-npml, None), sln, sln)
sls = (slice(-npml-1, -1), sln, sln)
slc = (sln, nax, nax)
self.update(sl, sls, slc, hz, hy, ez, ey, psi_hzx, psi_hyx, psi_ezx, psi_eyx, pca_e, pcb_e, pcc_h, chz, chy)
'''
if '-' in directions[0]:
sl = (slice(1, npml+1), sln, sln)
sls = (slice(None, npml), sln, sln)
slc = (slice(None, None, -1), nax, nax)
self.update(sl, sls, slc, pcs, hz, hy, ez, ey, psi_hzx, psi_hyx, psi_ezx, psi_eyx, chz, chy)
if '+' in directions[1]:
sl = (sln, slice(-npml, None), sln)
sls = (sln, slice(-npml-1, -1), sln)
slc = (sln, nax)
self.update(sl, sls, slc, pcs, hx, hz, ex, ez, psi_hxy, psi_hzy, psi_exy, psi_ezy, chx, chz)
if '-' in directions[1]:
sl = (sln, slice(1, npml+1), sln)
sls = (sln, slice(None, npml), sln)
slc = (slice(None, None, -1), nax)
self.update(sl, sls, slc, pcs, hx, hz, ex, ez, psi_hxy, psi_hzy, psi_exy, psi_ezy, chx, chz)
if '+' in directions[2]:
sl = (sln, sln, slice(-npml, None))
sls = (sln, sln, slice(-npml-1, -1))
slc = sln
self.update(sl, sls, slc, pcs, hy, hx, ey, ex, psi_hyz, psi_hxz, psi_eyz, psi_exz, chy, chx)
if '-' in directions[2]:
sl = (sln, sln, slice(1, npml+1))
sls = (sln, sln, slice(None, npml))
slc = slice(None, None, -1)
self.update(sl, sls, slc, pcs, hy, hx, ey, ex, psi_hyz, psi_hxz, psi_eyz, psi_exz, chy, chx)
'''
| [
"kh.kim@kiaps.org"
] | kh.kim@kiaps.org |
2235fd21f94d6254b3b1d3e5f3b629814f5968d0 | 3eff0ac549dd24fbade02d63c3a541ab88db1e5b | /ultimate_python/piglatin/piglatin/urls.py | 36974b0b38594cf07ff90f2517eda8e50821e820 | [] | no_license | lisaolson/udemy | 618410fb548db864b7878de5a2231e8293daa2ad | f40f947f6f79d692748f3efba02176fb360f0c4e | refs/heads/master | 2020-03-28T20:14:23.028759 | 2018-09-18T19:45:32 | 2018-09-18T19:45:32 | 149,051,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py |
from django.conf.urls import url
from django.contrib import admin
from . import views
# URL routing table: Django admin plus the app's three views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.home, name='home'),
    url(r'^translate/', views.translate, name='translate'),
    url(r'^about/', views.about, name='about'),
]
| [
"olson.lisa94@gmail.com"
] | olson.lisa94@gmail.com |
ddf8b17e1cfddfb7814b90814bc8b3951bbd659a | e2081f2f873825a3cc8b529614eb784f5cf5e8c5 | /permutations2.py | 01b9d9cb4078de5aa049df6d087af9df69cca105 | [] | no_license | yilinanyu/Leetcode-with-Python | 17b454058c673381dbafa5a2a154c4e84b449399 | a55d2a3e383f858477170effbf8f6454e5dfd218 | refs/heads/master | 2021-01-21T04:55:31.025194 | 2016-07-11T20:10:18 | 2016-07-11T20:10:18 | 36,630,923 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | class Solution:
# @param num, a list of integer
# @return a list of lists of integers
def permuteUnique(self, num):
length = len(num)
if length == 0: return []
if length == 1: return [num]
num.sort()
res = []
previousNum = None
for i in range(length):
if num[i] == previousNum: continue
previousNum = num[i]
for j in self.permuteUnique(num[:i] + num[i+1:]):
res.append([num[i]] + j)
return res | [
"ly783@nyu.edu"
] | ly783@nyu.edu |
1be5bd328ca7a8254b806833e9502b03feb20333 | e71ecfe679dd8c800e8b0960d4ba68e19401a4fc | /get_actual_news_from_rss_ya/webserver/common.py | 7a3cea0336a59bdd1034fefde3a6b74098048a66 | [] | no_license | igizm0/SimplePyScripts | 65740038d36aab50918ca5465e21c41c87713630 | 62c8039fbb92780c8a7fbb561ab4b86cc2185c3d | refs/heads/master | 2021-04-12T10:48:17.769548 | 2017-06-15T18:53:04 | 2017-06-15T18:53:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,908 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# # TODO: костыль для винды, для исправления проблем с исключениями
# # при выводе юникодных символов в консоль винды
# # Возможно, не только для винды, но и для любой платформы стоит использовать
# # эту настройку -- мало какие проблемы могут встретиться
# import sys
# if sys.platform == 'win32':
# import codecs
# sys.stdout = codecs.getwriter(sys.stdout.encoding)(sys.stdout.detach(), 'backslashreplace')
# sys.stderr = codecs.getwriter(sys.stderr.encoding)(sys.stderr.detach(), 'backslashreplace')
DB_FILE_NAME = 'database.sqlite'
def create_connect():
    """Open and return a new sqlite3 connection to the app database file."""
    import sqlite3
    return sqlite3.connect(DB_FILE_NAME)
def init_db():
    """Create the database file and the News table if they do not exist."""
    # Create the database and table
    connect = create_connect()
    try:
        connect.executescript('''
            CREATE TABLE IF NOT EXISTS News (
                id INTEGER PRIMARY KEY,

                title TEXT NOT NULL,
                url TEXT NOT NULL,
                interest TEXT NOT NULL,
                is_read BOOLEAN NOT NULL DEFAULT 0,

                CONSTRAINT news_url_unique UNIQUE (url)
            );
        ''')

        connect.commit()

        # NOTE: Example for when the table schema needs to be altered:
        # cursor.executescript('''
        #     DROP TABLE Game2;
        #
        #     CREATE TABLE IF NOT EXISTS Game2 (
        #         id INTEGER PRIMARY KEY,
        #
        #         name TEXT NOT NULL,
        #         price TEXT DEFAULT NULL,
        #         modify_date TIMESTAMP DEFAULT NULL,
        #         kind TEXT NOT NULL,
        #         check_steam BOOLEAN NOT NULL DEFAULT 0
        #     );
        #
        #     INSERT INTO Game2 SELECT * FROM Game;
        #
        #     DROP TABLE Game;
        #     ALTER TABLE Game2 RENAME TO Game;
        #
        # ''')
        #
        # connect.commit()

    finally:
        connect.close()
def append_list_news(list_news: [str, str], interest: str):
    """Insert (title, url) pairs into News under the given interest,
    skipping urls that are already stored."""
    connect = create_connect()

    def insert_news(title, url, interest):
        # To filter out duplicates (url has a UNIQUE constraint as well)
        has = connect.execute("SELECT 1 FROM News WHERE url = ?", (url,)).fetchone()
        if has:
            return

        print('Добавляю новость "{}" ({})'.format(title, interest))
        connect.execute("INSERT OR IGNORE INTO News (title, url, interest) VALUES (?,?,?)", (title, url, interest))

    try:
        for title, url in list_news:
            insert_news(title, url, interest)

        connect.commit()

    finally:
        connect.close()
def get_news_list(interest: str=None, last: int=None) -> ([str, str, str], int):
    """Return ([(title, url, interest), ...], total count), optionally
    filtered by interest and truncated to the last `last` rows."""
    connect = create_connect()

    try:
        if interest:
            news_list = connect.execute("SELECT title, url, interest from News where interest = ?", (interest,)).fetchall()
        else:
            news_list = connect.execute("SELECT title, url, interest from News").fetchall()

        total = len(news_list)

        # TODO: better to pull the last <last> records from the DB directly
        # (ORDER BY / LIMIT) instead of slicing in Python
        if last and last != -1:
            news_list = news_list[-last:]

        return news_list, total

    finally:
        connect.close()
def get_news_list_and_mark_as_read(interest: str=None, count: int=None) -> ([str, str, str], int):
    """Return up to `count` unread news items (optionally filtered by
    interest), mark the returned items as read, and also return the number
    of unread items remaining."""
    connect = create_connect()

    try:
        if interest:
            news_list = connect.execute("SELECT id, title, url, interest from News where interest = ? and is_read = 0",
                                        (interest,)).fetchall()
        else:
            news_list = connect.execute("SELECT id, title, url, interest from News where is_read = 0").fetchall()

        # Total number of unread news items
        total = len(news_list)

        # TODO: better to pull <count> records from the DB directly (LIMIT)
        # instead of slicing in Python
        if count and count != -1:
            news_list = news_list[:count]

        # Unread news items remaining after this batch
        total -= len(news_list)

        # Flag the returned news items as read
        for _id, _, _, interest in news_list:
            connect.execute("UPDATE News SET is_read = 1 WHERE id = ?", (_id,))
        connect.commit()

        return [(title, url, interest) for _id, title, url, interest in news_list], total

    finally:
        connect.close()
def reset_all_is_read():
    """Mark every news item as unread again."""
    connect = create_connect()

    try:
        connect.execute("UPDATE News SET is_read = 0")
        connect.commit()

    finally:
        connect.close()
| [
"ilya.petrash@inbox.ru"
] | ilya.petrash@inbox.ru |
ffe374573d512fb90b5fbf3f362cd00fdf9f63c2 | d8820bda3cfa93151255cd07290332dd50cb3ae4 | /videos/utils.py | 0b223f7d5fe234e867666b39f953345d0cede7cb | [] | no_license | defance/video-app | 249aae4f81a4b89ce4b8ddadbf43332a05beb137 | c8c3128dbd41a651d26cba0022d80bb644eaaf8a | refs/heads/master | 2021-01-23T06:20:29.072852 | 2017-06-01T07:32:00 | 2017-06-01T07:32:00 | 93,020,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,495 | py | from __future__ import unicode_literals
from collections import defaultdict
from django.core.files.storage import DefaultStorage
from django.utils.translation import pgettext_lazy as _p, ugettext_lazy as _u
from re import search as re_search
from subprocess import check_output
from .models import Video
# Per-unit (label, format precision) pairs used by get_duration_str;
# precision None means the value is printed without a float format spec.
TIME_DICT = {
    'h': (_p('duration', 'hrs'), None),
    'm': (_p('duration', 'min'), None),
    's': (_p('duration', 'sec'), '.2'),
}
def get_video_info(video):
    """
    Execute ffprobe and extract file info in following format:
    >> duration=XX:XX:XX.XXXXXXX
    >> height=XXX
    >> width=XXX
    Return output as string.

    :param video: (Video) Video to extract info about
    :return: (str) Extracted info of duration, height and width
    """
    probe_command = [
        'ffprobe', '-v', 'error',
        '-show_entries', 'format=duration', '-sexagesimal',
        '-show_entries', 'stream=height,width',
        '-of', 'default=noprint_wrappers=1', video.video.path,
    ]
    return check_output(probe_command)
def extract_raw_duration_info(output):
    """
    Extract the raw duration string from ffprobe output, in the form
    >> hours:minutes:seconds.micros

    :param output: (str) Previous output of ffprobe
    :return: (str) Duration string, or None when no duration line is found
    """
    match = re_search(r'duration=(\d*:\d*:\d*\.\d*)', output)
    if match is None:
        return None
    return match.group(1)
def extract_duration_info(output):
    """
    Extract duration info from ffprobe output. Returns dict containing video
    duration in hours (h), minutes (m) and seconds (s) as floats.

    :param output: (str or None) Previous output of ffprobe
    :return: (dict: str => float) Dict with durations, {} when nothing matches
    """
    re_exp = '(?P<h>\d*):(?P<m>\d*):(?P<s>\d*.\d*)'
    re_res = re_search(re_exp, output or '')
    if not re_res:
        return {}
    # BUG FIX: the previous "lambda (key, val):" used tuple-parameter
    # unpacking, which is Python 2-only syntax (SyntaxError on Python 3,
    # PEP 3113). A dict comprehension is equivalent and runs on 2.7 and 3.x.
    return {key: float(val) for key, val in re_res.groupdict().items()}
def get_duration_str(info):
    """
    Builds duration string with duration info.
    Note: it is language-dependant.

    :param info: (dict: str -> float) keys 'h'/'m'/'s' as produced by
        extract_duration_info
    :return: (str) e.g. "1 hrs 2 min 3.50 sec", or the translated 'Unknown'
    """
    if not info:
        return _u('Unknown')
    # Formatted twice below: the first .format() injects the precision spec
    # (producing e.g. "{dur:.2f} {desc}"); the second fills in the values.
    base_str = "{{dur{pr}}} {{desc}}"
    # This probably should be split into 3 modules, though all tests are ok
    def get_desc(dur):
        # Skip units that are missing or zero so they don't clutter output.
        if not info.get(dur, 0):
            return None
        desc, precision = TIME_DICT.get(dur, (None, None))
        if desc is None:
            return None
        precision = ":{}f".format(precision) if precision else ''
        return base_str.format(pr=precision).format(dur=info[dur], desc=desc)
    # Join the non-empty unit strings; an all-zero duration reads as unknown.
    return ' '.join(filter(None, map(get_desc, ['h', 'm', 's']))) or \
        _u('Unknown')
def generate_video_thumbnail(video, size=150):
    """
    Executes ImageMagick's `convert` to create a square thumbnail, saved under
    >> media_dir/preview/video_id.png

    :param video: (Video) Video to create preview of
    :param size: (int) Dimensional size (in px) of thumbnail, greater than 0,
    default 150
    :return: Nothing
    """
    storage = DefaultStorage()
    preview_name = storage.get_available_name('preview/%s.png' % video.id)
    geometry = '{size}x{size}'.format(size=size)
    # Take frame [1], scale it to cover the square ('^' fill-area flag),
    # then crop it centred to exactly size x size.
    check_output([
        'convert', video.video.path + '[1]',
        '-resize', geometry + '^',
        '-gravity', 'center',
        '-extent', geometry,
        storage.path(preview_name),
    ])
    video.preview = preview_name
def process_video(video):
    """Run the full processing pipeline for one video.

    Marks the video 'processing', probes its duration, renders a thumbnail
    and finally marks it 'ready'. Any failure flips the status to 'error'.

    :param video: (Video) Video to process
    :return: (bool) True on success, False if processing failed
    """
    try:
        video.status = 'processing'
        video.save()
        output = get_video_info(video)
        video.duration = extract_raw_duration_info(output)
        generate_video_thumbnail(video)
        video.status = 'ready'
        video.save()
    except Exception:
        # Deliberately broad: any probe/convert/storage failure should leave
        # the video in the 'error' state instead of crashing the batch.
        video.status = 'error'
        video.save()
        return False
    return True
def process_videos(process=False):
    """
    Process videos: update their duration and create thumbnails.

    :param process: (bool) Whether to do the actual processing; when False
        videos are only queued and counted
    :return: (defaultdict: bool -> int) Count of videos per outcome: True maps
        to successes (or everything when *process* is False), False to failures
    """
    videos = Video.objects.filter(status__in=['loaded', 'queued'])
    report = defaultdict(int)
    # First pass: flag every candidate as 'queued' before any processing runs.
    for video in videos:
        if video.status != 'queued':
            video.status = 'queued'
            video.save()
    # Second pass: process (or merely count, when process=False) each video.
    for video in videos:
        if video.status == 'queued':
            report[process_video(video) if process else True] += 1
    return report
| [
"defance@gmail.com"
] | defance@gmail.com |
659fef29c9cd67a7f70fdb47f11176ca70521932 | 4c76c88f6421abb52a9e68ae48d33f32b0fcf5af | /google_api/run.py | b8dd4c7394f9e5d4177cc85794e3d903ab8a0d13 | [
"Apache-2.0"
] | permissive | aperturetechnology/starthinker | 76ba1d8883dbcf32eff4164f57f4342d0b912b70 | fd2d70e39f05cb29afc65b8a78ea38441e1e2b9a | refs/heads/master | 2020-04-09T22:19:14.752457 | 2018-11-27T13:52:49 | 2018-11-27T13:52:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | ###########################################################################
#
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from util.project import project
from util.google_api import API
from util.data import put_rows
def google_api():
    """Execute the Google API call described by the current task and persist
    the results.

    Reads everything from ``project.task`` (auth, api/version, function, out);
    the output file is named ``<function>_<date>.json`` with dots in the
    function name replaced by underscores. (Python 2 print statement below.)
    """
    if project.verbose: print 'GOOGLE_API', project.task['api'], project.task['version']
    results = API(project.task).execute()
    put_rows(
        project.task['auth'],
        project.task['out'],
        '%s_%s.json' % (project.task['function'].replace('.', '_'), project.date),
        results
    )
if __name__ == "__main__":
    # Entry point: load the 'google_api' task configuration, then run it.
    project.load('google_api')
    google_api()
| [
"kenjora@kenjora-macbookair.roam.corp.google.com"
] | kenjora@kenjora-macbookair.roam.corp.google.com |
f0a35b9b28d1264951cbe6cb5db8880991747ed4 | d6d5a3e94b7c9762a08b9c6e986ea5c4d78bcd64 | /problems/543/test.py | 49f8f23776892759d236a9495951ad1a13303c91 | [] | no_license | neuxxm/leetcode | 46a9c247982834d769731b06cb9e9587f68702a5 | fd6c8082f81bcd9eda084b347c77fd570cfbee4a | refs/heads/master | 2023-02-10T22:37:18.966470 | 2021-01-07T08:48:50 | 2021-01-07T08:48:50 | 265,777,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#16:18-16:21
def f(x, z):
    """Return the depth of subtree *x*; record the best diameter (longest
    left-depth + right-depth sum, in edges) seen so far in z[0]."""
    if x is None:
        return 0
    left_depth = f(x.left, z)
    right_depth = f(x.right, z)
    z[0] = max(z[0], left_depth + right_depth)
    return max(left_depth, right_depth) + 1
class Solution(object):
    def diameterOfBinaryTree(self, root):
        """Return the diameter (longest path, counted in edges) of the tree.

        :type root: TreeNode
        :rtype: int
        """
        # The recursive helper returns subtree depth and records the best
        # left-depth + right-depth sum it has seen in best[0].
        best = [0]
        f(root, best)
        return best[0]
| [
"neuxxm@gmail.com"
] | neuxxm@gmail.com |
ac0286e178de05ae2d4c2c5440ca018bc646c444 | 0e51d1be69b26a4bc2020db597b187b8b4784614 | /OOP Prep Exam 22 Aug 2020/project/rooms/room.py | eb117f0a7b64c7b6aefe8313f1b8fd37421631cb | [] | no_license | dplamen/04_Python_OOP | 81dbc21095ca776d9ce034dbe9959ca4903c8d82 | cb0880a70c903e252958587d7051527527f57af4 | refs/heads/main | 2023-09-03T03:15:28.313039 | 2021-11-13T16:51:27 | 2021-11-13T16:51:27 | 427,719,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | from project.appliances.appliance import Appliance
from project.people.child import Child
class Room:
    """A family's room: tracks budget, headcount, children and monthly expenses."""

    def __init__(self, name, budget, members_count):
        self.family_name = name
        self.budget = budget
        self.members_count = members_count
        self.children = []
        self.expenses = 0

    @property
    def expenses(self):
        return self.__expenses

    @expenses.setter
    def expenses(self, value):
        # Reject negative totals before storing anything.
        if value < 0:
            raise ValueError("Expenses cannot be negative")
        self.__expenses = value

    def calculate_expenses(self, *args):
        """Recompute monthly expenses from iterables of appliances/children.

        Appliances contribute their monthly expense; children cost their
        daily rate times 30 days.
        """
        total = 0
        for group in args:
            for item in group:
                if isinstance(item, Appliance):
                    total += item.get_monthly_expense()
                elif isinstance(item, Child):
                    total += item.cost * 30
        self.__expenses = total
| [
"plamenkostov@gmail.com"
] | plamenkostov@gmail.com |
aa0ed51e33bc948e2bb33149fca8c8d598babdae | 3902cee7d59ef823a4839576f984c63452dd0d23 | /Code/pso_v1_1.py | be60b6b90cb4669afa4acc58f3b006da7e8f8314 | [
"MIT"
] | permissive | zaman13/Particle-Swarm-Optimization-PSO-using-Python | 36a6f2afcf69fdd6d790b2f3a7f11a6a00a849d9 | 0b43df807182de993be4675cf683676f43531dd3 | refs/heads/master | 2023-01-12T07:23:37.686017 | 2022-12-27T04:06:28 | 2022-12-27T04:06:28 | 255,481,106 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,627 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 11 17:48:18 2020
@author: Mohammad Asif Zaman
Particle swarm optimization code
- General code, would work with fitness function of any dimensions (any no. of parameters)
- Vectorized fast code. Only one for loop is used to go over the iterations. Calculations
over all the dimensions and particles are done using matrix operations.
- One function call per iteration.
Tested in python 2.7.
v1_1: Added average fitness and average pbest plots for checking convergence
"""
from __future__ import print_function
import time
import math
import numpy as np
import pylab as py
py.rcParams.update({'font.size': 14})
# Control parameters
w = 0.5 # Inertial weight. In some variations, it is set to vary with iteration number.
c1 = 2.0 # Weight of searching based on the optima found by a particle (cognitive term)
c2 = 2.0 # Weight of searching based on the optima found by the swarm (social term)
v_fct = 1 # Velocity adjust factor. Set to 1 for standard PSO.
Np = 40 # population size (number of particles)
D = 4 # dimension (= no. of parameters in the fitness function)
max_iter = 100 # maximum number of iterations
xL = np.zeros(D) - 4 # lower bound (does not need to be homogeneous)
xU = np.zeros(D) + 4 # upper bound (does not need to be homogeneous)
# Fitness function. The code maximizes the value of the fitness function
def fitness(x):
    """Score every particle in the swarm (the PSO loop maximizes this).

    x is a D x Np matrix holding the whole swarm's positions, so a single
    call evaluates all Np particles at once.

    Returns a length-Np vector of "modified sphere" values: 2 - sum(x_d^2).
    """
    return 2.0 - np.sum(np.multiply(x, x), 0)
pbest_val = np.zeros(Np) # Personal best fitness value. One pbest value per particle.
gbest_val = np.zeros(max_iter) # Global best fitness value. One gbest value per iteration (stored).
pbest = np.zeros((D,Np)) # pbest solution
gbest = np.zeros(D) # gbest solution
gbest_store = np.zeros((D,max_iter)) # storing gbest solution at each iteration
pbest_val_avg_store = np.zeros(max_iter) # per-iteration mean of pbest_val (convergence diagnostic)
fitness_avg_store = np.zeros(max_iter) # per-iteration mean swarm fitness (convergence diagnostic)
x = np.random.rand(D,Np) # Initial position of the particles
v = np.zeros((D,Np)) # Initial velocity of the particles
# Setting the initial position of the particles over the given bounds [xL,xU]
for m in range(D):
    x[m,:] = xL[m] + (xU[m]-xL[m])*x[m,:]
# Initial evaluations (for iteration = 0)
# Function call. Evaluates the fitness of the initial swarms
fit = fitness(x) # vector of size Np
pbest_val = np.copy(fit) # initial personal best = initial fitness values. Vector of size Np
pbest = np.copy(x) # initial pbest solution = initial position. Matrix of size D x Np
# Calculating gbest_val and gbest. Note that gbest is the best solution within pbest
ind = np.argmax(pbest_val) # index where pbest_val is maximum.
gbest_val[0] = np.copy(pbest_val[ind]) # set initial gbest_val
gbest = np.copy(pbest[:,ind])
pbest_val_avg_store[0] = np.mean(pbest_val)
fitness_avg_store[0] = np.mean(fit)
print("Iter. =", 0, ". gbest_val = ", gbest_val[0])
print("gbest_val = ",gbest_val[0])
# Loop over the generations
for iter in range(1,max_iter):
    r1 = np.random.rand(D,Np) # random numbers [0,1], matrix D x Np
    r2 = np.random.rand(D,Np) # random numbers [0,1], matrix D x Np
    v_global = np.multiply(((x.transpose()-gbest).transpose()),r2)*c2*(-1.0) # velocity towards global optima
    v_local = np.multiply((pbest- x),r1)*c1 # velocity towards local optima (pbest)
    v = w*v + (v_local + v_global) # velocity update
    x = x + v*v_fct # position update
    fit = fitness(x) # fitness function call (once per iteration). Vector Np
    # pbest and pbest_val update
    ind = np.argwhere(fit > pbest_val) # indices where current fitness value set is greater than pbest
    pbest_val[ind] = np.copy(fit[ind]) # update pbest_val at those particle indices where fit > pbest_val
    pbest[:,ind] = np.copy(x[:,ind]) # update pbest for those particle indices where fit > pbest_val
    # gbest and gbest_val update
    ind2 = np.argmax(pbest_val) # index where the fitness is maximum
    gbest_val[iter] = np.copy(pbest_val[ind2]) # store gbest value at each iteration
    gbest = np.copy(pbest[:,ind2]) # global best solution, gbest
    gbest_store[:,iter] = np.copy(gbest) # store gbest solution
    pbest_val_avg_store[iter] = np.mean(pbest_val)
    fitness_avg_store[iter] = np.mean(fit)
    print("Iter. =", iter, ". gbest_val = ", gbest_val[iter]) # print iteration no. and best solution at each iteration
# Plotting: figure 1 shows convergence curves, figure 2 the best solution per dimension
py.close('all')
py.figure(1)
py.plot(gbest_val,label = 'gbest_val')
py.plot(pbest_val_avg_store, label = 'Avg. pbest')
py.plot(fitness_avg_store, label = 'Avg. fitness')
py.legend()
py.xlabel('iterations')
py.ylabel('fitness, gbest_val')
py.figure(2)
for m in range(D):
    py.plot(gbest_store[m,:],label = 'D = ' + str(m+1))
py.legend()
py.xlabel('iterations')
py.ylabel('Best solution, gbest[:,iter]')
| [
"39745895+zaman13@users.noreply.github.com"
] | 39745895+zaman13@users.noreply.github.com |
337ede14a2180d4c5958f83d5e10ef2915d5266b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/295/66182/submittedfiles/testes.py | 3d79ec34943c90627f64c66fde2d551806cdeeb5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
print("Marlon Anderson Leandro de Lima Filho")
print("19")
print(11+1037)
# BUG FIX: the division must be inside print(); the original
# "print(9*35+160)/5" divides print's return value (None) by 5, which
# raises TypeError on Python 3 (on Python 2 the statement parsed differently).
print((9*35+160)/5)
print("2.356,1925")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
294e700bd5ebe76fe25fc3abeb124fb10fea0aae | 123e2e28017973eefedaffb273cb3a5164f582c5 | /tests/integrations/helpers.py | 68f57836c2a9a1c3fe727dc6531340c2feed0752 | [
"MIT"
] | permissive | pythonindia/junction | ef4c0bf64f8c396edd2407f6d91444ab60a36b02 | 208d1757bf39c4727cf78b52cd2285e902eec84d | refs/heads/master | 2023-08-17T09:30:50.961028 | 2023-08-10T06:44:34 | 2023-08-10T06:44:34 | 27,966,694 | 209 | 226 | MIT | 2023-08-10T06:44:35 | 2014-12-13T16:40:17 | Python | UTF-8 | Python | false | false | 241 | py | # -*- coding: utf-8 -*-
def assert_template_used(response, template_name):
    """Fail (AssertionError) unless one of the rendered templates on
    *response* has the name *template_name*."""
    used = any(template.name == template_name for template in response.templates)
    assert used is True
| [
"me@kracekumar.com"
] | me@kracekumar.com |
92531f86d7ff93e73ab54d1ab9712ecb3d1b4f22 | 67cf6d0e91253107a7d3a3dd879a31dcc1f5b36f | /0x0F-python-object_relational_mapping/3-my_safe_filter_states.py | 0bf2a4dcb156ae06009396d036ad114a99052c05 | [] | no_license | nzomobrian/holbertonschool-higher_level_programming | dd1646a2b8ccf21ecf41c39efbe8f7dac2771065 | 908ec393c1a3591dde500c10e8eb9c73e35d57f8 | refs/heads/master | 2023-03-19T00:16:15.893949 | 2020-05-15T00:55:06 | 2020-05-15T00:55:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #!/usr/bin/python3
# Lists all states that match the argument given without injection
import sys
import MySQLdb
if __name__ == "__main__":
    # Credentials and the searched state name come from the command line:
    # argv[1]=user, argv[2]=password, argv[3]=database, argv[4]=state name.
    db = MySQLdb.connect(host="localhost", port=3306, user=sys.argv[1],
                         passwd=sys.argv[2], db=sys.argv[3])
    cur = db.cursor()
    # Parameterized query: the user-supplied name is bound by the driver,
    # preventing SQL injection; BINARY forces a case-sensitive match.
    cur.execute("SELECT id, name FROM states WHERE BINARY name=%s \
            ORDER BY id", (sys.argv[4],))
    states = cur.fetchall()
    for state in states:
        print((state[0], state[1]))
| [
"keener4christ@gmail.com"
] | keener4christ@gmail.com |
14d2fd5064686d8fcb86c06141f63c75e0dbfe99 | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /src/dg/python_nlp/homework/ch09.py | df71319e81e7ee778a578cc8328adc2ac31535aa | [] | no_license | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,933 | py | # -*- coding: utf-8 -*-
from nltk.corpus import wordnet as wn
import nltk
from nltk.corpus import *
from matplotlib import pylab
from nltk import word_tokenize
import re
import jieba
import jieba.posseg
import jieba.analyse
from nltk.corpus import brown
import nltk
#2. ○ Write a tag pattern to match noun phrases containing plural head nouns, e.g.,
#many/JJ researchers/NNS, two/CD weeks/NNS, both/DT new/JJ positions/NNS. Try
#to do this by generalizing the tag pattern that handled singular noun phrases.
#(Translated from Chinese: same exercise 2 restated — write a tag pattern
#matching NPs with plural head nouns by generalizing the singular-NP pattern.)
textchunk = [("many", "JJ"), ("researchers", "NNS"), ("two", "CD"), ("weeks", "NNS"), ("both","DT"), ("new", "JJ"), ("positions", "NNS")]
# Optional determiner/cardinal, any number of adjectives, then an NNS head.
corpus = nltk.RegexpParser("NP:{<DT>?<CD>?<JJ>*<NNS>}")
result = corpus.parse(textchunk)
print result
result.draw()
#6. ◑ Write one or more tag patterns to handle coordinated noun phrases, e.g., July/
#NNP and/CC August/NNP, all/DT your/PRP$ managers/NNS and/CC supervisors/NNS,
#company/NN courts/NNS and/CC adjudicators/NNS.
#(Translated from Chinese: same exercise 6 restated — write tag patterns for
#conjoined noun phrases such as the examples above.)
textchunk = [("July","NNP"), ("and","CC"), ("August","NNP"), ("all", "DT"), ("your", "PRP$"), ("managers", "NNS"), ("and", "CC"), ("supervisors", "NNS"), ("company","NN"), ("courts","NNS"), ("and","CC"), ("adjudicators","NNS")]
# Three alternatives: NNP-and-NNP, DT PRP$ NNS-and-NNS, NN NNS-and-NNS.
corpus = nltk.RegexpParser(" Coordinated noun: {<NNP><CC><NNP>|<DT><PRP\$><NNS><CC><NNS>|<NN><NNS><CC><NNS>}")
result = corpus.parse(textchunk)
print result
result.draw()
#7. ◑ Carry out the following evaluation tasks for any of the chunkers you have developed
#earlier. (Note that most chunking corpora contain some internal inconsistencies,
#such that any reasonable rule-based approach will produce errors.)
#a. Evaluate your chunker on 100 sentences from a chunked corpus, and report
#the precision, recall, and F-measure.
#b. Use the chunkscore.missed() and chunkscore.incorrect() methods to identify
#the errors made by your chunker. Discuss.
#c. Compare the performance of your chunker to the baseline chunker discussed
#in the evaluation section of this chapter.
#(Translated from Chinese: same exercise 7 restated — evaluate a chunker on
#100 sentences, report precision/recall/F-measure, inspect its errors with
#chunkscore.missed()/incorrect(), and compare against the baseline chunker.)
from nltk.corpus import conll2000
test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])[:100]
print len(test_sents)
# Chunker with no grammar rules at all -- the zero baseline.
# (Translated from Chinese: "chunker that uses no grammar rules")
cp = nltk.RegexpParser("")
print cp.evaluate(test_sents)
cp = nltk.RegexpParser('CHUNK: {<V.*> <TO> <V.*>}')
print cp.evaluate(test_sents)
cp = nltk.RegexpParser('NP: {<NN>+}')
print cp.evaluate(test_sents)
grammar = r"NP: {<[CDJNP].*>+}"
cp = nltk.RegexpParser(grammar)
print cp.evaluate(test_sents)
# Chunk noun phrases with a unigram tagger (translated from Chinese)
class UnigramChunker(nltk.ChunkParserI):
    """NP chunker: a unigram tagger maps each POS tag to an IOB chunk tag."""
    def __init__(self, train_sents):
        # Training data: for every sentence, the (POS tag, chunk tag) pairs.
        training_pairs = [[(pos, chunk) for word, pos, chunk in nltk.chunk.tree2conlltags(sent)]
                          for sent in train_sents]
        self.tagger = nltk.UnigramTagger(training_pairs)
    def parse(self, sentence):
        # Tag the POS sequence with chunk tags, then rebuild an nltk Tree.
        tag_sequence = [pos for (word, pos) in sentence]
        chunk_tags = [chunk for (pos, chunk) in self.tagger.tag(tag_sequence)]
        conll_rows = [(word, pos, chunk) for ((word, pos), chunk) in zip(sentence, chunk_tags)]
        return nltk.chunk.conlltags2tree(conll_rows)
# Train/evaluate the unigram chunker on the full CoNLL-2000 NP chunk data.
test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])
train_sents = conll2000.chunked_sents('train.txt', chunk_types=['NP'])
unigram_chunker = UnigramChunker(train_sents)
print unigram_chunker.evaluate(test_sents)
#17. ● An n-gram chunker can use information other than the current part-of-speech
#tag and the n-1 previous chunk tags. Investigate other models of the context, such
#as the n-1 previous part-of-speech tags, or some combination of previous chunk
#tags along with previous and following part-of-speech tags.
#17. ● (Translated from Chinese) An n-gram chunker can use information beyond
#the current POS tag and the n-1 previous chunk tags: investigate other context
#models, e.g. the n-1 previous POS tags, or combinations of previous chunk
#tags with the surrounding POS tags.
| [
"xenron@hotmail.com"
] | xenron@hotmail.com |
88003732df9aca53f2788ef6179f052a5afd181e | 52585c8d95cef15199c18ba1a76899d2c31329f0 | /01Learn Python The Hard Way/11_input.py | f05f4e063f81a0eb96aa73d24c9841a108683cd7 | [] | no_license | greatabel/PythonRepository | c7a952257303a21083ed7d535274c339362bd126 | 836fcdd3f5c1b150122302685104fe51b5ebe1a3 | refs/heads/master | 2023-08-30T15:56:05.376391 | 2023-08-26T03:34:14 | 2023-08-26T03:34:14 | 29,392,599 | 33 | 6 | null | 2023-02-14T13:33:21 | 2015-01-17T13:54:58 | Python | UTF-8 | Python | false | false | 428 | py | print "How old are you?",
age=raw_input()
print "how tall are your?",
height=raw_input()
print "how much do you weight?",
weight=raw_input()
print "So, you're %r old, %r tall and %r heavy." % (
    age, height, weight)
# Second round: the same questions, but without the trailing comma after each
# print statement, so every prompt is followed by a newline before
# raw_input() runs (Python 2: a trailing comma suppresses the newline).
print "How old are you?"
age=raw_input()
print "how tall are your?"
height=raw_input()
print "how much do you weight?"
weight=raw_input()
print "So, you're %r old, %r tall and %r heavy." % (
    age, height, weight)
| [
"greatabel1@126.com"
] | greatabel1@126.com |
0c674243effa19ac035cb6a58cd3db0be2c5155b | 25d2afe5d12fe58a97da7b51e23fdc55929e38f5 | /create_nt_error_subset_results.py | 1ebafa06051b3e413a1190b4c64414c17a83fbab | [] | no_license | apmoore1/tdsa_comparisons | 071396efe0c5e0bad297119d2ce48bf0c1cbb42f | ba613afece15239e6a38f277c455a035739f0b2d | refs/heads/master | 2021-06-23T16:00:49.803589 | 2021-05-25T09:32:53 | 2021-05-25T09:32:53 | 225,565,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | from pathlib import Path
import pandas as pd
from target_extraction.data_types import TargetTextCollection
from target_extraction.analysis.sentiment_error_analysis import (ERROR_SPLIT_SUBSET_NAMES,
error_split_df, PLOT_SUBSET_ABBREVIATION,
error_analysis_wrapper,
subset_name_to_error_split)
from target_extraction.analysis.sentiment_metrics import accuracy
from target_extraction.analysis.util import add_metadata_to_df
def parse_path(path_string: str) -> Path:
    """Convert a path string into an absolute, resolved ``Path``."""
    return Path(path_string).resolve()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('results_dir', type=parse_path,
                        help='Directory that contain results for each dataset')
    parser.add_argument('save_fp', type=parse_path,
                        help='File path to save the results too')
    args = parser.parse_args()
    results_dir = args.results_dir
    save_fp = args.save_fp
    # Get the data
    data_splits = ['test', 'val']
    dataset_names = ['election', 'laptop', 'restaurant']
    index_keys = ['prediction key', 'run number']
    tssr_func = error_analysis_wrapper('TSSR')
    ds_func = error_analysis_wrapper('DS')
    nt_func = error_analysis_wrapper('NT')
    all_relevant_error_funcs = [tssr_func, ds_func, nt_func]
    # NOTE(review): splits/dataset_names below re-bind earlier values; the
    # lowercase data_splits/dataset_names above end up unused -- confirm.
    splits = ['test', 'val']
    dataset_names = ['Laptop', 'Restaurant', 'Election']
    all_dfs = []
    relevant_prediction_keys = ['predicted_target_sentiment_AE_GloVe_None_None',
                                'predicted_target_sentiment_CNN_GloVe_None_None',
                                'predicted_target_sentiment_IAN_GloVe_None_None',
                                'predicted_target_sentiment_TDLSTM_GloVe_None_None']
    nt_error_names = ERROR_SPLIT_SUBSET_NAMES['NT']
    ds_error_names = ERROR_SPLIT_SUBSET_NAMES['DS']
    tssr_error_names = ERROR_SPLIT_SUBSET_NAMES['TSSR']
    reduced_collection_subset_names = ds_error_names + tssr_error_names
    nt_split_subsets = {'NT': ERROR_SPLIT_SUBSET_NAMES['NT']}
    import time
    overall_time = time.time()
    # For every dataset/split: load the collection, annotate it with each
    # error-analysis split, then compute NT-subset accuracies restricted to
    # each DS/TSSR subset in turn.
    for dataset_name in dataset_names:
        print(f'Dataset {dataset_name}')
        for split in splits:
            one_time = time.time()
            print(f'Data Split {split}')
            data_fp = Path(results_dir, f'{dataset_name.lower()}_dataset',
                           f'{split}.json')
            dataset = TargetTextCollection.load_json(data_fp)
            for error_func in all_relevant_error_funcs:
                error_func(None, dataset, True)
            for reduced_collection_subset_name in reduced_collection_subset_names:
                temp_df = error_split_df(None, dataset, relevant_prediction_keys,
                                         'target_sentiments', nt_split_subsets, accuracy,
                                         {'ignore_label_differences': True},
                                         include_dataset_size=True,
                                         collection_subsetting=[[reduced_collection_subset_name]],
                                         table_format_return=False)
                temp_df = add_metadata_to_df(temp_df, dataset,
                                             'predicted_target_sentiment_key')
                temp_df['reduced collection subset'] = reduced_collection_subset_name
                temp_df['Dataset'] = dataset_name
                temp_df['Split'] = split.capitalize()
                all_dfs.append(temp_df)
            print(time.time() - one_time)
    print(f'total time {time.time() - overall_time}')
    all_dfs = pd.concat(all_dfs, 0, ignore_index=True)
    temp_dfs = all_dfs.copy(deep=True)
    all_subset_names = [name for subset_names in ERROR_SPLIT_SUBSET_NAMES.values()
                        for name in subset_names]
    temp_dfs['Reduced Error Split'] = temp_dfs.apply(lambda x: subset_name_to_error_split(x['reduced collection subset']), 1)
    # Report accuracies as percentages in the final table.
    temp_dfs['Metric'] = temp_dfs['Metric'] * 100
    temp_dfs = temp_dfs.rename(columns={'Metric': 'Accuracy'})
    temp_dfs['NT'] = temp_dfs.apply(lambda x: PLOT_SUBSET_ABBREVIATION[x['subset names']], 1)
    temp_dfs['Subset By'] = temp_dfs.apply(lambda x: PLOT_SUBSET_ABBREVIATION[x['reduced collection subset']], 1)
    temp_dfs = temp_dfs.drop(columns=['reduced collection subset', 'subset names'])
temp_dfs.to_csv(save_fp, sep='\t') | [
"andrew.p.moore94@gmail.com"
] | andrew.p.moore94@gmail.com |
f9bda7505e7d23ead634575aa13665b1f86033de | c619ea6c1663c6ba0614b4dd63806cda5489adee | /wwtool/datasets/utils.py | 40da33f74a28ed219dced7e76fd44b7122a94162 | [] | no_license | ZhangRuixiang-WHU/wwtool | 35107e36fc4cce892c6d0c096ee90cbf8e1eeb97 | be87af3ad49a3befb331b2530e0cfdd5dd479a4a | refs/heads/master | 2020-12-02T15:26:22.383469 | 2019-12-25T15:02:42 | 2019-12-25T15:02:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,558 | py | import os
import numpy as np
import shutil
import wwtool
def shuffle_dataset(origin_dataset_dir, trainval_dir, test_dir, trainval_rate=0.8, image_format='.png', label_format='.txt', seed=0):
    """Generate trainval and test sets from origin set by copying files randomly.

    Arguments:
        origin_dataset_dir {str} -- path of origin dataset, contains `images` and `labels` folds
        trainval_dir {str} -- path of trainval set, contains `images` and `labels` folds
        test_dir {str} -- path of test set, contains `images` and `labels` folds
        trainval_rate {float} -- fraction of files placed in trainval, rest go to test
        image_format {str} -- extension of image files (e.g. '.png')
        label_format {str} -- extension of label files (e.g. '.txt')
        seed {int} -- seed of random function

    Returns:
        None
    """
    np.random.seed(seed)
    src_label_path = os.path.join(origin_dataset_dir, 'labels')
    src_image_path = os.path.join(origin_dataset_dir, 'images')
    trainval_dst_label_path = os.path.join(trainval_dir, 'labels')
    wwtool.mkdir_or_exist(trainval_dst_label_path)
    trainval_dst_image_path = os.path.join(trainval_dir, 'images')
    wwtool.mkdir_or_exist(trainval_dst_image_path)
    test_dst_label_path = os.path.join(test_dir, 'labels')
    wwtool.mkdir_or_exist(test_dst_label_path)
    test_dst_image_path = os.path.join(test_dir, 'images')
    wwtool.mkdir_or_exist(test_dst_image_path)

    # BUG FIX: strip the configurable label extension, not a hardcoded '.txt';
    # rsplit(..., 1) only removes the trailing occurrence of the extension.
    file_names = [label_file.rsplit(label_format, 1)[0] for label_file in os.listdir(src_label_path)]
    # Sort before shuffling so the split is reproducible for a given seed,
    # independent of the filesystem's listing order.
    file_names = sorted(file_names)
    np.random.shuffle(file_names)
    trainval_file_names = file_names[0 : int(len(file_names) * trainval_rate)]
    test_file_names = file_names[int(len(file_names) * trainval_rate):]

    def _copy_split(split_file_names, dst_label_path, dst_image_path):
        # Copy each (label, image) pair of one split to its destination dirs.
        for file_name in split_file_names:
            print("From {} to {}.".format(os.path.join(src_label_path, file_name), os.path.join(dst_label_path, file_name)))
            shutil.copy(os.path.join(src_label_path, file_name + label_format), os.path.join(dst_label_path, file_name + label_format))
            shutil.copy(os.path.join(src_image_path, file_name + image_format), os.path.join(dst_image_path, file_name + image_format))

    _copy_split(trainval_file_names, trainval_dst_label_path, trainval_dst_image_path)
    _copy_split(test_file_names, test_dst_label_path, test_dst_image_path)
| [
"jwwangchn@outlook.com"
] | jwwangchn@outlook.com |
157478c0e27dedf77864a8acc05f49de4067093d | 40f4908483b98fc4f370ff4f2d520e1284d045b3 | /phase02/immortals_repo/harness/pymmortals/generated/com/securboration/immortals/ontology/fm/feature/featureselectioncriterion.py | 9cab2e071f0f03b3b5559b965410bbe4239d1247 | [] | no_license | TF-185/bbn-immortals | 7f70610bdbbcbf649f3d9021f087baaa76f0d8ca | e298540f7b5f201779213850291337a8bded66c7 | refs/heads/master | 2023-05-31T00:16:42.522840 | 2019-10-24T21:45:07 | 2019-10-24T21:45:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from enum import Enum
# noinspection PyPep8Naming
class FeatureSelectionCriterion(Enum):
    """Cardinality rule for selecting features from a feature-model group;
    each member's value mirrors its name."""
    SELECT_ALL = 'SELECT_ALL'
    SELECT_ZERO_OR_MORE = 'SELECT_ZERO_OR_MORE'
    SELECT_ONE_OR_MORE = 'SELECT_ONE_OR_MORE'
    SELECT_EXACTLY_ONE = 'SELECT_EXACTLY_ONE'
| [
"awellman@bbn.com"
] | awellman@bbn.com |
434de5a96c6a7a561dffb0b43c686a35b6c46e0e | d7b89e6090759d242f0afc8fd5d9f5200c17371a | /20190904/BOJ_12100_kyeong.py | 384dd43ab88deea6efbc4bf457ad2d902a7dedd8 | [] | no_license | JISU-JEONG/algorithm- | 545c5f758d2ca15d2d59a706ab75be8fb71e39a6 | 02ff7df63dd8a8518005d37a2edec67d158ae6db | refs/heads/master | 2020-09-13T14:15:59.919347 | 2019-11-19T23:57:40 | 2019-11-19T23:57:40 | 222,812,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | import sys
# Competitive-programming harness: make input() read from the local test file.
sys.stdin = open('12100.txt')
from pprint import pprint
from itertools import product as pd
from copy import deepcopy
def rotate(a):
    """Return a new n x n board equal to *a* rotated 90 degrees clockwise."""
    return [list(row) for row in zip(*a[::-1])]
def push(a):
    """Slide every row's tiles to the right, collecting zeros at the left.

    FIX: the original wrote ``a[i].insert(a[i].pop(j), 0)``, which only worked
    by accident -- the popped value is the guarded 0, so it was used as the
    insert *index*. Spelled explicitly here: remove the zero at j and
    re-insert a zero at the front of the row. Also sized from the board itself
    (``len``) instead of the module-level global N, so the helper stands alone
    (identical behavior, since the script always has N == len(board)).
    """
    for row in a:
        for j in range(len(row)):
            if row[j] == 0:
                row.pop(j)
                row.insert(0, 0)
def add(a):
    """Merge equal adjacent tiles once per pair, scanning each row from the
    right edge (matches the rightward-move merge rule; call after push()).

    After a merge the left tile is removed and a zero is inserted at the
    front, keeping row length constant. Sized from the board itself instead
    of the module-level global N (identical behavior for N == len(board)).
    """
    for row in a:
        for j in range(len(row) - 1, 0, -1):
            if row[j] == row[j - 1]:
                row[j] *= 2
                row.pop(j - 1)
                row.insert(0, 0)
def mymax(a):
    """Return the largest tile value on the board.

    Rewritten to derive the board size from the board itself instead of the
    module-level global N (same result whenever N == len(a), which is the only
    way the script calls it), making the helper self-contained.
    """
    return max(max(row) for row in a)
def solve(S, board):
    """Apply one move (1=up, 2=down, 3=left, anything else=right) and return
    the resulting board.

    Every direction is handled by rotating the board clockwise until the move
    becomes a rightward slide, doing push + add, then rotating the rest of the
    way back to the original orientation. This replaces the four copy-pasted
    branches of the original with one code path (identical rotation counts:
    S=1 -> 1 then 3, S=2 -> 3 then 1, S=3 -> 2 then 2, else -> 0 then 0).
    """
    turns = {1: 1, 2: 3, 3: 2}.get(S, 0)
    for _ in range(turns):
        board = rotate(board)
    push(board)
    add(board)
    for _ in range((4 - turns) % 4):
        board = rotate(board)
    return board
# Moves: 1=up, 2=down, 3=left, 4=right
N = int(input())
board = []
tmp = []
S = 1
for i in range(N):
    board.append(list(map(int, input().split())))
original = deepcopy(board)
# Brute-force every sequence of 5 moves and track the best resulting tile.
for i in list(pd([1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4])):
    # BUG FIX: each 5-move sequence must start from an untouched copy of the
    # board. "new_board = original" only aliased it, so a sequence whose first
    # move is 4 (no rotation happens, push/add mutate in place) corrupted
    # `original` and therefore every later sequence.
    new_board = deepcopy(original)
    for j in range(5):
        new_board = solve(i[j], new_board)
    tmp.append(mymax(new_board))
print(max(tmp)) | [
"jindex2411@naver.com"
] | jindex2411@naver.com |
c1158f4b21ea5e89da3ac0828829fe99d4e66d9f | 4d1cc7c794039d31044eb94e38ab2ce4e4d62a72 | /0x0B-python-input_output/5-to_json_string.py | 451186994587818da5fcf46fb5179ba0ced72e8d | [] | no_license | icculp/holbertonschool-higher_level_programming | 2aca8f9df999b8931fb77280cb96ec16d9dffe07 | 698792cdc9096a17f2da0212d33518fda76b5213 | refs/heads/master | 2022-12-21T21:07:58.720950 | 2020-09-25T00:33:44 | 2020-09-25T00:33:44 | 259,339,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | #!/usr/bin/python3
"""
Task 5 Module
"""
import json
def to_json_string(my_obj):
    """
    Return the JSON string representation of my_obj (via json.dumps)
    """
    return json.dumps(my_obj)
| [
"icculp@gmail.com"
] | icculp@gmail.com |
fe2598c389279bc2342d80a08973218dd6bba537 | c2de8119d4347fc34fd101b1e98588f3624a1f0a | /bestiary/schema.py | bff17176857457dc1ff50c9a15fbccc921772d29 | [
"Apache-2.0"
] | permissive | claytondaley/swarfarm | d2b44628fda3d6c14046ea177337cfa42e87c3db | 10c0d381ab6d29d98c8cb88070a034f41d4028b6 | refs/heads/master | 2020-04-03T13:00:15.997376 | 2019-04-05T17:53:03 | 2019-04-05T17:53:03 | 155,271,052 | 0 | 0 | Apache-2.0 | 2018-10-29T19:44:30 | 2018-10-29T19:44:29 | null | UTF-8 | Python | false | false | 7,623 | py | import graphene
from graphene import relay
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from .api_filters import MonsterFilter, SkillFilter
from .models import Monster, LeaderSkill, Skill, SkillEffect, SkillEffectDetail, Source, ScalingStat, MonsterCraftCost, \
CraftMaterial, Dungeon, Level
class LevelNode(DjangoObjectType):
    """Relay node exposing the Level model; no filter fields enabled."""
    class Meta:
        model = Level
        interfaces = (relay.Node,)
        filter_fields = []
class DungeonNode(DjangoObjectType):
    """Relay node exposing the Dungeon model with a restricted field set."""
    class Meta:
        model = Dungeon
        description = "Dungeon objects"
        interfaces = (relay.Node,)
        # Only these model fields are exposed through GraphQL.
        only_fields = [
            'id',
            'name',
            'max_floors',
            'slug',
            'category',
            'levels',
        ]
        # Fields clients may filter the connection by.
        filter_fields = [
            'id',
            'name',
            'category',
        ]
class LeaderSkillNode(DjangoObjectType):
    """Relay node exposing the LeaderSkill model; no filter fields enabled."""
    class Meta:
        model = LeaderSkill
        interfaces = (relay.Node,)
        filter_fields = []
class EffectNode(DjangoObjectType):
    """Relay node exposing the SkillEffect model; no filter fields enabled."""
    class Meta:
        model = SkillEffect
        interfaces = (relay.Node,)
        filter_fields = []
class EffectDetailNode(DjangoObjectType):
    """Relay node exposing the SkillEffectDetail model; no filter fields enabled."""
    class Meta:
        model = SkillEffectDetail
        interfaces = (relay.Node,)
        filter_fields = []
class ScalingStatNode(DjangoObjectType):
    """Relay node exposing the ScalingStat model with a restricted field set."""
    class Meta:
        model = ScalingStat
        interfaces = (relay.Node,)
        filter_fields = []
        only_fields = [
            'stat',
            'com2us_desc',
            'description',
        ]
class SkillNode(DjangoObjectType):
    """Relay node exposing the Skill model plus two custom list fields."""
    # Custom fields backed by the resolvers below rather than model columns.
    effects = graphene.List(of_type=EffectDetailNode)
    scaling_stats = graphene.List(of_type=ScalingStatNode)
    def resolve_effects(self, info, *args, **kwargs):
        # `self` here is the Skill model instance being serialized.
        return self.skilleffectdetail_set.all()
    def resolve_scaling_stats(self, info, *args, **kwargs):
        return self.scaling_stats.all()
    class Meta:
        model = Skill
        interfaces = (relay.Node,)
        # Only these model fields are exposed through GraphQL.
        only_fields = [
            'id',
            'name',
            'com2us_id',
            'description',
            'slot',
            'effects',
            'cooltime',
            'hits',
            'aoe',
            'passive',
            'max_level',
            'level_progress_description',
            'icon_filename',
            'multiplier_formula',
            'multiplier_formula_raw',
            'scaling_stats',
        ]
class SourceNode(DjangoObjectType):
    """Relay node exposing the Source model; no filter fields enabled."""
    class Meta:
        model = Source
        interfaces = (relay.Node,)
        filter_fields = []
class MonsterCraftCostNode(DjangoObjectType):
    """Relay node exposing the MonsterCraftCost model; no filter fields enabled."""
    class Meta:
        model = MonsterCraftCost
        interfaces = (relay.Node,)
        filter_fields = []
class CraftMaterialNode(DjangoObjectType):
    """Relay node exposing the CraftMaterial model; no filter fields enabled."""
    class Meta:
        model = CraftMaterial
        interfaces = (relay.Node,)
        filter_fields = []
class MonsterNode(DjangoObjectType):
    """Relay node exposing the Monster model plus custom fields."""
    base_stars = graphene.Int()
    skills = graphene.List(of_type=SkillNode)
    # CONSISTENCY FIX: accept `info` explicitly like the other resolvers in
    # this file (e.g. SkillNode.resolve_effects); graphene always passes it
    # as the second positional argument, so this is backward compatible.
    def resolve_skills(self, info, *args, **kwargs):
        return self.skills.all()
    class Meta:
        model = Monster
        interfaces = (relay.Node,)
        # Only these model fields are exposed through GraphQL.
        only_fields = [
            'id',
            'name',
            'com2us_id',
            'family_id',
            'image_filename',
            'element',
            'archetype',
            'base_stars',
            'obtainable',
            'can_awaken',
            'is_awakened',
            'awaken_bonus',
            'skills',
            'skill_ups_to_max',
            'leader_skill',
            'raw_hp',
            'raw_attack',
            'raw_defense',
            'base_hp',
            'base_attack',
            'base_defense',
            'max_lvl_hp',
            'max_lvl_attack',
            'max_lvl_defense',
            'speed',
            'crit_rate',
            'crit_damage',
            'resistance',
            'accuracy',
            'homunculus',
            'monstercraftcost_set',
            'craft_cost',
            'transforms_into',
            'awakens_from',
            'awakens_to',
            'awaken_mats_fire_low',
            'awaken_mats_fire_mid',
            'awaken_mats_fire_high',
            'awaken_mats_water_low',
            'awaken_mats_water_mid',
            'awaken_mats_water_high',
            'awaken_mats_wind_low',
            'awaken_mats_wind_mid',
            'awaken_mats_wind_high',
            'awaken_mats_light_low',
            'awaken_mats_light_mid',
            'awaken_mats_light_high',
            'awaken_mats_dark_low',
            'awaken_mats_dark_mid',
            'awaken_mats_dark_high',
            'awaken_mats_magic_low',
            'awaken_mats_magic_mid',
            'awaken_mats_magic_high',
            'source',
            'farmable',
            'fusion_food',
            'bestiary_slug'
        ]
def _optimized_monster_queryset():
    # Pre-load every relation MonsterNode serializes so one GraphQL query does
    # not trigger per-row database lookups (N+1 avoidance).
    return Monster.objects.all().select_related(
        'leader_skill',
        'awakens_from',
        'awakens_to',
        'transforms_into',
    ).prefetch_related(
        'skills',
        'skills__effect',
        'skills__effect__effect',
        'skills__scaling_stats',
        'monstercraftcost_set',
        'monstercraftcost_set__craft',
        'source',
    )
def _optimized_skill_queryset():
    # Pre-load the relations SkillNode serializes (N+1 avoidance).
    return Skill.objects.all().prefetch_related(
        'scaling_stats',
        'skilleffectdetail_set',
        'skilleffectdetail_set__effect',
        'skilleffectdetail_set__effect__effect',
    )
class Query(object):
    """Root query mixin wiring every bestiary node into the GraphQL schema."""
    dungeon = relay.Node.Field(DungeonNode)
    all_dungeons = DjangoFilterConnectionField(DungeonNode)
    def resolve_dungeon(self, info, id, **kwargs):
        return Dungeon.objects.prefetch_related('level_set').get(pk=id)
    def resolve_all_dungeons(self, info, **kwargs):
        return Dungeon.objects.all().prefetch_related('level_set')
    level = relay.Node.Field(LevelNode)
    all_levels = DjangoFilterConnectionField(LevelNode, )
    def resolve_level(self, info, id, **kwargs):
        return Level.objects.select_related('dungeon').get(pk=id)
    def resolve_all_levels(self, info, **kwargs):
        return Level.objects.all().select_related('dungeon')
    monster = relay.Node.Field(MonsterNode)
    # Paged connection with a custom filterset; clients must pass first/last
    # and may request at most 200 records per page.
    all_monsters = DjangoFilterConnectionField(
        MonsterNode,
        filterset_class=MonsterFilter,
        max_limit=200,
        enforce_first_or_last=True,
    )
    def resolve_monster(self, info, id, **kwargs):
        return _optimized_monster_queryset().get(pk=id)
    def resolve_all_monsters(self, info, **kwargs):
        return _optimized_monster_queryset()
    source = graphene.Field(SourceNode)
    all_sources = DjangoFilterConnectionField(SourceNode)
    craftCost = relay.Node.Field(MonsterCraftCostNode)
    craftMaterial = relay.Node.Field(CraftMaterialNode)
    leader_skill = relay.Node.Field(LeaderSkillNode)
    skill = relay.Node.Field(SkillNode)
    all_skills = DjangoFilterConnectionField(
        SkillNode,
        filterset_class=SkillFilter,
        max_limit=200,
        enforce_first_or_last=True,
    )
    def resolve_skill(self, info, id, **kwargs):
        return _optimized_skill_queryset().get(pk=id)
    def resolve_all_skills(self, info, **kwargs):
        return _optimized_skill_queryset()
    skill_effect = relay.Node.Field(EffectNode)
    all_skill_effects = DjangoFilterConnectionField(EffectNode)
    scaling_stat = relay.Node.Field(ScalingStatNode)
    all_scaling_stats = DjangoFilterConnectionField(ScalingStatNode)
| [
"peter@porksmash.com"
] | peter@porksmash.com |
# Digit -> letters mapping of a standard phone keypad ('1' contributes no
# letters, '0' maps to a space).
letters = {
    '1': '',
    '2': 'abc',
    '3': 'def',
    '4': 'ghi',
    '5': 'jkl',
    '6': 'mno',
    '7': 'pqrs',
    '8': 'tuv',
    '9': 'wxyz',
    '0': ' '
}


# Ikey's Way
def combos(nums, curr=None):
    """Return every letter combination spelled by the digit string *nums*.

    *curr* accumulates the partial combinations across recursive calls;
    callers normally omit it.
    """
    # BUG FIX: the original used the shared mutable default `curr=[]`;
    # use the None sentinel instead (behavior for callers is unchanged).
    if curr is None:
        curr = []
    if len(nums) == 0:
        return curr
    if len(curr) == 0:
        return combos(nums[1:], list(letters[nums[0]]))
    extended = []
    # BUG FIX: the original iterated `combos` (the function object itself),
    # which raises TypeError; it must iterate the accumulated combinations.
    for combo in curr:
        for letter in letters[nums[0]]:
            extended.append(combo + letter)
    return combos(nums[1:], extended)
# Alan's Way (cleaner cause no optional param)
def combos2(nums):
    """Return every letter combination spelled by the digit string *nums*.

    Equivalent to combos(); the recursion builds the suffix combinations
    first and then prepends each letter of the leading digit.
    """
    if len(nums) == 0:
        return []
    if len(nums) == 1:
        return list(letters[nums])
    all_combos = []
    for combo in combos2(nums[1:]):
        for letter in letters[nums[0]]:
            # BUG FIX: the original appended `combo + letter`, which emitted
            # the letters in reversed digit order ('23' -> 'da', ...);
            # prepending keeps digit order, matching combos().
            all_combos.append(letter + combo)
    return all_combos
return all_combos
def possible_words(nums):
    """Return the letter combinations for each '0'-separated digit group.

    NOTE(review): the original built a generator and discarded it (the
    function implicitly returned None); returning the materialized lists is
    presumably what was intended -- confirm against callers.
    """
    return [combos(word) for word in nums.split('0')]
def test_combos():
    """Pin the intended phone-pad behavior ('1' contributes no letters).

    BUG FIX: the original expectation mixed 2- and 3-letter strings and did
    not match any correct output; since '1' maps to no letters,
    combos('123') spells the 9 pairings of 'abc' x 'def'.
    """
    assert sorted(combos('123')) == sorted(a + b for a in 'abc' for b in 'def')
    assert combos('2') == ['a', 'b', 'c']
| [
"ikey.benz@gmail.com"
] | ikey.benz@gmail.com |
d7133de348ab246f13ca3e1239122d3158674487 | 1f7fce552cc68731f683ded3f831e8f4650c7197 | /Axis16/Axis16/wsgi.py | 5139839645ceae062b39e5a7f5253511300b6776 | [] | no_license | tanaypatil/axis-website | 3985068cf1c52bb038b7174cbdf938b8b4084c03 | b5eda2906150a38b1bb0daf8b23c9194572b849c | refs/heads/master | 2020-06-13T03:14:05.855948 | 2019-06-30T13:12:11 | 2019-06-30T13:12:11 | 194,514,303 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for Axis16 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Make the settings module discoverable before the application object is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Axis16.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"tanaypatil197@gmail.com"
] | tanaypatil197@gmail.com |
2e991e0e4ef010d0049ff75e77aae840060ece57 | 577a40ff1c84d28b88a9ade84d265587d28ed2a3 | /0715/02.TextProcessingAPI.py | 708becb6d522488c2c631c2e45a37169303e25c6 | [] | no_license | bosl95/MachineLearning_Note | b167c182fcf5186f6466b8b062cde83b076b0b04 | 934714c5a62e4864f2b5338153c3aaeb3363abe9 | refs/heads/master | 2022-12-06T20:58:20.457567 | 2020-09-05T16:18:11 | 2020-09-05T16:18:11 | 279,835,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,812 | py | # STEP 1
# STEP 1
from keras.preprocessing.text import Tokenizer
samples = ['The cat sat on the mat.','The dog ate my homework.']
# Create a tokenizer object that keeps only the 1000 most frequent words.
tokenizer = Tokenizer(num_words=1000) # builds a dictionary that automatically numbers each word.
tokenizer.fit_on_texts(samples)
# STEP 2. Convert the strings into lists of integer indices.
sequences = tokenizer.texts_to_sequences(samples)
# print(sequences) # [[1, 2, 3, 4, 1, 5], [1, 6, 7, 8, 9]]
# A one-hot binary vector representation can be obtained directly.
one_hot_results = tokenizer.texts_to_matrix(samples, mode='binary')
# print(one_hot_results)
# [[0. 1. 1. ... 0. 0. 0.]
#  [0. 1. 0. ... 0. 0. 0.]]
# print(one_hot_results.shape) # (2, 1000)
# print(one_hot_results[0][3]) # 1.0 when the word was found in the dictionary
# print(one_hot_results[0][10]) # 0.0 when the word was not found in the dictionary
# How many distinct words were processed.
word_idx = tokenizer.word_index
print("Found {} unique tokens".format(len(word_idx)))
# STEP 3
from keras.datasets import imdb
from keras import preprocessing
import numpy as np
# Number of words to use as features.
max_features = 10000
# Length of text to use (only the max_features most frequent words are kept).
maxlen = 20 # maximum number of words per sentence
# Load the data as lists of integers.
(x_train, y_train),(x_test,y_test) = imdb.load_data(num_words=max_features)
# print(x_train.shape, y_train.shape, x_test.shape, y_test.shape) # (25000,) (25000,) (25000,) (25000,)
# Turn the lists into a 2D integer tensor of shape (samples, maxlen).
x_train = preprocessing.sequence.pad_sequences(x_train,maxlen=maxlen)
x_test = preprocessing.sequence.pad_sequences(x_test,maxlen=maxlen)
from keras.models import Sequential
from keras.layers import Flatten,Dense,Embedding
model = Sequential()
# 나중에 임베딩 된 입력을 Flatten 층에서 펼치기 위해 Embedding 층에 input_length를 지정
# param1: 줄수(단어수)/param2: 필터 폭/ param3: 데이터 폭. 단어 최대 길이
# Embedding 층의 출력 크기는 (samples,maxlen,8) 이 된다
model.add(Embedding(10000,8,input_length=maxlen)) # 임베딩(끼워넣기) 층(데이터 개수, 필터 depth, 단어 최대 길이 폭)
# 첫번째 인자 = 단어 집합의 크기. 즉, 총 단어의 개수
# 두번째 인자 = 임베딩 벡터의 출력 차원. 결과로서 나오는 임베딩 벡터의 크기
# input_length = 입력 시퀀스의 길이
# 3D 임베딩 텐서를 (samples, maxlen * 8) 크기의 2D 텐서로 펼침
model.add(Flatten())
# 분류기 추가
model.add(Dense(1,activation='sigmoid'))
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
model.summary()
# 학습
history = model.fit(x_train,y_train,
epochs = 10, # 10번 반복
batch_size = 32,# 32마다
validation_split = 0.2) # 학습 데이터에서 20퍼 떼어내서 테스팅에 사용해라
# STEP 5. 테스팅
# sigmoid층으로부터 도출한 결과. 0.5보다 작으면 부정. 크면 긍정
pre = model.predict(x_test)
print(pre)
# STEP 6. 성능 확인
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1,len(acc)+1)
plt.plot(epochs,acc,'bo',label='Training acc')
plt.plot(epochs, val_acc,'b',label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss,'bo',label='Training loss')
plt.plot(epochs, val_loss,'b',label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
| [
"bosl95@naver.com"
] | bosl95@naver.com |
868da6ad7de2e92037f68b84e2de141d81e5376e | 9d6e747ed7204555199ece2033decff978295a09 | /Programmers/그리디/구명보트.py | e397a45f8b8c3318aef387de4ebb6add75f97628 | [] | no_license | leejaeyeong/Algorithm | 5b47ed9aa241990945cbf2451afe7f084984ced5 | 72072d1e0c28e72075fc00db9239a4bd444b68b6 | refs/heads/master | 2021-08-08T10:57:07.345943 | 2021-07-11T15:01:59 | 2021-07-11T15:01:59 | 238,156,464 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,238 | py | ''' 무인도에 갇힌 사람들을 구명보트를 이용하여 구출하려고 합니다.
구명보트는 작아서 한 번에 최대 2명씩 밖에 탈 수 없고, 무게 제한도 있습니다.
예를 들어, 사람들의 몸무게가 [70kg, 50kg, 80kg, 50kg]이고 구명보트의 무게 제한이 100kg이라면
2번째 사람과 4번째 사람은 같이 탈 수 있지만 1번째 사람과 3번째 사람의 무게의 합은 150kg이므로
구명보트의 무게 제한을 초과하여 같이 탈 수 없습니다.
구명보트를 최대한 적게 사용하여 모든 사람을 구출하려고 합니다.
사람들의 몸무게를 담은 배열 people과 구명보트의 무게 제한 limit가 매개변수로 주어질 때,
모든 사람을 구출하기 위해 필요한 구명보트 개수의 최솟값을 return 하도록 solution 함수를 작성해주세요.
'''
# Two-pointer greedy over the sorted weights.
def solution(people, limit):
    """Return the minimum number of 2-person boats needed to carry everyone."""
    people.sort()
    boats = 0
    lightest, heaviest = 0, len(people) - 1
    while lightest <= heaviest:
        # The heaviest remaining person always departs on this boat; take the
        # lightest along whenever the pair fits under the weight limit.
        if people[lightest] + people[heaviest] <= limit:
            lightest += 1
        heaviest -= 1
        boats += 1
    return boats
# Quick manual checks against the problem statement's worked examples.
print(solution([70,50,80,50],100))
print(solution([70,80,50],100))
# list del 삭제 연산 후 list 재조정 시간으로 인해 효율성 1번 시간초과
''' def solution(people, limit):
people.sort()
cnt = 0
while len(people) > 1 :
if people[0] + people[-1] <= limit :
del people[0]
del people[-1]
cnt += 1
else :
del people[-1]
cnt += 1
return cnt + len(people) '''
# dequeue를 이용해 삭제를 처리한 경우
# list의 pop(), leftpop()이 list의 처음 또는 마지막 값이기 때문에 재조정이 따로 없는듯하다.
''' import collections
def solution(people, limit):
people = collections.deque(sorted(people))
cnt = 0
while len(people) > 1 :
if people[0] + people[-1] <= limit :
people.popleft()
people.pop()
cnt += 1
else :
people.pop()
cnt += 1
return cnt + len(people) ''' | [
"dldustn14@gmail.com"
] | dldustn14@gmail.com |
6b13c2d1b3333eb858d2f7ff6f6803b8edbd5e52 | f9e441608cbca1fd2a39de27cdc187cf676ef159 | /matplotlib/example26.py | 3a47fec94e67faf5639d189d893932d8b226684f | [] | no_license | tisnik/jupyter-notebook-examples | cdded1ce63f6637c76c33adbfb25d9efc13b5fcf | 66974b0590f8beef39ed9d364c9d2b1ee3bd2e63 | refs/heads/master | 2023-07-07T04:43:11.787115 | 2021-08-05T07:16:20 | 2021-08-05T07:16:20 | 257,216,516 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # Jupyter Notebook
#
# Dvacátý šestý demonstrační příklad:
# - zobrazení 3D grafu funkce typu z=f(x,y)
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.gca(projection='3d')
delta = 0.1
# průběh nezávislé proměnné x
x = np.arange(-10.0, 10.0, delta)
# průběh nezávislé proměnné y
y = np.arange(-10.0, 10.0, delta)
# vytvoření dvou polí se souřadnicemi [x,y]
X, Y = np.meshgrid(x, y)
# vzdálenost od bodu [0,0]
R = np.sqrt(X*X+Y*Y)
# výpočet funkce, kterou použijeme při vykreslování grafu
Z = np.sin(R)/R
# zobrazení 3D grafu formou plochy
ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# zobrazení grafu
plt.show()
| [
"ptisnovs@redhat.com"
] | ptisnovs@redhat.com |
4de9459362cd51256cf0acfb5269084fb1d69ad5 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/show_user_repository_auth_response.py | dbf5288a6fb5ffa67492b7a9cdb0a81de2d9138d | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,467 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowUserRepositoryAuthResponse(SdkResponse):
    """Generated SDK response model for a user-repository permission query.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'id': 'int',
        'name': 'str',
        'self_auth': 'UserAuth',
        'others_auths': 'list[UserAuth]'
    }
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'self_auth': 'self_auth',
        'others_auths': 'others_auths'
    }
    def __init__(self, id=None, name=None, self_auth=None, others_auths=None):
        """ShowUserRepositoryAuthResponse

        The model defined in huaweicloud sdk

        :param id: id
        :type id: int
        :param name: Organization name: 1-64 characters, starting with a lowercase letter, followed by lowercase letters, digits, periods, underscores or hyphens (at most two consecutive underscores; periods, underscores and hyphens may not be adjacent to one another), ending with a lowercase letter or digit.
        :type name: str
        :param self_auth:
        :type self_auth: :class:`huaweicloudsdkswr.v2.UserAuth`
        :param others_auths: Permissions granted to other users
        :type others_auths: list[:class:`huaweicloudsdkswr.v2.UserAuth`]
        """
        super(ShowUserRepositoryAuthResponse, self).__init__()
        self._id = None
        self._name = None
        self._self_auth = None
        self._others_auths = None
        self.discriminator = None
        # All fields are optional; only assign the ones the caller supplied.
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if self_auth is not None:
            self.self_auth = self_auth
        if others_auths is not None:
            self.others_auths = others_auths
    @property
    def id(self):
        """Gets the id of this ShowUserRepositoryAuthResponse.

        :return: The id of this ShowUserRepositoryAuthResponse.
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this ShowUserRepositoryAuthResponse.

        :param id: The id of this ShowUserRepositoryAuthResponse.
        :type id: int
        """
        self._id = id
    @property
    def name(self):
        """Gets the name of this ShowUserRepositoryAuthResponse.

        Organization name (see __init__ for the format constraints).

        :return: The name of this ShowUserRepositoryAuthResponse.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this ShowUserRepositoryAuthResponse.

        Organization name (see __init__ for the format constraints).

        :param name: The name of this ShowUserRepositoryAuthResponse.
        :type name: str
        """
        self._name = name
    @property
    def self_auth(self):
        """Gets the self_auth of this ShowUserRepositoryAuthResponse.

        :return: The self_auth of this ShowUserRepositoryAuthResponse.
        :rtype: :class:`huaweicloudsdkswr.v2.UserAuth`
        """
        return self._self_auth
    @self_auth.setter
    def self_auth(self, self_auth):
        """Sets the self_auth of this ShowUserRepositoryAuthResponse.

        :param self_auth: The self_auth of this ShowUserRepositoryAuthResponse.
        :type self_auth: :class:`huaweicloudsdkswr.v2.UserAuth`
        """
        self._self_auth = self_auth
    @property
    def others_auths(self):
        """Gets the others_auths of this ShowUserRepositoryAuthResponse.

        Permissions granted to other users.

        :return: The others_auths of this ShowUserRepositoryAuthResponse.
        :rtype: list[:class:`huaweicloudsdkswr.v2.UserAuth`]
        """
        return self._others_auths
    @others_auths.setter
    def others_auths(self, others_auths):
        """Sets the others_auths of this ShowUserRepositoryAuthResponse.

        Permissions granted to other users.

        :param others_auths: The others_auths of this ShowUserRepositoryAuthResponse.
        :type others_auths: list[:class:`huaweicloudsdkswr.v2.UserAuth`]
        """
        self._others_auths = others_auths
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of leaking their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 compatibility shim kept from the code generator.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowUserRepositoryAuthResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
1a1971c072b24595aa808de9a2af6fbceee0b46b | e03ffd4821bd278da3b0835cd8630b12958d4236 | /sply/grammar.py | 94b2ee96b6e7368047387bcdd4fca2b58a1f4c68 | [] | no_license | RussellLuo/sply | c659a33eabefe935db06ace846fe30e31bd841ba | d6f11f155803b874890428d173b45ee3f2b3fe76 | refs/heads/master | 2016-09-06T04:40:04.977841 | 2014-06-11T15:48:29 | 2014-06-11T15:48:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ### decorators ###
def token(regex):
    """Decorator: mark the decorated function as a token rule for *regex*."""
    def wrapper(func):
        func.grammar = {'type': 'token', 'definition': regex}
        return func
    return wrapper
def production(rules):
    """Decorator: mark the decorated function as a production rule for *rules*."""
    def wrapper(func):
        func.grammar = {'type': 'production', 'definition': rules}
        return func
    return wrapper
class Grammar(object):
    """Base class for a grammar definition: rule lists plus error hooks."""

    keywords = ()
    literals = ()
    simple_tokens = ()
    precedences = ()

    def token_error_handler(self, t):
        """Error handler when parsing tokens.

        Returns how many characters should be skipped before trying to parse
        the next token; 0 means terminate the current parsing process.
        """
        print('Illegal characters "%s"' % t.value)
        return 1

    def production_error_handler(self, p):
        """Report a syntax error at token *p*, or at EOF when *p* is falsy."""
        if not p:
            print('Syntax error at EOF')
        else:
            print('Syntax error at %s' % p.value)

    def get_token_names(self):
        """Get names of all tokens.

        all_tokens = keywords + literals + simple_tokens + method_tokens
        """
        names = list(self.keywords)
        names += list(self.literals)
        names += [simple_name for simple_name, _ in self.simple_tokens]
        names += [method.__name__
                  for method, _ in self.get_grammar_methods('token')]
        return names

    def get_grammar_methods(self, grammar_type):
        """Return (method, definition) pairs whose grammar type matches."""
        found = []
        for attr_name in dir(self):
            candidate = getattr(self, attr_name)
            rule = getattr(candidate, 'grammar', None)
            if rule and rule['type'] == grammar_type:
                found.append((candidate, rule['definition']))
        return found
| [
"luopeng.he@gmail.com"
] | luopeng.he@gmail.com |
62c3025dd72cbd79bb67fc3b146f88adb5c53aa9 | 13830825b25ec01ec2874094a10f36b4b1336ac7 | /tf/languages/python/triggers/run.py | 1e16f634c9f07393965d850f7e8e98d94d0d240a | [] | no_license | yurimalheiros/textflow | db62047b43c44d43c6efc67ad94f8118f984b076 | c21ddf8aba58dc83d58a8db960d58d91ee2e5c74 | refs/heads/master | 2016-09-10T14:47:18.159229 | 2011-11-02T14:26:31 | 2011-11-02T14:26:31 | 1,927,215 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | # -*- coding: utf-8 -*-
#######################################################################
# Copyright © 2007-2009 Yuri Malheiros.
# Copyright © 2009 TextFlow Team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# the Free Software Foundation; version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#######################################################################
"""
This module implements the trigger of "F5".
"""
import tf.app
from tf.languages.python.triggers.runfiles.terminal import RunTerminal
# Key that fires this trigger: keyval 65474 is F5 (`unichr` => Python 2 code).
shortcut = unichr(65474) #F5
# Non-sticky trigger.
sticky = False
class Run(object):
    """Trigger that runs the active document with the `python` interpreter."""
    def activate(self):
        """
        Execute the current python file.

        Returns True unconditionally.
        """
        self.document_manager = tf.app.document_manager
        document = self.document_manager.get_active_document()
        # Launch `python <file_uri>` in a dedicated run terminal.
        self.terminal = RunTerminal()
        self.terminal.run("python", document.file_uri)
        return True
| [
"yurimalheiros@gmail.com"
] | yurimalheiros@gmail.com |
8d7b14dcccda8ae9f7b2682fb5da36c5f06b8731 | 0534c55fd45878ee1ef12d6a9a2903c51cc6cd56 | /backend/task_profile/models.py | 0876c0b99a2c9c7e831a67edea5d5986436f2d77 | [] | no_license | crowdbotics-apps/lcbo-go-19501 | a7db5aa2265da23ab294de9c683951e54522ce22 | 96dd8ad7ce8c3dcb9526e2197a952d9b8028898f | refs/heads/master | 2022-11-26T07:36:01.090189 | 2020-08-11T20:23:52 | 2020-08-11T20:23:52 | 286,839,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | from django.conf import settings
from django.db import models
class Notification(models.Model):
    "Generated Model"
    # Short type label for the notification (up to 20 characters).
    type = models.CharField(max_length=20,)
    message = models.TextField()
    # Recipients: many-to-many link to user accounts.
    user = models.ManyToManyField("users.User", related_name="notification_user",)
    # Set automatically when the row is first created.
    timestamp_created = models.DateTimeField(auto_now_add=True,)
class TaskerProfile(models.Model):
    "Generated Model"
    # One tasker profile per user account; removed together with the user.
    user = models.OneToOneField(
        "users.User", on_delete=models.CASCADE, related_name="taskerprofile_user",
    )
    mobile_number = models.CharField(max_length=20,)
    photo = models.URLField()
    timestamp_created = models.DateTimeField(auto_now_add=True,)
    last_updated = models.DateTimeField(auto_now=True,)
    last_login = models.DateTimeField(null=True, blank=True,)
    # Optional profile details; all nullable.
    description = models.TextField(null=True, blank=True,)
    city = models.CharField(null=True, blank=True, max_length=50,)
    vehicle = models.CharField(null=True, blank=True, max_length=50,)
    closing_message = models.TextField(null=True, blank=True,)
    # Working radius around the tasker (units not specified here -- confirm).
    work_area_radius = models.FloatField(null=True, blank=True,)
class CustomerProfile(models.Model):
    "Generated Model"
    # One customer profile per user account; removed together with the user.
    user = models.OneToOneField(
        "users.User", on_delete=models.CASCADE, related_name="customerprofile_user",
    )
    mobile_number = models.CharField(max_length=20,)
    photo = models.URLField()
    timestamp_created = models.DateTimeField(auto_now_add=True,)
    last_updated = models.DateTimeField(auto_now=True,)
    last_login = models.DateTimeField(null=True, blank=True,)
class InviteCode(models.Model):
    "Generated Model"
    # The invite code value itself (up to 20 characters).
    code = models.CharField(max_length=20,)
    # Owning user; a user may have several codes.
    user = models.ForeignKey(
        "users.User", on_delete=models.CASCADE, related_name="invitecode_user",
    )
    timestamp_created = models.DateTimeField(auto_now_add=True,)
# Create your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
bc7dfd6b2743ba065db5069510e641c5b4e23c65 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/class스타크래프트프로젝트전반전_20200709104854.py | 1d7e1775de7a8f0280b1c3b4921af09ff2aa05b5 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,771 | py | # 일반 유닛
class Unit:
    """Base class for every unit: a name, hit points and a move speed."""

    def __init__(self, name, hp, speed):
        """Create the unit and announce its creation."""
        self.name = name
        self.hp = hp
        self.speed = speed
        print("{0} 유닛이 생성되었습니다.".format(name))

    def move(self, location):
        """Announce a ground move toward *location* at this unit's speed."""
        print("[지상 유닛 이동]")
        message = "{0} : {1} 방향으로 이동합니다. [속도 {2}]".format(
            self.name, location, self.speed)
        print(message)

    def damaged(self, damage):
        """Subtract *damage* from hp, reporting the result and destruction."""
        print("{0} : {1} 데미지를 입었습니다.".format(self.name, damage))
        remaining = self.hp - damage
        self.hp = remaining
        print("{0} : 현재 체력은 {1}입니다.".format(self.name, remaining))
        if remaining <= 0:
            print("{0} : 파괴되었습니다.".format(self.name))
# Attack unit: a Unit that additionally carries an attack damage stat.
class AttackUnit(Unit):
    def __init__(self, name, hp, speed, damage):
        Unit.__init__(self, name, hp, speed)
        self.damage = damage
    def attack(self, location): # inside a class, every method takes self as its first parameter
        print("{0} : {1} 방향으로 적군을 공격 합니다. [공격력 {2}]"
              .format(self.name, location, self.damage))
# Marine
class Marine(AttackUnit):
    def __init__(self):
        AttackUnit.__init__(self, "마린", 40, 1, 5)
    # Stimpack: temporarily boosts move and attack speed at the cost of 10 HP.
    def stimpack(self):
        # Refuse the stimpack unless more than 10 HP remain.
        if self.hp > 10:
            self.hp -= 10
            print("{0} : 스팀팩을 사용합니다. (HP 10 감소)".format(self.name))
        else:
            print("{0} : 체력이 부족하여 스팀팩을 사용하지 않습니다.".format(self.name))
# Tank
class Tank(AttackUnit):
    """Siege tank: can anchor in place (siege mode) to double its damage."""
    # Siege mode fixes the tank in place for a more powerful attack; it
    # cannot move while sieged.
    seize_developed = False  # whether the siege-mode upgrade has been researched

    def __init__(self):
        AttackUnit.__init__(self, "탱크", 150, 1, 35)
        self.seize_mode = False

    def set_seize_mode(self):
        """Toggle siege mode; a no-op until the upgrade is developed."""
        if Tank.seize_developed == False:
            return
        # Not in siege mode -> enter siege mode (damage doubles).
        if self.seize_mode == False:
            print("{0} : 시즈모드로 전환합니다.".format(self.name))
            self.damage *= 2
            self.seize_mode = True
        # In siege mode -> leave siege mode (damage restored).
        else:
            print("{0} : 시즈모드를 해제합니다.".format(self.name))
            # BUG FIX: the original wrote `self.damaged /= 2`, which tries to
            # halve the bound method `damaged` (TypeError) and never restored
            # the damage stat that entering siege mode doubled.
            self.damage /= 2
            self.seize_mode = False
# Mixin that gives a unit the ability to fly.
class Flyable:
    """Carries a flying speed and can announce an airborne move."""

    def __init__(self, flying_speed):
        self.flying_speed = flying_speed

    def fly(self, name, location):
        """Report that *name* flies toward *location* at our flying speed."""
        message = "{0} : {1} 방향으로 날아갑니다. [속도 {2}]".format(
            name, location, self.flying_speed)
        print(message)
# Airborne attack unit: combines AttackUnit stats with the Flyable mixin.
class FlyableAttackUnit(AttackUnit, Flyable):
    def __init__(self, name, hp, damage, flying_speed):
        AttackUnit.__init__(self, name, hp, 0, damage) # ground speed 0
        Flyable.__init__(self, flying_speed)
    def move(self, location):
        # Override: flying units travel by air instead of by ground.
        print("[공중 유닛 이동]")
        self.fly(self.name, location)
# Wraith
class Wraith(FlyableAttackUnit):
    """Cloakable flying attack unit."""
    def __init__(self):
        FlyableAttackUnit.__init__(self, "레이스", 80, 20, 5)
        self.clocked = False  # cloaking mode (off by default)

    def clocking(self):
        """Toggle cloaking mode on/off."""
        if self.clocked == True:  # cloaked -> turn cloaking off
            print("{0} : 클로킹 모드 해제합니다.".format(self.name))
            # BUG FIX: the original used `==` (a no-op comparison) instead of
            # assignment here and below, so the cloaking state never changed.
            self.clocked = False
        else:  # not cloaked -> turn cloaking on
            print("{0} : 클로킹 모드 설정합니다.".format(self.name))
            self.clocked = True
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
1a6b70ae45e9fc29904a01cf685b64bf43a59743 | 0e9bd59e25d45adbc859cd187a5ebb00da4685ea | /tests/photometric_database/test_lightcurve_database.py | d3b21ca0bbdd55f26a9d21f5bb7727df9bc803b3 | [
"Apache-2.0"
] | permissive | REStickland/ramjet | cc836090d5afb868db4317bf9cb7416c26061c02 | ad69e284d5c45b6bd5e3d34e861e5d7b106d4589 | refs/heads/master | 2021-03-10T14:08:42.759728 | 2020-03-02T17:44:34 | 2020-03-02T17:44:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,968 | py | """
Tests for the LightcurveDatabase class.
"""
from typing import Any
from unittest.mock import Mock, patch
import numpy as np
import tensorflow as tf
import pytest
import ramjet.photometric_database.lightcurve_database
from ramjet.photometric_database.lightcurve_database import LightcurveDatabase
class TestLightcurveDatabase:
    """Unit tests for the LightcurveDatabase class."""
    @pytest.fixture
    def database(self):
        """Fixture of an instance of the class under test."""
        return LightcurveDatabase()
    @pytest.fixture
    def database_module(self) -> Any:
        # Import deferred to fixture time so patching targets the live module.
        import ramjet.photometric_database.lightcurve_database as database_module
        return database_module
    @pytest.fixture
    def module(self) -> Any:
        """Fixture of the module under test."""
        import ramjet.photometric_database.lightcurve_database as lightcurve_database_module
        return lightcurve_database_module
    def test_extraction_of_chunk_and_remainder_from_array(self, database, module):
        # Disable shuffling so the chunk boundaries are deterministic.
        module.np.random.shuffle = Mock()
        array_to_chunk = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]])
        expected_chunk = np.array([[3, 3], [4, 4]])
        expected_remainder = np.array([[1, 1], [2, 2], [5, 5], [6, 6]])
        chunk, remainder = database.extract_shuffled_chunk_and_remainder(array_to_chunk, chunk_ratio=1 / 3,
                                                                         chunk_to_extract_index=1)
        assert np.array_equal(chunk, expected_chunk)
        assert np.array_equal(remainder, expected_remainder)
    def test_creating_a_padded_window_dataset_for_a_zipped_example_and_label_dataset(self, database):
        # noinspection PyMissingOrEmptyDocstring
        def examples_generator():
            for example in [[1, 1], [2, 2], [3, 3], [4, 4, 4], [5, 5, 5], [6, 6, 6]]:
                yield example
        # noinspection PyMissingOrEmptyDocstring
        def labels_generator():
            for label in [[-1, -1], [-2, -2], [-3, -3], [-4, -4, -4], [-5, -5, -5], [-6, -6, -6]]:
                yield label
        example_dataset = tf.data.Dataset.from_generator(examples_generator, output_types=tf.float32)
        label_dataset = tf.data.Dataset.from_generator(labels_generator, output_types=tf.float32)
        dataset = tf.data.Dataset.zip((example_dataset, label_dataset))
        padded_window_dataset = database.padded_window_dataset_for_zipped_example_and_label_dataset(
            dataset=dataset, batch_size=3, window_shift=2, padded_shapes=([None], [None]))
        padded_window_iterator = iter(padded_window_dataset)
        batch0 = next(padded_window_iterator)
        assert np.array_equal(batch0[0].numpy(), [[1, 1], [2, 2], [3, 3]])
        # Second window starts 2 elements later; shorter examples are padded
        # with zeros to the longest example in the batch.
        batch1 = next(padded_window_iterator)
        assert np.array_equal(batch1[0].numpy(), [[3, 3, 0], [4, 4, 4], [5, 5, 5]])
    @patch.object(ramjet.photometric_database.lightcurve_database.np.random, 'randint')
    def test_lightcurve_padding_can_be_made_non_random_for_evaluation(self, mock_randint, database, database_module):
        # Pin the random start offset so the "randomized" path is reproducible.
        mock_randint.return_value = 3
        lightcurve0 = database.make_uniform_length(np.array([10, 20, 30, 40, 50]), length=9, randomize=True)
        assert np.array_equal(lightcurve0, [30, 40, 50, 10, 20, 30, 40, 50, 10])
        lightcurve1 = database.make_uniform_length(np.array([10, 20, 30, 40, 50]), length=9, randomize=False)
        assert np.array_equal(lightcurve1, [10, 20, 30, 40, 50, 10, 20, 30, 40])
        # Should also work for lightcurves with more than just 1 value over time.
        lightcurve2 = database.make_uniform_length(np.array([[10], [20], [30], [40], [50]]), length=9, randomize=True)
        assert np.array_equal(lightcurve2, [[30], [40], [50], [10], [20], [30], [40], [50], [10]])
        lightcurve3 = database.make_uniform_length(np.array([[10], [20], [30], [40], [50]]), length=9, randomize=False)
        assert np.array_equal(lightcurve3, [[10], [20], [30], [40], [50], [10], [20], [30], [40]])
| [
"golmschenk@gmail.com"
] | golmschenk@gmail.com |
22cd7d391cc9fbe273b5040f9df01d41521f8ad7 | 722ace259d9a92a7923a7ebd32608acbfddb073d | /Tag04/doz_a7.py | 69786d28c2f05b2563f8c924bbd53b3c49e9c70d | [] | no_license | anna-s-dotcom/python_final | 19da2d0d2a420f4d54bb2a9760593b655230dcea | 41275625c8905f55a7561cd50df51bbb7d4c39bd | refs/heads/master | 2020-12-08T15:30:04.336376 | 2020-01-10T10:17:15 | 2020-01-10T10:17:15 | 233,017,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | # Aufgabe 7)
# Erstelle eine Funktion, welche eine Zahl und einen Befehl übergeben bekommt.
# Der Befehl soll „lin“ oder „quad“ sein.
# Je nach dem soll die Funktion ausgeführt werden:
# lin = 4*x+5
# quad = 4*x2+5*x+6
# 1) Nutze ein if/else Konstrukt.
def linquad(x, f = 'lin'):
if f == 'lin':
return 4 * x + 5
elif f == 'quad':
return 4 * x**2 + 5*x + 6
else:
print('Falsche Eingabe')
# Demonstrate both branches of the if/else variant.
y = linquad(5, 'lin')
print(y)
y = linquad(10, 'quad')
print(y)
# 2) Using a dictionary.
def linquad_dic(x, f):
    """Dictionary-dispatch variant of linquad.

    Both polynomial values are computed eagerly when the dict literal is
    built; an unknown command raises KeyError.
    """
    lin_value = 4 * x + 5
    quad_value = 4 * x ** 2 + 5 * x + 6
    return {'lin': lin_value, 'quad': quad_value}[f]
# Demonstrate the dictionary-dispatch variant.
y = linquad_dic(5, 'lin')
print(y)
y = linquad_dic(10, 'quad')
print(y)
# important: the values are computed at the moment d is created!
print()
x = 5
d = {'lin': 4 * x + 5, 'quad': 4 * x**2 + 5*x + 6}
print(d['lin'])
# rebinding x afterwards has no effect on d
x = 10
print(d['lin'])
def f1():
    # Side effect only: prints a marker.
    print('F1')
def f2(x):
    # Pure function: quadruple the argument.
    return 4 * x
# when a function is used as a dict value, omit the parentheses
# - otherwise it would be called immediately instead of stored
d2 = {'f_1': f1, 'f_2': f2}
d2['f_1']()
y = d2['f_2'](5)
print(y)
| [
"noreply@github.com"
] | anna-s-dotcom.noreply@github.com |
524077872b20c6c49333c3c0e864ffe3114cc7fb | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2423/60899/261347.py | b07973f6cbd23601c9ce7344d44ec4505e1dade2 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | numOftests = int(input())
for i in range(numOftests):
list2 = list(map(int, input().split()))
length = list2[0]
list0 = list(map(int,input().split()))
list1 = list(map(int, input().split()))
list0.sort()
list1.sort()
lengthOfzero = len(list0)
list0.extend(list1)
list0 = list(set(list0))
if len(list0) == lengthOfzero:
print("Yes")
else:
print("No") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
d191aa6944595ac49641b74daa31bb3cd5b34f34 | e5dad8e72f6c89011ae030f8076ac25c365f0b5f | /caret_scripts/smooth_medial_wall.py | 3ba2096b84ab538f66083977dd8820563423cb95 | [] | no_license | djsperka/caret | f9a99dc5b88c4ab25edf8b1aa557fe51588c2652 | 153f8e334e0cbe37d14f78c52c935c074b796370 | refs/heads/master | 2023-07-15T19:34:16.565767 | 2020-03-07T00:29:29 | 2020-03-07T00:29:29 | 122,994,146 | 0 | 1 | null | 2018-02-26T16:06:03 | 2018-02-26T16:06:03 | null | UTF-8 | Python | false | false | 4,412 | py | #!/usr/bin/python
#
# Imports
#
import os
import sys
#
# Global Variables
#
progName = "/Users/john/caret5_osx/caret_source/caret_command/caret_command"
#progName = "caret_command"
areaColorFileName = "Geography.areacolor"
separateBorderProjectionFileName = "MedialWall_Separate.borderproj"
mergedBorderProjectionFileName = "MedialWall_Merged.borderproj"
borderColorFileName = "LANDMARK.bordercolor"
medialWallBorderName = "LANDMARK.MEDIAL.WALL"
fiducialCoordFileName = "Human.colin.Cerebral.R.FIDUCIAL.TLRC.711-2B.71723.coord"
smoothedFiducialCoordFileName = "Human.colin.Cerebral.R.FIDUCIAL_SMOOTHED.TLRC.711-2B.71723.coord"
inflatedCoordFileName = "Human.colin.Cerebral.R.INFLATED.71723.coord"
paintFileName = "MedialWall.paint"
paintName = "MEDIAL.WALL"
roiFileName = "medial_wall.roi"
surfaceShapeFileName = "Curvature.surface_shape"
topologyFileName = "Human.colin.Cerebral.R.CLOSED.71723.topo"
##-----------------------------------------------------------------------------
#
# Run a command
#
def runCommand(cmdList) :
cmd = " ".join(cmdList) # join cmdList into a string separated by blanks
print "\nExecuting: %s\n" % cmd
result = os.system(cmd)
if (result != 0) :
print "COMMAND FAILED: "
print " ", cmd
os._exit(-1)
##-----------------------------------------------------------------------------
#
# Main
#
# Drives a sequence of caret_command invocations: merge the two medial wall
# landmark borders, build an ROI/paint for the enclosed nodes, smooth the
# fiducial surface within that ROI, and compute curvature on the result.
#
# Merge the two medial wall borders into a single border
#
cmdList = (progName,
           "-surface-border-merge",
           separateBorderProjectionFileName,
           mergedBorderProjectionFileName,
           medialWallBorderName,
           "LANDMARK.MedWall.DORSAL",
           "LANDMARK.MedWall.VENTRAL",
           "-delete-input-border-projections",
           "-close-border")
runCommand(cmdList)
#
# Resample the border (2.0 mm spacing, all borders)
#
cmdList = (progName,
           "-surface-border-resample",
           fiducialCoordFileName,
           topologyFileName,
           mergedBorderProjectionFileName,
           mergedBorderProjectionFileName,
           str(2.0),
           "-all")
runCommand(cmdList)
#
# Create a region of interest that contains nodes within
# the medial wall border projection
#
# NOTE(review): the bare integer 0 below will make " ".join() inside
# runCommand raise TypeError -- it presumably needs to be str(0); confirm
# before running this step.
cmdList = (progName,
           "-surface-region-of-interest-selection",
           inflatedCoordFileName,
           topologyFileName,
           roiFileName,
           roiFileName,
           "-border-projection",
           mergedBorderProjectionFileName,
           medialWallBorderName,
           "M",
           "3D",
           0,
           "NORMAL")
runCommand(cmdList)
#
# Create the color for the medial wall paint (red: 255, 0, 0)
#
cmdList = (progName,
           "-color-file-add-color",
           areaColorFileName,
           areaColorFileName,
           paintName,
           str(255),
           str(0),
           str(0))
runCommand(cmdList)
#
# Create the color for the medial wall border (red: 255, 0, 0)
#
cmdList = (progName,
           "-color-file-add-color",
           borderColorFileName,
           borderColorFileName,
           medialWallBorderName,
           str(255),
           str(0),
           str(0))
runCommand(cmdList)
#
# Create a NEW paint file with one column named "Geography"
#
cmdList = (progName,
           "-paint-file-create",
           paintFileName,
           str(1),
           "-coordinate-file",
           inflatedCoordFileName,
           "-set-column-name 1 Geography")
runCommand(cmdList)
#
# Assign nodes in ROI to paint
#
cmdList = (progName,
           "-paint-assign-to-nodes",
           paintFileName,
           paintFileName,
           str(1),
           paintName,
           "-assign-from-roi-file",
           roiFileName)
runCommand(cmdList)
#
# Smooth the medial wall (strength 1.0, 50 iterations, ROI-restricted)
# NOTE(review): "-roi-file " carries a trailing space; harmless after the
# blank-join, but verify it is intentional.
#
cmdList = (progName,
           "-surface-smoothing",
           fiducialCoordFileName,
           smoothedFiducialCoordFileName,
           topologyFileName,
           str(1.0),
           str(50),
           str(-1),
           "-roi-file ",
           roiFileName)
runCommand(cmdList)
#
# Generate curvature on the smoothed surface
#
cmdList = (progName,
           "-surface-curvature",
           smoothedFiducialCoordFileName,
           topologyFileName,
           surfaceShapeFileName,
           surfaceShapeFileName,
           "-generate-mean-curvature",
           "-mean-column-name \"Folding (Mean Curvature) MWS\"")
runCommand(cmdList)
"michael.hanke@gmail.com"
] | michael.hanke@gmail.com |
2fda7128de47a50c6ff375d42206469d47952984 | 68a088346090ae4e929c208906b14181da0f92f6 | /第一阶段/2. Python01/day03/exercise/01_str_rectangle.py | 753207cbeb3641e48a534062ed628f0b1941a2de | [] | no_license | LONG990122/PYTHON | d1530e734ae48416b5f989a4d97bd1d66d165b91 | 59a2a2a0b033c8ad0cb33d6126c252e9d574eff7 | refs/heads/master | 2020-07-07T09:38:03.501705 | 2019-09-23T16:28:31 | 2019-09-23T16:28:31 | 203,316,565 | 0 | 0 | null | 2019-10-23T15:02:33 | 2019-08-20T06:47:44 | HTML | UTF-8 | Python | false | false | 401 | py | # 01_str_rectangle.py
# 写一个程序,打印一个高度为4的矩形方框
# 要求输入一个整数,此整数代表矩形的宽度,输出此矩形
# 如:
# 请输入宽度: 10
# 打印如下:
# ##########
# # #
# # #
# ##########
w = int(input("请输入宽度: "))
line1 = '#' * w
line2 = '#' + ' ' * (w - 2) + '#'
print(line1)
print(line2)
print(line2)
print(line1) | [
"54302090+LONG990122@users.noreply.github.com"
] | 54302090+LONG990122@users.noreply.github.com |
f8edac2da5a69b605d4359c2f8f216e0976f96d9 | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/techsupport/techsupport_vcsdebug.py | 5ab73d62848edc2f0279a82d14c841396240e154 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 820 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class GeneratesAvcsDebugFile(A10BaseClass):
""" :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/techsupport/vcsdebug`.
Class Generates aVCS debug file supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "Generates aVCS debug file"
self.a10_url="/axapi/v3/techsupport/vcsdebug"
self.DeviceProxy = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
556f9a4f94a174e5febc071241d4cba8792817cd | fcecbfe364306bd4da1ac316d38e5dc4fc7942a2 | /model_util/test_diag.py | 2ab0bbfe19f5ccae96a8533164a08bc8ef4c68f1 | [] | no_license | darothen/crm-tools | ce7cda50ca8d49c801848d60a83372a6f3eef273 | 095da56aba6911e472093a2ebae8a73503ff0855 | refs/heads/master | 2016-09-06T14:53:28.088780 | 2014-05-05T14:12:13 | 2014-05-05T14:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from crm_io import read_diag, save_diag
#filename = "/Volumes/legion_home/models/crm71_2d/OUTPUT/DIAG"
filename = "/Volumes/legion_storage02/crm_testing/kshv_2d_largedomain/kshv_500ccn_100in/DIAG"
nz = 65
spmd = True
ts = 20
nt = read_diag(filename, nz, spmd)
all_time, all_tdiag = save_diag(filename, nz, nt, spmd)
print all_time
print all_tdiag[ts,:,36]
from pylab import *
import numpy as np
fig = plt.figure(1)
plt.clf()
d = np.ma.masked_invalid(all_tdiag[ts,:,36])
d = np.ma.masked_outside(d, -10, 10)
d = np.ma.filled(d, 0.)
print d.shape
plt.plot(d, range(nz), "o") | [
"darothen@mit.edu"
] | darothen@mit.edu |
76a142e0bf1248e45f81f90da77866782710a3d6 | b39d9ef9175077ac6f03b66d97b073d85b6bc4d0 | /Terracortril_med_Polymyxin_B_eye_or_ear_ointment_SmPC.py | 1981573d4c6dae71344cf9ec7d1423466396b794 | [] | no_license | urudaro/data-ue | 2d840fdce8ba7e759b5551cb3ee277d046464fe0 | 176c57533b66754ee05a96a7429c3e610188e4aa | refs/heads/master | 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | {'_data': [['Common',
[['Eye',
u'Sveda vid instillation. Vid l\xe5ngtidsbehandling hornhinneskada och intraokul\xe4r tryckstegring.']]],
['Uncommon', [['Eye', u'Aktivering av viruskeratit.']]],
['Rare', [['Eye', u'Korneaperforation, katarakt vid l\xe5ngtidsbehandling.']]],
['Unknown',
[['Infections', u'Infektion, bakteriell infektion, svampinfektion'],
['Immune system', u'\xd6verk\xe4nslighetsreaktioner'],
['Eye',
u'\xd6kat t\xe5rfl\xf6de, glaukom, synnervsp\xe5verkan, br\xe4nnande k\xe4nsla, k\xe4nsla av fr\xe4mmande partiklar, f\xf6rs\xe4mrad l\xe4kning'],
['Skin',
u'Kontakdermatit Allm\xe4nna symtom och/eller symtom vid adminstreringsst\xe4llet']]]],
'_pages': [2, 3],
u'_rank': 7,
u'_type': u'LSFU'} | [
"daro@daro-ThinkPad-X220.(none)"
] | daro@daro-ThinkPad-X220.(none) |
970db4f9221a1a43b4d38341b08f46bb0370580c | 25403dd331c6f273190461ed57c12c9f633853b9 | /chiton/core/environment.py | 1e0113b34cfc4ea1dddea7ec596c12a3ce322fb8 | [] | no_license | justinlocsei/chiton | d1a91a57ff90f16aa375d0f4c6a0b093a66d2d38 | 6ca38962d08a6ca154434a1f78235155710ffeec | refs/heads/master | 2021-03-27T15:10:07.811691 | 2017-01-30T01:18:33 | 2017-01-30T01:18:33 | 48,400,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | import os.path
import re
from voluptuous import All, Length, Invalid, MultipleInvalid, Schema
from chiton.core.exceptions import ConfigurationError
# All known log levels
LOG_LEVELS = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
def use_config(user_data={}):
    """Load an external JSON configuration file.

    The user values are validated and then applied over the defaults with a
    shallow, top-level ``dict.update`` (nested dicts such as ``database`` are
    replaced wholesale, not deep-merged), so the returned value is always a
    complete configuration.

    Args:
        user_data (dict): The user customizations to apply to the base configuration

    Returns:
        dict: The merged configuration file

    Raises:
        chiton.core.exceptions.ConfigurationError: If the user settings are invalid
    """
    # NOTE(review): the mutable default argument is safe here only because
    # user_data is never mutated in this function.
    try:
        _validate_config(user_data)
    except MultipleInvalid as e:
        raise ConfigurationError('Invalid configuration: %s' % e)

    config_data = _default_config()
    config_data.update(user_data)

    return config_data
def _default_config():
"""Define the default configuration data."""
return {
'allow_api_browsing': False,
'allowed_hosts': [],
'amazon_associates_aws_access_key_id': None,
'amazon_associates_aws_secret_access_key': None,
'amazon_associates_tracking_id': None,
'conn_max_age': 0,
'database': {},
'debug': False,
'default_email': None,
'encryption_key': None,
'environment': None,
'file_logging': False,
'log_file': None,
'log_level': 'INFO',
'media_root': None,
'media_url': '/media/',
'previous_encryption_key': None,
'public_api': False,
'redis': {},
'secret_key': None,
'sentry_dsn': None,
'server_email': None,
'shopstyle_uid': None,
'static_root': None,
'static_url': '/static/',
'track_errors': False,
'trusted_proxy_ips': ['127.0.0.1']
}
def _validate_config(config):
    """Validate configuration data, raising an error for invalid data.

    Raises:
        voluptuous.MultipleInvalid: If any value fails its schema check.
            NOTE(review): voluptuous rejects unknown keys by default
            (PREVENT_EXTRA) -- confirm that is intended here.
    """
    Schema({
        'allow_api_browsing': bool,
        'allowed_hosts': [str],
        'amazon_associates_aws_access_key_id': All(str, Length(min=1)),
        'amazon_associates_aws_secret_access_key': All(str, Length(min=1)),
        'amazon_associates_tracking_id': All(str, Length(min=1), _AmazonAssociatesTrackingID()),
        'conn_max_age': int,
        'database': Schema({
            'engine': All(str, Length(min=1)),
            'host': All(str, Length(min=1)),
            'name': All(str, Length(min=1)),
            'password': All(str, Length(min=1)),
            'port': All(int),
            'user': All(str, Length(min=1))
        }),
        'debug': bool,
        'default_email': All(str, Length(min=1)),
        'encryption_key': All(str, Length(min=1)),
        'environment': All(str, Length(min=1)),
        'file_logging': bool,
        'log_file': All(str, Length(min=1), _AbsolutePath()),
        'log_level': All(str, Length(min=1), _LogLevel()),
        'media_root': All(str, Length(min=1), _AbsolutePath()),
        'media_url': All(str, Length(min=1), _MediaUrl()),
        'public_api': bool,
        'previous_encryption_key': All(str, Length(min=1)),
        'redis': Schema({
            'db': int,
            'host': All(str, Length(min=1)),
            'port': int
        }),
        'secret_key': All(str, Length(min=1)),
        'sentry_dsn': All(str, Length(min=1)),
        'server_email': All(str, Length(min=1)),
        'shopstyle_uid': All(str, Length(min=1)),
        'static_root': All(str, Length(min=1), _AbsolutePath()),
        'static_url': All(str, Length(min=1), _MediaUrl()),
        'track_errors': bool,
        'trusted_proxy_ips': [str]
    })(config)
def _AbsolutePath():
"""Ensure that a string is an absolute file path."""
def validator(value):
if not os.path.isabs(value):
raise Invalid('%s must be an absolute path' % value)
return validator
def _AmazonAssociatesTrackingID():
"""Ensure that a string is an Amazon Associates tracking ID."""
def validator(value):
if not re.search('-2\d$', value):
raise Invalid('%s must be an Amazon Associates tracking ID' % value)
return validator
def _LogLevel():
    """Build a validator that accepts only names listed in LOG_LEVELS."""
    def validator(level):
        if level in LOG_LEVELS:
            return
        raise Invalid('%s must be a log level (%s)' % (level, ', '.join(LOG_LEVELS)))
    return validator
def _MediaUrl():
"""Ensure that a URL is a Django-style media URL ending in a slash."""
def validator(value):
if not value.endswith('/'):
raise Invalid('%s does not have a trailing slash' % value)
return validator
| [
"justin.locsei@gmail.com"
] | justin.locsei@gmail.com |
59f8c7d57f0528ed4dfb3503f979f7eb63155d5d | 149660428ec7570b02b9e8b3d494dcd548e80005 | /01-04_python基础/05_高级数据类型/hm_17_字符串的查找和替换.py | 9801733fcb09d371234b006f6705420a8c32271f | [] | no_license | kenzzuli/hm_15 | 603eb178e476f946eb57b1cdf0c85ba5d65e8d58 | db8a6d13776e55aa4e05ff9f39e9c8e98d59d8ee | refs/heads/master | 2023-08-07T01:57:01.993474 | 2021-09-23T15:49:19 | 2021-09-23T15:49:19 | 359,322,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | hello_str = "hello world"
# 1.判断是否以指定字符串开始
print(hello_str.startswith("hello")) # 大小写敏感
# 2.判断是否以指定字符串结束
print(hello_str.endswith("orld"))
# 3.查找指定字符串
# index同样也可以查找指定的子字符串在大字符串中的索引
print(hello_str.find("wo"))
print("*" * 100)
print(hello_str.rindex('o'))
print(hello_str.index("o"))
print(hello_str.rindex("r"))
print(hello_str.index("r"))
print("*" * 100)
# find 如果指定的字符串不存在,会返回-1
# index 如果指定的字符串不存在,会报错
print(hello_str.find("abc"))
print(hello_str.index("abc"))
# 4.替换字符串
# replace方法执行后,会返回一个新的字符串,但不会修改原有字符串的内容!!!
print(hello_str.replace("ello", "i"))
print(hello_str)
hi_str = "Hi Are you happy?"
print(hi_str.lower().startswith("h"))
| [
"820710063@qq.com"
] | 820710063@qq.com |
36070783ac0da6e39d149ff6a9e6e406d58e1bb4 | 5e1bf5340b28288027d946f136fa0838ca2621dd | /RL_Udemy/optimistic_initial_values.py | 05f34a93a3960471d89c25c00170bf64dd4f12cf | [] | no_license | DavisDataScience/DataInterviewPrep | 5964006933fa8dd1f9b684be94744f5c9505eadb | 55921a8752e349b9cd08833148c38ebb9210d4ed | refs/heads/master | 2020-12-30T11:02:49.782409 | 2018-04-20T07:11:11 | 2018-04-20T07:11:11 | 98,841,799 | 5 | 0 | null | 2017-11-08T09:54:28 | 2017-07-31T02:52:15 | HTML | UTF-8 | Python | false | false | 1,662 | py | # https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python
# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python
import numpy as np
import matplotlib.pyplot as plt
class Bandit:
    """One arm of a multi-armed bandit whose reward estimate starts at an
    optimistic upper limit, which drives exploration without epsilon."""

    def __init__(self, m, upper_limit):
        self.m = m                # true mean of the reward distribution
        self.mean = upper_limit   # optimistic initial estimate
        self.N = 0                # number of pulls folded into the estimate

    def pull(self):
        """Draw one reward: a unit-variance Gaussian centered on the true mean."""
        return self.m + np.random.randn()

    def update(self, x):
        """Incorporate reward ``x`` into the running sample mean."""
        self.N += 1
        # Incremental mean update; algebraically identical to
        # (1 - 1/N) * mean + (1/N) * x.
        self.mean += (x - self.mean) / self.N
def run_experiment(m1, m2, m3, eps, N):
    """Run N greedy pulls over three optimistically-initialized bandits,
    plot the cumulative average reward, and return it.

    NOTE(review): ``eps`` is accepted but never used -- selection below is a
    pure argmax (optimistic initial values need no epsilon); the parameter is
    presumably kept for signature parity with the epsilon-greedy script.
    """
    bandits = [Bandit(m1,4), Bandit(m2,4), Bandit(m3,4)]  # upper limit 4 = optimistic start

    data = np.empty(N)

    for i in range(N):
        # greedy selection of the arm with the highest current estimate
        j = np.argmax([b.mean for b in bandits])
        x = bandits[j].pull()
        bandits[j].update(x)

        # for the plot
        data[i] = x
    # Running mean of the rewards after each pull.
    cumulative_average = np.cumsum(data) / (np.arange(N) + 1)

    # plot moving average ctr (plt.show() blocks until the window is closed)
    plt.plot(cumulative_average)
    plt.plot(np.ones(N)*m1)
    plt.plot(np.ones(N)*m2)
    plt.plot(np.ones(N)*m3)
    plt.xscale('log')
    plt.show()

    for b in bandits:
        print (b.mean)

    return cumulative_average
if __name__ == '__main__':
    # NOTE(review): the eps arguments only label the runs -- run_experiment
    # ignores them, so all three curves use the same greedy strategy.
    c_1 = run_experiment(1.0, 2.0, 3.0, 0.1, 100000)
    c_05 = run_experiment(1.0, 2.0, 3.0, 0.05, 100000)
    c_01 = run_experiment(1.0, 2.0, 3.0, 0.01, 100000)

    # log scale plot
    plt.plot(c_1, label='eps = 0.1')
    plt.plot(c_05, label='eps = 0.05')
    plt.plot(c_01, label='eps = 0.01')
    plt.legend()
    plt.xscale('log')
    plt.show()

    # linear plot
    plt.plot(c_1, label='eps = 0.1')
    plt.plot(c_05, label='eps = 0.05')
    plt.plot(c_01, label='eps = 0.01')
    plt.legend()
    plt.show()
"fespinosa@ucdavis.edu"
] | fespinosa@ucdavis.edu |
a386ad32ce1908a2c38fe109aef2276beec8507d | d2c54233a96b0de3137d320a86de726f87f6d3b4 | /cnn/struct/updateset_module.py | 420fca72eebd6129c8038d6af319637cdeb3851b | [
"MIT"
] | permissive | hslee1539/cnn | aa93e6c41e994b409b5ebcd6e1abfaef98cd0c60 | 816418af0a0057d777f41ac072c3a97fea7e2027 | refs/heads/master | 2020-05-01T13:30:25.781805 | 2019-09-10T08:47:24 | 2019-09-10T08:47:24 | 177,493,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | from import_lib import lib
from tensor.main_module import *
from ctypes import Structure, c_int, POINTER, c_float
# Tensor is POINTER(_Tensor)
class _UpdateSet(Structure):
    # ctypes mirror of the native `cnn_UpdateSet` struct:
    # delta / value / momentum tensors.
    _fields_ = [
        ('delta', Tensor),
        ('value', Tensor),
        ('momnt', Tensor)
    ]
# Native signatures for allocating/freeing update sets.
lib.cnn_create_updateset.argtypes = (Tensor, Tensor)
lib.cnn_create_updateset.restype = POINTER(_UpdateSet)
lib.cnn_release_updateset_deep.argtypes = [POINTER(_UpdateSet)]
#lib.cnn_release_updateset.argtypes = [POINTER(_UpdateSet)]

def _create(delta, value):
    # Allocate a new cnn_UpdateSet in native code from the two tensors.
    return lib.cnn_create_updateset(delta, value)

def _release(self, deep = True):
    # Free the native struct.
    # NOTE(review): the argtypes for cnn_release_updateset are commented out
    # above, so the deep=False branch relies on ctypes' default conversion.
    if(deep):
        lib.cnn_release_updateset_deep(self)
    else:
        lib.cnn_release_updateset(self)
    del self # unbinds only the local name, does not free memory (original note: "not verified")
# Property accessors delegating to the pointed-to ctypes struct (`contents`).
def _getDelta(self):
    return self.contents.delta

def _getValue(self):
    return self.contents.value

def _getMomnt(self):
    return self.contents.momnt

def _setDelta(self, value):
    self.contents.delta = value

def _setValue(self, value):
    self.contents.value = value

def _setMomnt(self, value):
    self.contents.momnt = value
# Public handle: the pointer type itself, augmented with properties/methods.
# (The Korean __doc__ below says: "class that adds properties and methods to
# a pointer to the cnn_UpdateSet struct".)
UpdateSet = POINTER(_UpdateSet)
UpdateSet.__doc__ = "cnn_UpdateSet 구조체의 포인터에 프로퍼티와 메소드를 추가한 클래스입니다."
UpdateSet.delta = property(_getDelta, _setDelta)
UpdateSet.value = property(_getValue, _setValue)
UpdateSet.momnt = property(_getMomnt, _setMomnt)
UpdateSet.create = staticmethod(_create)
UpdateSet.release = _release
| [
"qq1539@naver.com"
] | qq1539@naver.com |
25a9c9a9109a8396fc521ce6af6ccc72553ddb0a | 77717d0024c8597fec83600259ea5547abbc183a | /configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py | 1c8c581bccb439fbd4d5f7a4f67777162361b1f8 | [
"Apache-2.0"
] | permissive | fengyouliang/wheat_detection | 0a090ef5eda7f2c5463996f4795f9ce06dd04050 | d056123426a1260c29b486cbb8e44a88a0a3c5bc | refs/heads/master | 2022-11-17T15:09:29.113493 | 2020-07-18T13:47:34 | 2020-07-18T13:47:34 | 276,532,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | _base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| [
"1654388696@qq.com"
] | 1654388696@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.