| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | lengths 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | lengths 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | lengths 1 to 132 |
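
Each record below carries these fields in this order, with `content` holding the raw source file. The following is a minimal sketch of how rows of this shape might be inspected, assuming the dump is published as a Hugging Face `datasets` dataset; the dataset path below is a hypothetical placeholder:

```python
from datasets import load_dataset

# Hypothetical dataset path; substitute the actual repository for this dump.
ds = load_dataset("example-org/python-file-dump", split="train", streaming=True)

for row in ds:
    # Each row mirrors the schema above: repository metadata plus one file's content.
    if row["license_type"] == "permissive" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
```
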
6f5371f76a88a1c3070db0af82abb2248dbd8564
|
456703d469684b99d849bb386707359729af4e1e
|
/data.py
|
50649d1c690eab4c7947b38c8524416fbd32fd2e
|
[] |
no_license
|
blacklemons/userlist_mongo
|
9c05949f4d2a8c4232c1126020b66ad892857bc6
|
b341628cc5da8248c39e46f5a0e974807d9986d1
|
refs/heads/main
| 2023-05-05T11:49:13.272471
| 2021-06-03T06:56:56
| 2021-06-03T06:56:56
| 372,990,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
def Articles(title, description, author, edit):
    # Assemble an article document as a plain dict.
    articles = {
        'title': title,
        'description': description,
        'author': author,
        'edit': edit,
    }
    return articles


def Users(name, email, username, password):
    # Assemble a user document as a plain dict.
    users = {
        'name': name,
        'email': email,
        'username': username,
        'password': password,
    }
    return users
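

if __name__ == '__main__':
    # Hypothetical usage sketch, not part of the original file: build the
    # document dicts; the repo name (userlist_mongo) suggests they are then
    # inserted into MongoDB, but that step is omitted here.
    print(Articles('Hello', 'First post', 'admin', 'no'))
    print(Users('Ada', 'ada@example.com', 'ada', 'hunter2'))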
|
[
"you@example.com"
] |
you@example.com
|
67d9459b98c02585b18afecaf8c3df0f88840736
|
95e9ec4b3b0d86063da53a0e62e138cf794cce3a
|
/python/20190430/模块/demo09.py
|
b8805798af569441c98514893091d564e1240813
|
[] |
no_license
|
wjl626nice/1902
|
c3d350d91925a01628c9402cbceb32ebf812e43c
|
5a1a6dd59cdd903563389fa7c73a283e8657d731
|
refs/heads/master
| 2023-01-05T23:51:47.667675
| 2019-08-19T06:42:09
| 2019-08-19T06:42:09
| 180,686,044
| 4
| 1
| null | 2023-01-04T07:35:24
| 2019-04-11T00:46:43
|
Python
|
UTF-8
|
Python
| false
| false
| 369
|
py
|
# pillow
from PIL import Image

# Open an image file; note the path is relative to the current directory:
im = Image.open('1.png')
# Get the image size:
w, h = im.size
print('Original image size: %sx%s' % (w, h))
# Scale down to 50% (thumbnail resizes in place, preserving aspect ratio):
im.thumbnail((w // 2, h // 2))
print('Resize image to: %sx%s' % (w // 2, h // 2))
# Save the scaled image in PNG format:
im.save('thumbnail.png', 'PNG')
|
[
"18537160262@qq.com"
] |
18537160262@qq.com
|
a3cddb14f4568125e0ea0d932da937365312500e
|
ae7cb8543a98b7d65295a422c7971e7a37f921cd
|
/minerals/models.py
|
02bcb451cf3a5a60d68581cbcd838dfa627fcbad
|
[] |
no_license
|
frankRose1/mineral-catalog
|
a74386278073d1b9e92fe44e1cc348a1b498380e
|
88f91a55105532fe197c84d050a5d5bd67167a9d
|
refs/heads/master
| 2020-04-16T12:49:23.715372
| 2019-03-14T00:23:27
| 2019-03-14T00:23:27
| 165,597,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
from django.db import models
# Create your models here.
class Mineral(models.Model):
"""
Not all entries will have every field as shown in the json file in "mineral_data/mineral.json"
"""
name = models.CharField(max_length=255, blank=True, default='')
image_filename = models.CharField(max_length=255, blank=True, default='')
image_caption = models.TextField(blank=True, default='')
category = models.CharField(max_length=255, blank=True, default='')
formula = models.CharField(max_length=255, blank=True, default='')
strunz_classification = models.CharField(max_length=255, blank=True, default='')
color = models.CharField(max_length=255, blank=True, default='')
crystal_system = models.CharField(max_length=255, blank=True, default='')
unit_cell = models.CharField(max_length=255, blank=True, default='')
crystal_symmetry = models.CharField(max_length=255, blank=True, default='')
cleavage = models.CharField(max_length=255, blank=True, default='')
mohs_scale_hardness = models.CharField(max_length=255, blank=True, default='')
luster = models.CharField(max_length=255, blank=True, default='')
streak = models.CharField(max_length=255, blank=True, default='')
diaphaneity = models.CharField(max_length=255, blank=True, default='')
optical_properties = models.CharField(max_length=255, blank=True, default='')
refractive_index = models.CharField(max_length=255, blank=True, default='')
crystal_habit = models.CharField(max_length=255, blank=True, default='')
specific_gravity = models.CharField(max_length=255, blank=True, default='')
group = models.CharField(max_length=255, blank=True, default='')
|
[
"frank.rosendorf1@gmail.com"
] |
frank.rosendorf1@gmail.com
|
94099f7539dd29af5d9baf1b7e65aae919dc5eb1
|
b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb
|
/samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/paths/response_body_post_enum_with1_does_not_match_true_response_body_for_content_types/post.py
|
6c03a7fcfbbe3990bd37e435d7afe3c07aa49f1e
|
[
"Apache-2.0"
] |
permissive
|
FallenRiteMonk/openapi-generator
|
f8b98940219eecf14dc76dced4b0fbd394522aa3
|
b6576d11733ecad6fa4a0a616e1a06d502a771b7
|
refs/heads/master
| 2023-03-16T05:23:36.501909
| 2022-09-02T01:46:56
| 2022-09-02T01:46:56
| 164,609,299
| 0
| 0
|
Apache-2.0
| 2019-01-08T09:08:56
| 2019-01-08T09:08:56
| null |
UTF-8
|
Python
| false
| false
| 4,699
|
py
|
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from unit_test_api.model.enum_with1_does_not_match_true import EnumWith1DoesNotMatchTrue
from . import path
SchemaFor200ResponseBodyApplicationJson = EnumWith1DoesNotMatchTrue
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
def _post_enum_with1_does_not_match_true_response_body_for_content_types_oapg(
self: api_client.Api,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostEnumWith1DoesNotMatchTrueResponseBodyForContentTypes(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def post_enum_with1_does_not_match_true_response_body_for_content_types(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_enum_with1_does_not_match_true_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def post(
self: BaseApi,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_enum_with1_does_not_match_true_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
|
[
"noreply@github.com"
] |
FallenRiteMonk.noreply@github.com
|
533a8674b5a0ea2d97c2032ad2269f7fe0835047
|
818173671edf15d7c6d775ed003bcd35608233f9
|
/demos/go/wscript
|
13ea4561dd2b6d1e18137a02eb74523dc6ffdb69
|
[] |
no_license
|
zsx/waf
|
a1e87e079e22443ae3ed98e08cefc705b5f73906
|
66d1c6ede4ceda66a98dbbf9dd473f1d5c5752ba
|
refs/heads/master
| 2021-01-13T12:56:12.379186
| 2010-07-12T17:27:13
| 2010-07-12T17:27:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,688
|
#!/usr/bin/env python
# encoding: utf-8
# Tom Wambold tom5760 gmail
# Thomas Nagy, 2010 (ita)
"""
if libgmp is present, try building with 'waf --exe'
"""
top = '.'
out = 'build'
def options(opt):
opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled')
def configure(ctx):
ctx.check_tool('go')
try:
ctx.check_tool('gcc')
ctx.check_cc(fragment='#include <gmp.h>\nint main() {return 0;}\n', uselib_store='GMP', lib='gmp')
except ctx.errors.ConfigurationError:
ctx.env.TRY_CGO = False
else:
ctx.env.TRY_CGO = True
def build(ctx):
ctx(
features = 'go gopackage',
target = 'other',
source = [
'other/a.go',
#'other/b.go', # using two source files for gopack does not seem to work anymore
],
)
ctx(
features = 'go goprogram',
target = 'test',
uselib_local = 'other',
source = 'main.go',
includes = '.',
)
if ctx.env.TRY_CGO:
# see http://code.google.com/p/go/issues/detail?id=533
# so we have to move the files, grrrrr
ctx(name='cgo', rule='${CGO} ${SRC} && mv ${gen.path.abspath()}/${TGT[0].name} ${gen.path.abspath()}/${TGT[1].name} ${TGT[0].parent.abspath()}',
    target='gmp.cgo1.go gmp.cgo2.c _cgo_gotypes.go _cgo_defun.c',
source='gmp.go',
shell=True)
ctx(features='c cshlib', source='gmp.cgo2.c', target=ctx.path.find_or_declare('cgo_gmp.so'), uselib='GMP')
ctx.add_group()
ctx(features='go goprogram', source='pi.go', target='pi')
from waflib import Options, Utils
if ctx.env.TRY_CGO and Options.options.exe:
def exe(bld):
p = Utils.subprocess.Popen('LD_LIBRARY_PATH=build ./build/pi', shell=True)
p.wait()
ctx.add_post_fun(exe)
|
[
"tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85"
] |
tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85
|
9700f7b32038e32409736e25ab200fda2427f5dd
|
46dd1ad6fe93777a4dce84166b64cb9adb679e62
|
/test/functional/interface_bitcoin_cli.py
|
be8c48fd1a8c0abb45f0f9939a095625cc50b098
|
[
"MIT"
] |
permissive
|
aleomartinez/EducaCoin
|
14266500dc3c5aabfe8bebf17c8903aecea0af8c
|
2282d6affdd2192a79efdce579ddd0d8576d950d
|
refs/heads/master
| 2020-03-31T01:28:49.612215
| 2018-10-05T14:43:36
| 2018-10-05T14:43:36
| 151,783,304
| 0
| 0
|
MIT
| 2018-10-05T22:11:11
| 2018-10-05T22:11:11
| null |
UTF-8
|
Python
| false
| false
| 3,669
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test educacoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
self.log.info("Compare responses from gewalletinfo RPC and `educacoin-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `educacoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `educacoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
|
[
"you@example.com"
] |
you@example.com
|
82cbf15cb149b4e72ab5811f65c9fee2d676ee8d
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_noisy959.py
|
a9b2211c27ba41c3a2a043b44c15010fd25e7f71
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=23
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=15
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=20
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=21
c.append(cirq.H.on(input_qubit[0])) # number=22
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.Y.on(input_qubit[2])) # number=19
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=17
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=18
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy959.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
624f9ac70390eb2b0177c09ce025a3b00a2bd5ec
|
87a66fbed380353955cc6160c8fffe56dee785be
|
/bert2bert.py
|
3ff4c7f15e16f72c1b0e7a7ec1fec8f221d33179
|
[
"Apache-2.0"
] |
permissive
|
JngHyun/2021-BOAZ-bert2bert
|
ca84e8f4ad444f27a31ac8f74469826adefe3a19
|
63e95cc87b231ebf344950df80a43abc1139cb7d
|
refs/heads/main
| 2023-03-27T01:13:35.747955
| 2021-03-25T07:17:39
| 2021-03-25T07:17:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,735
|
py
|
from typing import List
from transformers import (
EncoderDecoderModel,
BertConfig,
EncoderDecoderConfig,
BertModel, BertTokenizer,
)
from transformers.modeling_bart import shift_tokens_right
from kobert_transformers import get_tokenizer
from lightning_base import LightningBase
import torch
class Bert2Bert(LightningBase):
def __init__(
self,
model_save_path: str,
batch_size: int,
num_gpus: int,
max_len: int = 128,
lr: float = 3e-5,
weight_decay: float = 1e-4,
save_step_interval: int = 1000,
accelerator: str = "ddp",
precision: int = 16,
use_amp: bool = True,
) -> None:
super(Bert2Bert, self).__init__(
model_save_path=model_save_path,
max_len=max_len,
batch_size=batch_size,
num_gpus=num_gpus,
lr=lr,
weight_decay=weight_decay,
save_step_interval=save_step_interval,
accelerator=accelerator,
precision=precision,
use_amp=use_amp,
)
encoder_config = BertConfig.from_pretrained("monologg/kobert")
decoder_config = BertConfig.from_pretrained("monologg/kobert")
config = EncoderDecoderConfig.from_encoder_decoder_configs(
encoder_config, decoder_config
)
self.model = EncoderDecoderModel(config)
self.tokenizer = KoBertTokenizer()
state_dict = BertModel.from_pretrained("monologg/kobert").state_dict()
self.model.encoder.load_state_dict(state_dict)
self.model.decoder.bert.load_state_dict(state_dict, strict=False)
        # the cross-attention and LM head are trained from scratch
def training_step(self, batch, batch_idx):
src, tgt = batch[0], batch[1]
src_input = self.tokenizer.encode_batch(src, max_length=self.max_len)
tgt_input = self.tokenizer.encode_batch(tgt, max_length=self.max_len)
input_ids = src_input["input_ids"].to(self.device)
attention_mask = src_input["attention_mask"].to(self.device)
labels = tgt_input["input_ids"].to(self.device)
decoder_input_ids = shift_tokens_right(
labels, self.tokenizer.token2idx["[PAD]"]
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
)
lm_logits = outputs[0]
loss_fn = torch.nn.CrossEntropyLoss(
ignore_index=self.tokenizer.token2idx["[PAD]"]
)
lm_loss = loss_fn(lm_logits.view(-1, lm_logits.shape[-1]), labels.view(-1))
self.save_model()
return {"loss": lm_loss}
class KoBertTokenizer(object):
def __init__(self):
self.tokenizer = get_tokenizer()
self.token2idx = self.tokenizer.token2idx
self.idx2token = {v: k for k, v in self.token2idx.items()}
def encode_batch(self, x: List[str], max_length):
max_len = 0
result_tokenization = []
for i in x:
tokens = self.tokenizer.encode(i, max_length=max_length, truncation=True)
result_tokenization.append(tokens)
if len(tokens) > max_len:
max_len = len(tokens)
padded_tokens = []
for tokens in result_tokenization:
padding = (torch.ones(max_len) * self.token2idx["[PAD]"]).long()
padding[: len(tokens)] = torch.tensor(tokens).long()
padded_tokens.append(padding.unsqueeze(0))
padded_tokens = torch.cat(padded_tokens, dim=0).long()
mask_tensor = torch.ones(padded_tokens.size()).long()
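        # Three-step mask construction: first mark non-PAD positions as -1,
        # then zero the PAD positions, then flip the -1 markers to 1, giving
        # a standard 1-for-token / 0-for-padding attention mask.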
attention_mask = torch.where(
padded_tokens == self.token2idx["[PAD]"], padded_tokens, mask_tensor * -1
).long()
attention_mask = torch.where(
attention_mask == -1, attention_mask, mask_tensor * 0
).long()
attention_mask = torch.where(
attention_mask != -1, attention_mask, mask_tensor
).long()
return {
"input_ids": padded_tokens.long(),
"attention_mask": attention_mask.long(),
}
def decode(self, tokens):
# remove special tokens
# unk, pad, cls, sep, mask
tokens = [token for token in tokens
if token not in [0, 1, 2, 3, 4]]
decoded = [self.idx2token[token] for token in tokens]
if "▁" in decoded[0] and "▁" in decoded[1]:
# fix decoding bugs
tokens = tokens[1:]
return self.tokenizer.decode(tokens)
def decode_batch(self, list_of_tokens):
return [self.decode(tokens) for tokens in list_of_tokens]
|
[
"pjh09890989@gmail.com"
] |
pjh09890989@gmail.com
|
63bb8cfeb317f62af40c54d6d6ba604724dc264e
|
691d3f3e04d354e11772335064f33245e1ed8c28
|
/lib/galaxy/datatypes/indexers/coverage.py
|
2bfec91c25fb05f513831206a3471b71db5749ea
|
[
"CC-BY-2.5",
"MIT"
] |
permissive
|
dbcls/dbcls-galaxy
|
934a27cc13663549d5208158fc0b2821609399a8
|
6142165ef27f6a02aee42f26e0b94fed67ecc896
|
refs/heads/master
| 2016-09-05T22:53:27.553419
| 2009-09-09T06:35:28
| 2009-09-09T06:35:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,442
|
py
|
#!/usr/bin/env python
"""
Read a chromosome of coverage data, and write it as an npy array, as
well as averages over regions of progressively larger size in powers of 10
"""
from __future__ import division
import sys
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
import bx.wiggle
from bx.cookbook import doc_optparse
from bx import misc
max2 = max
pkg_resources.require("numpy>=1.2.1")
from numpy import *
import tempfile
import os
def write_chrom(max, out_base, instream):
scores = zeros( max, float32 ) * nan
# Fill array from wiggle
max_value = 0
min_value = 0
for line in instream:
line = line.rstrip("\n\r")
(chrom, pos, val) = line.split("\t")
pos, val = int(pos), float(val)
scores[pos] = val
# Write ra
fname = "%s_%d" % ( out_base, 1 )
save( fname, scores )
os.rename( fname+".npy", fname )
# Write average
for window in 10, 100, 1000, 10000, 100000:
input = scores.copy()
size = len( input )
input.resize( ( ( size / window ), window ) )
masked = ma.masked_array( input, isnan( input ) )
averaged = mean( masked, 1 )
averaged.set_fill_value( nan )
fname = "%s_%d" % ( out_base, window )
save( fname, averaged.filled() )
del masked, averaged
os.rename( fname+".npy", fname )
def main():
max = int( 512*1024*1024 )
# get chroms and lengths
chroms = {}
LEN = {}
for line in open(sys.argv[1],"r"):
line = line.rstrip("\r\n")
fields = line.split("\t")
(chrom, pos, forward) = fields[0:3]
reverse = 0
if len(fields) == 4: reverse = int(fields[3])
forward = int(forward)+reverse
pos = int(pos)
chrom_file = chroms.get(chrom, None)
if not chrom_file:
chrom_file = chroms[chrom] = tempfile.NamedTemporaryFile()
chrom_file.write("%s\t%s\t%s\n" % (chrom,pos,forward))
LEN[chrom] = max2( LEN.get(chrom,0), pos+1 )
for chrom, stream in chroms.items():
stream.seek(0)
prefix = os.path.join(sys.argv[2], chrom)
write_chrom( LEN[chrom], prefix, stream )
manifest_file = open( os.path.join( sys.argv[2], "manifest.tab" ),"w" )
for key, value in LEN.items():
print >> manifest_file, "%s\t%s" % (key, value)
manifest_file.close()
if __name__ == "__main__": main()
|
[
"h-morita@esm.co.jp"
] |
h-morita@esm.co.jp
|
cad859d418ce2e7f4caf5b06ea68e3865b327913
|
199145122c35976fbfc22f2d709458bf67772f95
|
/apps/hosts/urls.py
|
8d52a9078d69ec7982ad7b9679e2e9841d265abf
|
[
"Apache-2.0"
] |
permissive
|
yhgnice/toolsvb
|
6109adbce89dd645da342d619acbcaca31b11efa
|
35f9d27ee2439d134cab160a7cf930ea13a31d26
|
refs/heads/master
| 2020-05-24T05:15:40.112999
| 2017-03-14T06:32:23
| 2017-03-14T06:32:23
| 84,824,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Nice... on '2017/3/9 20:32'
from django.conf.urls import url
from .views import RebootServices
urlpatterns = [
    # host tools
url(r'^tools/$', RebootServices.as_view(), name="tools"),
]
|
[
"123"
] |
123
|
607c473b42712149c56ebae6342712cfda2c7ff2
|
a289ad8df7840045a659db4f0f936b09494243b3
|
/gruvi/error.py
|
d0483517992c9c2b8de1ea8e0e33f2443658ca33
|
[
"MIT"
] |
permissive
|
tijko/gruvi
|
a29414bc757f9b19cfc457df36e270c5fefef183
|
558faa181390dfac83cd42fdcafb1850008e4ac5
|
refs/heads/master
| 2020-12-11T05:34:23.897520
| 2014-01-04T06:59:29
| 2014-01-04T06:59:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2013 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import sys
from pyuv.error import UVError
__all__ = ['Error', 'Timeout', 'Cancelled']
Error = UVError
class Timeout(Error):
"""A timeout has occurred."""
class Cancelled(Error):
"""A fiber or calback was cancelled."""
# The following is a pretty bad hack.. We want to use Sphinx's "automodule" to
# document most of our modules in the API reference section, and we want it to
# show inherited members. The result is that it shows an ugly "with_traceback"
# method for gruvi.Error. We fix that by setting that method to None if and
# only if we are running under Sphinx.
if hasattr(sys, 'running_under_sphinx'):
Error.with_traceback = None
|
[
"geertj@gmail.com"
] |
geertj@gmail.com
|
4f0ed701e5f9fc81b15e8550d01102e7412e4ae4
|
3665e5e6946fd825bb03b3bcb79be96262ab6d68
|
/jc/parsers/df.py
|
817f0a21bbaf233bfe3351eeefdabda7f96d9361
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
philippeitis/jc
|
a28b84cff7fb2852a374a7f0f41151b103288f26
|
d96b3a65a98bc135d21d4feafc0a43317b5a11fa
|
refs/heads/master
| 2021-02-16T05:03:03.022601
| 2020-03-04T16:30:52
| 2020-03-04T16:30:52
| 244,969,097
| 0
| 0
|
MIT
| 2020-03-08T21:10:36
| 2020-03-04T18:01:38
| null |
UTF-8
|
Python
| false
| false
| 5,176
|
py
|
"""jc - JSON CLI output utility df Parser
Usage:
specify --df as the first argument if the piped input is coming from df
Compatibility:
'linux', 'darwin'
Examples:
$ df | jc --df -p
[
{
"filesystem": "devtmpfs",
"1k_blocks": 1918820,
"used": 0,
"available": 1918820,
"use_percent": 0,
"mounted_on": "/dev"
},
{
"filesystem": "tmpfs",
"1k_blocks": 1930668,
"used": 0,
"available": 1930668,
"use_percent": 0,
"mounted_on": "/dev/shm"
},
{
"filesystem": "tmpfs",
"1k_blocks": 1930668,
"used": 11800,
"available": 1918868,
"use_percent": 1,
"mounted_on": "/run"
},
...
]
$ df | jc --df -p -r
[
{
"filesystem": "devtmpfs",
"1k_blocks": "1918820",
"used": "0",
"available": "1918820",
"use_percent": "0%",
"mounted_on": "/dev"
},
{
"filesystem": "tmpfs",
"1k_blocks": "1930668",
"used": "0",
"available": "1930668",
"use_percent": "0%",
"mounted_on": "/dev/shm"
},
{
"filesystem": "tmpfs",
"1k_blocks": "1930668",
"used": "11800",
"available": "1918868",
"use_percent": "1%",
"mounted_on": "/run"
},
...
]
"""
import jc.utils
import jc.parsers.universal
class info():
version = '1.1'
description = 'df command parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
compatible = ['linux', 'darwin']
magic_commands = ['df']
__version__ = info.version
def process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (dictionary) raw structured data to process
Returns:
List of dictionaries. Structured data with the following schema:
[
{
"filesystem": string,
"size": string,
"1k_blocks": integer,
"512_blocks": integer,
"used": integer,
"available": integer,
"capacity_percent": integer,
"ifree": integer,
"iused": integer,
"use_percent": integer,
"iused_percent": integer,
"mounted_on": string
}
]
"""
for entry in proc_data:
# change 'avail' to 'available'
if 'avail' in entry:
entry['available'] = entry.pop('avail')
# change 'use%' to 'use_percent'
if 'use%' in entry:
entry['use_percent'] = entry.pop('use%')
# change 'capacity' to 'capacity_percent'
if 'capacity' in entry:
entry['capacity_percent'] = entry.pop('capacity')
# change '%iused' to 'iused_percent'
if '%iused' in entry:
entry['iused_percent'] = entry.pop('%iused')
# change any entry for key with '_blocks' in the name to int
for k in entry:
if str(k).find('_blocks') != -1:
try:
blocks_int = int(entry[k])
entry[k] = blocks_int
                except ValueError:
entry[k] = None
# remove percent sign from 'use_percent', 'capacity_percent', and 'iused_percent'
if 'use_percent' in entry:
entry['use_percent'] = entry['use_percent'].rstrip('%')
if 'capacity_percent' in entry:
entry['capacity_percent'] = entry['capacity_percent'].rstrip('%')
if 'iused_percent' in entry:
entry['iused_percent'] = entry['iused_percent'].rstrip('%')
# change used, available, use_percent, capacity_percent, ifree, iused, iused_percent to int
int_list = ['used', 'available', 'use_percent', 'capacity_percent', 'ifree', 'iused', 'iused_percent']
for key in int_list:
if key in entry:
try:
key_int = int(entry[key])
entry[key] = key_int
                except ValueError:
entry[key] = None
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) output preprocessed JSON if True
quiet: (boolean) suppress warning messages if True
Returns:
List of dictionaries. Raw or processed structured data.
"""
if not quiet:
jc.utils.compatibility(__name__, info.compatible)
cleandata = data.splitlines()
# fix headers
cleandata[0] = cleandata[0].lower()
cleandata[0] = cleandata[0].replace('-', '_')
cleandata[0] = cleandata[0].replace('mounted on', 'mounted_on')
# parse the data
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
if raw:
return raw_output
else:
return process(raw_output)
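

if __name__ == '__main__':
    # Hypothetical usage sketch, not part of the original module: parse the
    # output of a locally captured `df` run.
    import subprocess
    df_text = subprocess.run(['df'], capture_output=True, text=True).stdout
    print(parse(df_text, quiet=True))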
|
[
"kellyjonbrazil@gmail.com"
] |
kellyjonbrazil@gmail.com
|
75b7d13d54b027208ae7bf83b28a23721fbfcc21
|
d44c989d1082ec91ae420569f79e39105ec9adf0
|
/Convert Binary Number in a Linked List to Integer.py
|
d8732b80748c73d1c4539378955d8afbfe29849a
|
[] |
no_license
|
hsinhuibiga/Nov
|
c5a79e265a6afcfd03f04e23914d3924129c6389
|
15b2f37f96ded183ab3507a95985900a9d5d3ddc
|
refs/heads/main
| 2023-01-14T13:11:02.568683
| 2020-11-22T13:40:36
| 2020-11-22T13:40:36
| 309,392,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
#Convert Binary Number in a Linked List to Integer
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def getDecimalValue(self, head):
"""
:type head: ListNode
:rtype: int
"""
binary = ''
while head != None:
binary += str(head.val)
head = head.next
return int(binary,2)
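

if __name__ == '__main__':
    # Hypothetical usage sketch, not part of the original file: the list
    # 1 -> 0 -> 1 encodes binary 101, i.e. decimal 5.
    class ListNode(object):
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next

    head = ListNode(1, ListNode(0, ListNode(1)))
    print(Solution().getDecimalValue(head))  # 5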
|
[
"noreply@github.com"
] |
hsinhuibiga.noreply@github.com
|
36c519d580be12825f3bf43d17406404d77ab275
|
aba2dd3ed154e1307e3ffb360d22c400bc8e17ab
|
/bib2json.py
|
2688d16b53929bd3986a2b8509fc9d5acfecb38d
|
[
"MIT"
] |
permissive
|
SujiKim6/rebiber
|
3938406df01e5aa61a9d9cf24bb74bd4fed82787
|
6617792dbfdb860d96f15027210381215c63685d
|
refs/heads/main
| 2023-02-21T08:22:52.610870
| 2021-01-26T09:41:06
| 2021-01-26T09:57:02
| 332,980,291
| 0
| 0
|
MIT
| 2021-01-26T05:19:34
| 2021-01-26T05:19:33
| null |
UTF-8
|
Python
| false
| false
| 1,802
|
py
|
import json
import re
import sys
import bibtexparser
import argparse
from tqdm import tqdm
def normalize_title(title_str):
title_str = re.sub(r'[^a-zA-Z]',r'', title_str)
return title_str.lower().replace(" ", "").strip()
def load_bib_file(bibpath="acl.bib"):
all_bib_entries = []
with open(bibpath) as f:
bib_entry_buffer = []
for line in f.readlines():
# line = line.strip()
bib_entry_buffer.append(line)
if line == "}\n":
all_bib_entries.append(bib_entry_buffer)
bib_entry_buffer = []
return all_bib_entries
def build_json(all_bib_entries):
    all_bib_dict = {}
    num_exceptions = 0
for bib_entry in tqdm(all_bib_entries[:]):
bib_entry_str = " ".join([line for line in bib_entry if "month" not in line.lower()]).lower()
try:
bib_entry_parsed = bibtexparser.loads(bib_entry_str)
bib_key = normalize_title(bib_entry_parsed.entries[0]["title"])
all_bib_dict[bib_key] = bib_entry
except Exception as e:
print(bib_entry)
print(e)
            num_exceptions += 1
return all_bib_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_bib", default="data/acl.bib",
type=str, help="The input bib file")
parser.add_argument("-o", "--output_json", default="data/acl.json",
type=str, help="The output bib file")
args = parser.parse_args()
all_bib_entries = load_bib_file(args.input_bib)
    all_bib_dict = build_json(all_bib_entries)
with open(args.output_json, "w") as f:
json.dump(all_bib_dict, f, indent=2)
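
    # Hypothetical invocation sketch, mirroring the argparse defaults above:
    #   python bib2json.py -i data/acl.bib -o data/acl.json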
|
[
"yuchen.lin@usc.edu"
] |
yuchen.lin@usc.edu
|
e0e468f23e653c4f37ed38f070b78741ba8bdf07
|
5ca4a0d91f5bd119e80478b5bd3d43ed30133a42
|
/film20/config/urls/pl.py
|
65a13447c224a832a15c19ca2748822deec504d0
|
[] |
no_license
|
thuvh/filmmaster
|
1fc81377feef5a9e13f792b329ef90f840404ec5
|
dd6a2ee5a4951b2397170d5086c000169bf91350
|
refs/heads/master
| 2021-01-17T16:10:54.682908
| 2012-04-29T18:19:52
| 2012-04-29T18:19:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,941
|
py
|
#-------------------------------------------------------------------------------
# Filmaster - a social web network and recommendation engine
# Copyright (c) 2009 Filmaster (Borys Musielak, Adam Zielinski).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
# Project
from film20.config.templates import templates
success = "sukces"
error = "error"
def full_url(key):
try:
from django.conf import settings
DOMAIN = settings.DOMAIN
FULL_DOMAIN = settings.FULL_DOMAIN
except:
DOMAIN = ''
FULL_DOMAIN = ''
return (FULL_DOMAIN or DOMAIN) + '/'+urls[key]+'/'
urls = {
### LEGACY STUFF FOR COMPATIBILITY: FLM-1185 ###
"BLOG_POST_OLD": "notka",
"SHORT_REVIEW_OLD": "krotka-recenzja",
### PUBLIC ###
"FIRST_TIME_INFO": "pierwsze-kroki",
"FAQ": "faq",
"MAIN": "",
"ADMIN": "admin",
"FILM": "film",
"RATE_FILMS": "oceniaj-filmy",
"RATE_FILMS_FAST_FORWARD": "oceniarka",
"RATE_NEXT": "ocen-nastepny",
"PERSON": "osoba",
"SEARCH": "szukaj",
"SEARCH_FILM": "szukaj-filmu",
"SEARCH_PERSON": "szukaj-osoby",
"BLOG": "blog",
"ARTICLE": "artykul",
"CHECKIN": "checkin",
"ARTICLES":"artykuly",
"ARTICLES_OLD":"notki",
"PLANET": "planeta",
"RECENT_ANSWERS": "odpowiedzi",
"PLANET_FOLLOWED": "planeta/obserwowani",
"POSTER": "plakat",
    # dashboard
"DASHBOARD": "kokpit",
    # public profiles
"SHOW_PROFILE": "profil",
"LIST_PROFILES": "lista-profili", #TODO: is this required?
"USER_ARTICLES": "artykuly-filmastera", #TODO: is this required?
"FILMS": "filmy",
"AGENDA": "agenda",
    # general-purpose slugs for use in links
"RATING": "ocena",
"EDIT": "edytuj",
"PREVIOUS": "poprzedni",
"NEXT": "nastepny",
"FEED": "feed",
"FILMS_FOR_TAG": "filmy",
"RANKING": "rankingi",
"RATINGS": "oceny",
"RECOMMENDATIONS": "rekomendacje",
"COMPUTE": "przelicz",
"TOP_USERS": "filmasterzy",
"FOLLOWED": "obserwowani",
"FOLLOWERS": "obserwujacy",
"SIMILAR_USERS": "podobni-uzytkownicy",
"SIMILAR_USERS_FOLLOWING": "podobni-uzytkownicy-obserwowani",
"SIMILAR_USERS_FOLLOWED": "podobni-uzytkownicy-obserwujacy",
# "COMPUTE_PROBABLE_SCORES": "wylicz-rekomendacje",
"FILMBASKET": "koszyk",
"OWNED": "kolekcja",
"WISHLIST": "wishlista",
"SHITLIST": "shitlista",
"TAG": "tag",
"SHOW_TAG_PAGE": "tag",
"ADD_TO_BASKET": "dodaj-do-koszyka",
"REGIONAL_INFO": "informacje-regionalne",
"AJAX": "ajax",
    # static pages (TODO: replace with flatpages?)
"TERMS": "regulamin",
"PRIVACY": "prywatnosc",
"LICENSE": "licencja",
"CONTACT": "kontakt",
"ABOUT": "redakcja",
"COOPERATION": "wspolpraca",
"BANNERS": "banery",
"ADVERTISEMENT": "reklama",
"AVATAR_HOWTO": "awatar-howto",
"FORMATTING_POSTS": "formatowanie",
### PRIVATE ###
    # login and registration
"ACCOUNT": "dashboard",
"OPENID_ASSOCIATIONS": "openid/associations",
"ASSIGN_FACEBOOK":"fb/assign_facebook",
"EDIT_FACEBOOK":"fb/edit",
"LOGIN": "konto/login",
"LOGOUT": "konto/logout",
"CHANGE_PASSWORD": "zmien-haslo",
"RESET_PASSWORD": "konto/reset-hasla",
"RESET_PASSWORD_CONFIRM": "konto/reset-hasla/potwierdzenie",
"RESET_PASSWORD_COMPLETE": "konto/reset-hasla/koniec",
"RESET_PASSWORD_DONE": "konto/reset-hasla/sukces",
"REGISTRATION": "konto/rejestracja",
"REGISTRATION_ACTIVATE": "konto/rejestracja/aktywacja",
"REGISTRATION_COMPLETE": "konto/rejestracja/koniec",
"ASSOCIATIONS": "edytuj-powiazane-serwisy",
"OAUTH_LOGIN": "konto/oauth-login",
"OAUTH_LOGIN_CB": "konto/oauth-login-cb",
"OAUTH_NEW_USER": "konto/oauth/nowy",
# friends and invitations
"MANAGE_INVITATIONS": "konto/zaproszenia",
"ACCEPT_INVITATION": "konto/akceptuj-zaproszenie",
"REFUSE_INVITATION": "konto/odrzuc-zaproszenie",
# old notifications - TODO: remove
"NOTIFICATIONS": "konto/powiadomienia",
"NOTIFICATION": "konto/powiadomienia/powiadomienie",
"MARK_NOTIFICATIONS_AS_READ": "konto/powiadomienia/oznacz-jako-przeczytane",
"PW": "pw",
"PW_INBOX": "odebrane",
"PW_OUTBOX": "wyslane",
"PW_COMPOSE": "stworz",
"PW_REPLY": "odpowiedz",
"PW_VIEW": "zobacz",
"PW_DELETE": "usun",
"PW_CONV_DELETE": "usun-watek",
"PW_CONV_VIEW": "zobacz-watek",
"PW_UNDELETE": "przywroc",
"PW_TRASH": "kosz",
#export
"EXPORT_RATINGS":"pobierz",
# profiles
"CREATE_PROFILE": "konto/stworz-profil",
"EDIT_PROFILE": "konto/edytuj-profil",
"EDIT_PROFILE_DONE": "konto/edytuj-profil/sukces",
"EDIT_LOCATION": "edytuj-lokalizacje",
"DELETE_PROFILE": "konto/usun-profil",
"DELETE_PROFILE_DONE": "konto/usun-profil/sukces",
"EDIT_AVATAR": "konto/edytuj-awatar",
"CROP_AVATAR": "konto/wytnij-awatar",
"DELETE_AVATAR": "konto/usun-awatar",
# forum
"FORUM": "forum",
"FORUM_FILMASTER": "forum/forum-filmastera",
"FORUM_HYDE_PARK": "forum/hyde-park",
"EDIT_COMMENT": "edytuj-komentarz",
# user activities
"COMMENTS": "komentarze",
"REVIEWS": "recenzje",
"REVIEW": "recenzja",
"SHORT_REVIEWS": "krotkie-recenzje",
"SHORT_REVIEW": "krotka-recenzja",
# default poster
"DEFAULT_POSTER": "/static/img/default_poster.png",
"DEFAULT_ACTOR": "/static/img/default_actor.png",
#rss
"RSS": "rss",
# special events
"SHOW_EVENT": "wydarzenia",
"SHOW_FESTIVAL": "festiwal",
"ORIGINAL_TITLE": "tytul-oryginalny",
# contest
"SHOW_GAME": "mecz",
"SHOW_CONTEST": "plebiscyt",
"CONTEST_VOTE_AJAX": "vote_ajax",
# add films
"ADD_FILMS":"dodaj-film",
"EDIT_CAST":"edytuj-obsade",
#add links
"ADD_LINKS":"dodaj-link",
"REMOVE_LINKS":"usun-link",
"ADD_VIDEO":"dodaj-video",
"LINKS":"linki",
"LINK":"link",
#nudge button
"NUDGE":"szturchnij",
#follow
"FOLLOW":"obserwuj",
#delete comment
"DELETE_COMMENT":"usun-komentarz",
#moderated photos
"POSTER_ADD":"dodaj-plakat",
"PHOTO_ADD":"dodaj-zdjecie",
"MODERATED_PHOTOS": "plakaty-i-zdjecia",
#moderated films
"MODERATED_FILMS": "filmy",
"MOVIES": "filmy",
"GENRE": "gatunek",
#mobile landing page
"MOBILE":"mobile",
#content moderation
"MODERATION": "moderacja",
#wall
"WALL":"wall",
#settings
"SETTINGS": "ustawienia",
"MANAGE_NOTIFICATIONS": "zarzadzaj-powiadomieniami",
"IMPORT_RATINGS":"importuj-oceny",
#dashboard
"NEW_ARTICLE":"nowy-artykul",
"EDIT_ARTICLE":"edytuj-artykul",
"RATED_FILMS":"oceny",
#showtimes
"SHOWTIMES": "rekomendacje",
"SCREENING": "seanse",
"CINEMAS": "kina",
"CINEMA": "kino",
"CHANNEL": "kanal",
"THEATERS": "kina",
"THEATER": "kino",
"TV": "tv",
"TV_CHANNELS": "kanaly-tv",
# applications
"APPLICATIONS": "aplikacje",
"APPLICATION": "aplikacja",
"ADD_APPLICATION": "dodaj-aplikacje",
"REMOVE_ACCESS_TOKEN": "usun-token",
"REMOVE_APPLICATION": "usun-aplikacje",
}
|
[
"email@ibrahimcesar.com"
] |
email@ibrahimcesar.com
|
fc476732d5002a650fe826d5c6d7ec00bb625f4d
|
76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a
|
/trunk/pyformex/examples/Isopar.py
|
09660af39e5a8ca12a1bdf071f0b7b4a4e9a1b05
|
[] |
no_license
|
BackupTheBerlios/pyformex-svn
|
ec2361b1b9967918be65e892217a691a6f8b145d
|
f5404809095711334bbb938d9d119a69ad8fc260
|
refs/heads/master
| 2020-12-24T13:20:47.422165
| 2011-11-15T11:52:23
| 2011-11-15T11:52:23
| 40,749,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,202
|
py
|
#!/usr/bin/env pyformex --gui
# $Id$
##
## This file is part of pyFormex 0.8.5 Sun Nov 6 17:27:05 CET 2011
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: https://savannah.nongnu.org/projects/pyformex/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Isopar
level = 'normal'
topics = ['geometry']
techniques = ['dialog', 'color','isopar']
"""
from plugins import isopar
import simple
import elements
wireframe()
ttype = ask("Select type of transformation",['Cancel','1D','2D','3D'])
if not ttype or ttype == 'Cancel':
exit()
tdim = int(ttype[0])
# create a unit quadratic grid in tdim dimensions
x = Coords(simple.regularGrid([0.]*tdim, [1.]*tdim, [2]*tdim)).reshape(-1,3)
x1 = Formex(x)
x2 = x1.copy()
# move a few points
if tdim == 1:
eltype = 'line3'
x2[1] = x2[1].rot(-22.5)
x2[2] = x2[2].rot(22.5)
elif tdim == 2:
eltype = 'quad9'
x2[5] = x2[2].rot(-22.5)
x2[8] = x2[2].rot(-45.)
x2[7] = x2[2].rot(-67.5)
x2[4] = x2[8] * 0.6
else:
eltype = 'hex27'
tol = 0.01
d = x2.distanceFromPoint(x2[0])
w = where((d > 0.5+tol) * (d < 1.0 - tol))[0]
# avoid error messages during projection
errh = seterr(all='ignore')
x2[w] = x2.projectOnSphere(0.5)[w]
w = where(d > 1.+tol)[0]
x2[w] = x2.projectOnSphere(1.)[w]
seterr(**errh)
clear()
message('This is the set of nodes in natural coordinates')
draw(x1,color=blue)
message('This is the set of nodes in cartesian coordinates')
draw(x2,color=red)
drawNumbers(x2,color=red)
drawNumbers(x1)
n = 8
stype = ask("Select type of structure",['Cancel','1D','2D','3D'])
if stype == 'Cancel':
exit()
sdim = int(stype[0])
if sdim == 1:
F = simple.line([0.,0.,0.],[1.,1.,0.],10)
elif sdim == 2:
F = simple.rectangle(1,1,1.,1.)
else:
## v = array(elements.Hex8.vertices)
## f = array(elements.Hex8.faces[1])
## F = Formex(v[f])
F = elements.Hex8.toFormex()
if sdim > 1:
for i in range(sdim):
F = F.replic(n,1.,dir=i)
if sdim < tdim:
F = F.trl(2,0.5)
clear()
message('This is the initial Formex')
FA=draw(F)
sz = F.sizes()
if sdim < tdim:
sz[sdim:tdim] = 2.
x1 = x1.scale(sz)
x2 = x2.scale(sz)
G=F.isopar(eltype,x2.points(),x1.points())
G.setProp(1)
message('This is the transformed Formex')
draw(G)
pause()
undraw(FA)
# End
|
[
"bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35"
] |
bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35
|
5d7c6cb5d86f6dfeaaa32c199c33c6b3c2bb6f23
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_7/102.py
|
998a2e0917fabf1d52113661b10a59105356d584
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
import sys
def calc_trees():
"""Read data from stdin and calculate all trees.
Returns a list of coordinates (tuples of ints).
"""
n,A,B,C,D,x,y,M = (int(e) for e in raw_input().split())
trees = [(x, y)]
for i in xrange(n - 1):
x = (A * x + B) % M
y = (C * y + D) % M
trees.append((x, y))
return trees
N = input()
for i in xrange(N):
result = 0
trees = calc_trees()
i1 = 0
for t1 in trees:
i2 = i1 + 1
for t2 in trees[i1 + 1:]:
i3 = i2 + 1
for t3 in trees[i2 + 1:]:
x = (t1[0] + t2[0] + t3[0]) / 3.0
y = (t1[1] + t2[1] + t3[1]) / 3.0
if (x == int(x) and y == int(y)):
result += 1
i3 += 1
i2 += 1
i1 += 1
print "Case #%d: %d" % (i + 1, result)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
099e1e04e14bdbed2f9c2d4cd79c06a2e5cd9ca8
|
33e2c0e11a6fbcc687750dbdcd2e063acf5b931b
|
/setup.py
|
17a2541858522e1e1f5d88fa0f7dd8566e9ffe9f
|
[
"MIT"
] |
permissive
|
uk-gov-mirror/nhsengland.ckanext-introjs
|
f1cb640819f09cdc6d4ecd82818a1e8b4b2653be
|
052d20a4e93cf824a1b28e7ea2e04c385615b40d
|
refs/heads/master
| 2021-05-28T17:21:31.739884
| 2015-02-24T14:44:56
| 2015-02-24T14:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(
name='ckanext-introjs',
version=version,
description="Adds intro.js to CKAN so users can follow a guided tour of the UI",
long_description='''
''',
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Ano Nymous',
author_email='ano.nymous@england.nhs.uk',
url='https://usablica.github.io/intro.js/',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=['ckanext', 'ckanext.introjs'],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points='''
[ckan.plugins]
# Add plugins here, e.g.
# myplugin=ckanext.introjs.plugin:PluginClass
''',
)
|
[
"ntoll@ntoll.org"
] |
ntoll@ntoll.org
|
84f14ff4534964c26f65d38a7312090e521edd05
|
818fbf7e5ad465d81b5841c3b1f3222dec3877e3
|
/spotify/urls.py
|
73fff59963e36a7d29247497d51fb93a62580135
|
[] |
no_license
|
anonshubh/music-controller
|
29d2c0be0265588e583ec7cde9578bc59e214d4a
|
c71329c691f43dff994ef3f69aa78dc438db047b
|
refs/heads/main
| 2023-02-13T22:30:24.102833
| 2021-01-08T13:11:45
| 2021-01-08T13:11:45
| 320,020,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
from django.urls import path
from . import views
urlpatterns=[
path('get-auth-url/',views.AuthURL.as_view()),
path('redirect/',views.spotify_callback),
path('is-authenticated/',views.IsAuthenticated.as_view()),
path('current-song/',views.CurrentSong.as_view()),
path('play-song/',views.PlaySong.as_view()),
path('pause-song/',views.PauseSong.as_view()),
]
|
[
"shubhpathak07@gmail.com"
] |
shubhpathak07@gmail.com
|
ec6f71dea44bb33840b8a9d2801b571e4dff1aa1
|
d2332604fc80b6d622a263b2af644425a7e703de
|
/fast-track/strings/4_valid_anagram.py
|
4b6cd138e04b2cd36803e6c0e4c3755c99e2ed8a
|
[] |
no_license
|
abhijitdey/coding-practice
|
b3b83a237c1930266768ce38500d6812fc31c529
|
6ae2a565042bf1d6633cd98ed774e4a77f492cc8
|
refs/heads/main
| 2023-08-14T23:31:06.090613
| 2021-10-18T21:35:56
| 2021-10-18T21:35:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
"""
Given two strings s and t, return true if t is an anagram of s, and false otherwise.
"""
def get_char_counts(string):
char_counts = dict()
for char in string:
if char in char_counts:
char_counts[char] += 1
else:
char_counts[char] = 1
return char_counts
def check_anagram(s, t):
if len(s) != len(t) or not s or not t:
return False
s_counts = get_char_counts(s)
t_counts = get_char_counts(t)
for char, count in s_counts.items():
if count != t_counts.get(char, 0):
return False
return True
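

if __name__ == '__main__':
    # Hypothetical usage sketch, not part of the original file, using the
    # classic LeetCode examples:
    print(check_anagram("anagram", "nagaram"))  # True
    print(check_anagram("rat", "car"))          # False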
|
[
"ashiz2013@gmail.com"
] |
ashiz2013@gmail.com
|
892237619b675d7d223b9efee8985aa62256e138
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/identity_constraint/id_l086_xsd/__init__.py
|
1aefc80d5eb9fa2d66622427e4aa78c72449dfad
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
from output.models.ms_data.identity_constraint.id_l086_xsd.id_l086 import (
Root,
T,
Ttype,
)
__all__ = [
"Root",
"T",
"Ttype",
]
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
0b70531014e35ca99bb424d18b4ea7de3b40d224
|
164ffe077dde59373ad9fadcfd727f279a1cfe93
|
/jni_build/jni/include/tensorflow/examples/how_tos/reading_data/convert_to_records.py
|
7794c3f6ea3625880921d442f02c5e4c2c00e81e
|
[] |
no_license
|
Basofe/Community_Based_Repository_Traffic_Signs
|
524a4cfc77dc6ed3b279556e4201ba63ee8cf6bd
|
a20da440a21ed5160baae4d283c5880b8ba8e83c
|
refs/heads/master
| 2021-01-22T21:17:37.392145
| 2017-09-28T21:35:58
| 2017-09-28T21:35:58
| 85,407,197
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,263
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MNIST data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' # MNIST filenames
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
tf.app.flags.DEFINE_string('directory', '/tmp/data',
'Directory to download data files and write the '
'converted result')
tf.app.flags.DEFINE_integer('validation_size', 5000,
'Number of examples to separate from the training '
'data for the validation set.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_set, name):
images = data_set.images
labels = data_set.labels
num_examples = data_set.num_examples
if images.shape[0] != num_examples:
raise ValueError('Images size %d does not match label size %d.' %
(images.shape[0], num_examples))
rows = images.shape[1]
cols = images.shape[2]
depth = images.shape[3]
filename = os.path.join(FLAGS.directory, name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(labels[index])),
'image_raw': _bytes_feature(image_raw)}))
writer.write(example.SerializeToString())
writer.close()
def main(argv):
# Get the data.
data_sets = mnist.read_data_sets(FLAGS.directory,
dtype=tf.uint8,
reshape=False)
# Convert to Examples and write the result to TFRecords.
convert_to(data_sets.train, 'train')
convert_to(data_sets.validation, 'validation')
convert_to(data_sets.test, 'test')
if __name__ == '__main__':
tf.app.run()
|
[
"helder_m_p_novais@hotmail.com"
] |
helder_m_p_novais@hotmail.com
|
6a09bc215bc33dd4733f9d3f862ee3a2bebc8541
|
412e327f41ec7c7a8e9389740bc849ebe173059e
|
/python/finite_element_model/add_node_set.py
|
e1101e9c279d72fe8f02ff56b4929729b7e1237c
|
[] |
no_license
|
erolsson/railway_ballast
|
2b617b91ae720ef86cd1e5c89b08a34b92996fd5
|
cfc86c22cc5e2f857c24ba1a01c2541edf839e3b
|
refs/heads/master
| 2023-07-25T16:54:10.529328
| 2023-07-23T12:49:32
| 2023-07-23T12:49:32
| 186,101,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
import os
import odbAccess
def add_node_set_to_odb(odb_file_name, node_set_name, x_min=-1e99, x_max=1e99, y_min=-1e99, y_max=1e99,
z_min=-1e99, z_max=1e99, instance_name=None):
odb = odbAccess.openOdb(odb_file_name, readOnly=False)
if instance_name is None:
instance_name = odb.rootAssembly.instances.keys()[0]
nodes = odb.rootAssembly.instances[instance_name].nodes
set_node_labels = []
for node in nodes:
x, y, z = node.coordinates
if x_min < x < x_max and y_min < y < y_max and z_min < z < z_max:
set_node_labels.append(node.label)
odb.rootAssembly.instances[instance_name].NodeSetFromNodeLabels(name=node_set_name, nodeLabels=set_node_labels)
odb.save()
odb.close()
if __name__ == '__main__':
odb_directory = os.path.expanduser('~/railway_ballast/odbs/')
add_node_set_to_odb(odb_directory + 'embankment_sleepers_low_17_5t.odb', 'ballast_bottom_nodes',
y_min=7-1e-3, y_max=7+1e-3)
add_node_set_to_odb(odb_directory + 'embankment_sleepers_high_17_5t.odb', 'ballast_bottom_nodes',
y_min=7-1e-3, y_max=7+1e-3)
add_node_set_to_odb(odb_directory + 'embankment_slab_low_17_5t.odb', 'ballast_bottom_nodes',
y_min=7 - 1e-3, y_max=7 + 1e-3)
add_node_set_to_odb(odb_directory + 'embankment_slab_high_17_5t.odb', 'ballast_bottom_nodes',
y_min=7 - 1e-3, y_max=7 + 1e-3)
|
[
"erolsson@kth.se"
] |
erolsson@kth.se
|
dc249fd778ca98ecbc6b3b862b7660fbb8310715
|
a4ebfeefa95e97d60f9ad2fe36e75e59e3b50769
|
/Trial_Aligned_Analysis/Trial_Aligned_Utils.py
|
7aa0ed89d59bb10ef4bd5bd2df5ea1bdb13b351b
|
[] |
no_license
|
matt-j-harvey/Widefield_Analysis
|
9f1566ac26d4164d988ab9a43d953f228b15746d
|
e03037e909ce986d8221113e1bbf5d46ddd7ad70
|
refs/heads/master
| 2023-05-01T12:30:27.064246
| 2023-04-24T11:23:37
| 2023-04-24T11:23:37
| 252,776,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,526
|
py
|
import os
import h5py
from tqdm import tqdm
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
import tables
from datetime import datetime
from Widefield_Utils import widefield_utils
def get_session_averages(activity_dataset, metadata_dataset):
# Load Session List
session_list = metadata_dataset[:, 2]
unique_sessions = np.unique(session_list)
condition_1_session_average_list = []
condition_2_session_average_list = []
for session in unique_sessions:
session_indicies = np.where(session_list == session)[0]
session_trials = activity_dataset[session_indicies]
session_metadata = metadata_dataset[session_indicies]
[condition_1_trials, condition_2_trials] = split_trials_by_condition(session_trials, session_metadata)
condition_1_mean = np.mean(condition_1_trials, axis=0)
condition_2_mean = np.mean(condition_2_trials, axis=0)
condition_1_session_average_list.append(condition_1_mean)
condition_2_session_average_list.append(condition_2_mean)
return condition_1_session_average_list, condition_2_session_average_list
def get_mouse_averages(activity_dataset, metadata_dataset):
# Load Session List
mouse_list = metadata_dataset[:, 1]
unique_mice = np.unique(mouse_list)
condition_1_mouse_average_list = []
condition_2_mouse_average_list = []
for mouse in unique_mice:
mouse_indicies = np.where(mouse_list == mouse)[0]
mouse_activity_data = activity_dataset[mouse_indicies]
mouse_metadata = metadata_dataset[mouse_indicies]
# Get Session Averages
condition_1_session_averages, condition_2_session_averages = get_session_averages(mouse_activity_data, mouse_metadata)
# Get Mouse Averages
condition_1_mouse_average = np.mean(condition_1_session_averages, axis=0)
condition_2_mouse_average = np.mean(condition_2_session_averages, axis=0)
# Add To List
condition_1_mouse_average_list.append(condition_1_mouse_average)
condition_2_mouse_average_list.append(condition_2_mouse_average)
return condition_1_mouse_average_list, condition_2_mouse_average_list
def split_trials_by_condition(activity_dataset, metadata_dataset):
    condition_list = metadata_dataset[:, 3]
unique_conditions = np.unique(condition_list)
combined_activity_list = []
for condition in unique_conditions:
condition_indicies = np.where(condition_list == condition)[0]
combined_activity_list.append(activity_dataset[condition_indicies])
return combined_activity_list
def get_mouse_session_averages(activity_dataset, metadata_dataset):
# Load Session List
mouse_list = metadata_dataset[:, 1]
unique_mice = np.unique(mouse_list)
condition_1_mouse_average_list = []
condition_2_mouse_average_list = []
for mouse in unique_mice:
mouse_indicies = np.where(mouse_list == mouse)[0]
mouse_activity_data = activity_dataset[mouse_indicies]
mouse_metadata = metadata_dataset[mouse_indicies]
# Get Session Averages
condition_1_session_averages, condition_2_session_averages = get_session_averages(mouse_activity_data, mouse_metadata)
# Add To List
condition_1_mouse_average_list.append(condition_1_session_averages)
condition_2_mouse_average_list.append(condition_2_session_averages)
return condition_1_mouse_average_list, condition_2_mouse_average_list
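# Hypothetical usage sketch (not part of the module): a tiny synthetic dataset
# illustrating the assumed metadata layout [trial_id, mouse, session, condition].
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n_trials, n_timepoints = 8, 5
    activity = rng.normal(size=(n_trials, n_timepoints))
    metadata = np.column_stack([
        np.arange(n_trials),       # trial id (unused by the helpers)
        np.repeat([0, 1], 4),      # mouse
        np.tile([0, 0, 1, 1], 2),  # session
        np.tile([0, 1], 4),        # condition
    ])
    cond1_avgs, cond2_avgs = get_mouse_averages(activity, metadata)
    print(len(cond1_avgs), cond1_avgs[0].shape)  # one (n_timepoints,) average per mouse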
|
[
"matthew.jc.harvey@gmail.com"
] |
matthew.jc.harvey@gmail.com
|
4681259f82617ba5a09860ebcd688d94a01d71ed
|
9d6218ca6c75a0e1ec1674fe410100d93d6852cb
|
/app/supervisor/venvs/supervisor/bin/echo_supervisord_conf
|
6de0696c980aed451f221b8ebe2d45ed7f743467
|
[] |
no_license
|
bopopescu/uceo-2015
|
164694268969dd884904f51b00bd3dc034695be8
|
5abcbfc4ff32bca6ca237d71cbb68fab4b9f9f91
|
refs/heads/master
| 2021-05-28T21:12:05.120484
| 2015-08-05T06:46:36
| 2015-08-05T06:46:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
#!/edx/app/supervisor/venvs/supervisor/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'supervisor==3.1.3','console_scripts','echo_supervisord_conf'
__requires__ = 'supervisor==3.1.3'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('supervisor==3.1.3', 'console_scripts', 'echo_supervisord_conf')()
)
|
[
"root@uceociputra.com"
] |
root@uceociputra.com
|
|
edd05aa7a6d0b2519f15498c48e760666baaf731
|
e9c7f991c319efe0b1831e8e8360e13c3e177555
|
/Python_do_zero_Guanabara/06_Metodos/desafio/100_desafio.py
|
ed4107d33cd4045e831afb18d8b04a91da717e45
|
[
"MIT"
] |
permissive
|
HenriqueSOliver/Projetos_Python
|
a3c4cabc61442221da968df55a463d9ad5865fcc
|
f18c5a343ad1b746a12bd372298b2debe9bc65ec
|
refs/heads/main
| 2023-06-05T03:11:42.879988
| 2021-06-17T22:26:03
| 2021-06-17T22:26:03
| 327,629,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from random import randint
from time import sleep
def sortLista(lista):
    print('Drawing 5 values for the list: ', end='')
for c in range (0, 5):
n = randint(1,100)
lista.append(n)
print(f' {n}', end=' - ', flush=True)
sleep(0.5)
    print('DONE')
def somaP(lista):
soma = 0
for valor in lista:
if valor % 2 == 0:
soma += valor
    print(f'Adding up the even values of {lista}, we get {soma}')
# main program
números = []
sortLista(números)
somaP(números)
|
[
"HenriqueSOliver85@gmail.com"
] |
HenriqueSOliver85@gmail.com
|
2e1c49c9ad0740b75147789a0c0d4b5d54c026b1
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D00A/JAPRESD00AUN.py
|
4fb33d38de066ce2be86ab6db137a31ba760f7d6
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 2,391
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD00AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 2},
{ID: 'PNA', MIN: 1, MAX: 99, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'RFF', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'GIS', MIN: 0, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 1, MAX: 999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'PNA', MIN: 1, MAX: 1, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 5},
{ID: 'NAT', MIN: 0, MAX: 9},
{ID: 'PDI', MIN: 0, MAX: 1},
{ID: 'DOC', MIN: 0, MAX: 9},
]},
{ID: 'RFF', MIN: 1, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'GIS', MIN: 1, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'EMP', MIN: 0, MAX: 5, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 1},
{ID: 'GIS', MIN: 0, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'ATT', MIN: 0, MAX: 20, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'PTY', MIN: 0, MAX: 1},
]},
{ID: 'LAN', MIN: 0, MAX: 10, LEVEL: [
{ID: 'GIS', MIN: 0, MAX: 1},
]},
]},
{ID: 'SAL', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 2},
{ID: 'ATT', MIN: 0, MAX: 10, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'GIS', MIN: 0, MAX: 2, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
2a99a7db473372aeeb7fa4a6ffac6b7e5ed25760
|
5058401352fd2b80bf33bd4a0c0acc77b0e05231
|
/python/makeexec/makeexec.py
|
a4205f5d195154aaa03e44e18c1d69e964d28ad9
|
[] |
no_license
|
pgl/mcandre
|
3b81ee64bf10ccaf02b9a1d44ed73e20cbcad4b6
|
81055895d872e2f93cd055f5a832c6d89848e3a3
|
refs/heads/master
| 2021-01-24T20:25:24.672765
| 2013-10-24T09:06:25
| 2013-10-24T09:06:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,189
|
py
|
#!/usr/bin/env python
"""Properly shebang and mark a file as executable"""
__author__ = "Andrew Pennebaker (andrew.pennebaker@gmail.com)"
__date__ = "3 Apr 2006"
__copyright__ = "Copyright 2006 Andrew Pennebaker"
import sys
import getopt
INTERPRETERS = {
"py":"#!/usr/bin/env python",
"pl":"#!/usr/bin/env perl",
"pm":"#!/usr/bin/env perl",
"lua":"#!/usr/bin/env lua",
"sh":"#!/bin/sh",
"rb":"#!/usr/bin/env ruby"
}
def update():
"""Update file"""
global INTERPRETERS
f = open("paths.conf", "r")
options = ("".join(f.readlines())).split("\n")
INTERPRETERS = {}
for option in options:
key, value = option.split(":")
INTERPRETERS[key] = value
def get_extension(filename):
"""Get a file's extension"""
return filename[filename.rindex(".")+1:]
def makeexec(filename, manual = None):
"""Make a file properly executable"""
auto = None
if manual:
auto = manual
else:
try:
auto = INTERPRETERS[get_extension(filename)]
except KeyError:
raise Exception("Cannot guess interpreter. Specify manual path.")
f = None
try:
f = open(filename, "r")
except IOError:
raise Exception("Error reading %s" % (filename))
lines = ("".join(f.readlines())).split("\n")
f.close()
if lines[0] != auto:
try:
f = open(filename, "w")
except IOError:
raise Exception("Error writing to %s" % (filename))
f.write("%s\n\n" % (auto))
for line in lines:
f.write("%s\n" % (line))
f.close()
def usage():
"""Print usage message"""
print "Usage: %s [options] <file1> <file2> <file3> <...>" % (sys.argv[0])
print "\n--manual <interpreter path>"
print "--help (usage)"
sys.exit()
def main():
"""CLI"""
system_args = sys.argv[1:] # ignore program name
manual = None
optlist = []
args = []
try:
optlist, args = getopt.getopt(system_args, "", ["manual=", "help"])
except getopt.GetoptError:
usage()
if len(args) < 1:
usage()
for option, value in optlist:
if option == "--help":
usage()
elif option == "--manual":
manual = value
for fn in args:
makeexec(fn, manual)
if __name__ == "__main__":
main()
update()
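# Example invocation (illustrative; the file names are placeholders):
#   python makeexec.py --manual "#!/usr/bin/env python" build.py deploy.py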
|
[
"andrew.pennebaker@gmail.com"
] |
andrew.pennebaker@gmail.com
|
369600eb04fb78121977a41f64c63cab8ecd8243
|
6fa0d5d3b61fbce01fad5a7dd50258c09298ee00
|
/Algorithm/BOJ/1051.py
|
267eb1c6bd32d770b9a4e55572968cf549d5ad9d
|
[] |
no_license
|
athletejuan/TIL
|
c8e6bd9f7e2c6f999dbac759adcdb6b2959de384
|
16b854928af2f27d91ba140ebc1aec0007e5eb04
|
refs/heads/master
| 2023-02-19T13:59:06.495110
| 2022-03-23T15:08:04
| 2022-03-23T15:08:04
| 188,750,527
| 1
| 0
| null | 2023-02-15T22:54:50
| 2019-05-27T01:27:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
N,M = map(int, input().split())
base = [input() for _ in range(N)]
def rectangular(l):
while l:
for i in range(M-l):
for j in range(N-l):
if base[j][i] == base[j][i+l] == base[j+l][i] == base[j+l][i+l]:
return (l+1)**2
return rectangular(l-1)
return 1
l = N-1 if N < M else M-1
print(rectangular(l))
# 1st try
# breaker = False
# if N < M:
# for i in range(N-1):
# for j in range(i+1):
# for k in range(M-N+i+1):
# if r[j][k] == r[j][k+N-1-i] == r[j+N-1-i][k] == r[j+N-1-i][k+N-1-i]:
# print((N-i)**2)
# breaker = True
# break
# if breaker:
# break
# if breaker:
# break
# else:
# for i in range(M-1):
# for j in range(i+1):
# for k in range(N-M+i+1):
# if r[k][j] == r[k][j+M-1-i] == r[k+M-1-i][j] == r[k+M-1-i][j+M-1-i]:
# print((M-i)**2)
# breaker = True
# break
# if breaker:
# break
# if breaker:
# break
# if not breaker:
# print(1)
|
[
"vanillasky84.0627@gmail.com"
] |
vanillasky84.0627@gmail.com
|
44bb08d1eb1cf06afe26eafcbb135c4b3e59f333
|
e31d6c6c74a71daf27d618de4debf59e8cb9f188
|
/gluon/losses.py
|
22f050cb0f61e493be60803078e9ba0ce4e905eb
|
[
"MIT"
] |
permissive
|
vlomonaco/imgclsmob
|
574ebfbfe4be7a11c8742f34261bc4e7cc1f30be
|
d0d1c49a848ab146213ef4cbd37239799d0102d8
|
refs/heads/master
| 2022-04-18T16:03:11.361053
| 2020-04-14T06:17:36
| 2020-04-14T06:17:36
| 255,555,032
| 0
| 1
|
MIT
| 2020-04-14T08:39:59
| 2020-04-14T08:39:58
| null |
UTF-8
|
Python
| false
| false
| 5,009
|
py
|
"""
Loss functions.
"""
__all__ = ['SegSoftmaxCrossEntropyLoss', 'MixSoftmaxCrossEntropyLoss']
from mxnet.gluon.loss import Loss, _reshape_like, _apply_weighting
class SegSoftmaxCrossEntropyLoss(Loss):
"""
SoftmaxCrossEntropyLoss with ignore labels (for segmentation task).
Parameters
----------
axis : int, default -1
The axis to sum over when computing softmax and entropy.
sparse_label : bool, default True
Whether label is an integer array instead of probability distribution.
from_logits : bool, default False
Whether input is a log probability (usually from log_softmax) instead of unnormalized numbers.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
ignore_label : int, default -1
The label to ignore.
size_average : bool, default False
Whether to re-scale loss with regard to ignored labels.
"""
def __init__(self,
sparse_label=True,
batch_axis=0,
ignore_label=-1,
size_average=True,
**kwargs):
super(SegSoftmaxCrossEntropyLoss, self).__init__(None, batch_axis, **kwargs)
self._sparse_label = sparse_label
self._ignore_label = ignore_label
self._size_average = size_average
def hybrid_forward(self, F, pred, label):
"""
Compute loss.
"""
softmaxout = F.SoftmaxOutput(
pred,
label.astype(pred.dtype),
ignore_label=self._ignore_label,
multi_output=self._sparse_label,
use_ignore=True,
normalization=("valid" if self._size_average else "null"))
if self._sparse_label:
loss = -F.pick(F.log(softmaxout), label, axis=1, keepdims=True)
else:
label = _reshape_like(F, label, pred)
loss = -F.sum(F.log(softmaxout) * label, axis=-1, keepdims=True)
loss = F.where(label.expand_dims(axis=1) == self._ignore_label, F.zeros_like(loss), loss)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class MixSoftmaxCrossEntropyLoss(SegSoftmaxCrossEntropyLoss):
"""
SegSoftmaxCrossEntropyLoss with auxiliary loss support.
Parameters
----------
aux : bool, default True
Whether to use auxiliary loss.
aux_weight : float, default 0.2
The weight for aux loss.
ignore_label : int, default -1
The label to ignore.
"""
def __init__(self,
aux=True,
mixup=False,
aux_weight=0.2,
ignore_label=-1,
**kwargs):
super(MixSoftmaxCrossEntropyLoss, self).__init__(ignore_label=ignore_label, **kwargs)
self.aux = aux
self.mixup = mixup
self.aux_weight = aux_weight
def _aux_forward(self, F, pred1, pred2, label, **kwargs):
"""
Compute loss including auxiliary output.
"""
loss1 = super(MixSoftmaxCrossEntropyLoss, self).hybrid_forward(F, pred1, label, **kwargs)
        loss2 = super(MixSoftmaxCrossEntropyLoss, self).hybrid_forward(F, pred2, label, **kwargs)
return loss1 + self.aux_weight * loss2
def _aux_mixup_forward(self, F, pred1, pred2, label1, label2, lam):
"""
Compute loss including auxiliary output.
"""
loss1 = self._mixup_forward(F, pred1, label1, label2, lam)
loss2 = self._mixup_forward(F, pred2, label1, label2, lam)
return loss1 + self.aux_weight * loss2
def _mixup_forward(self, F, pred, label1, label2, lam, sample_weight=None):
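        # NOTE: self._axis and self._from_logits are assumed to be set by a
        # fuller __init__ (e.g. axis=-1, from_logits=False); they are not
        # defined anywhere in this file.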
if not self._from_logits:
pred = F.log_softmax(pred, self._axis)
if self._sparse_label:
loss1 = -F.pick(pred, label1, axis=self._axis, keepdims=True)
loss2 = -F.pick(pred, label2, axis=self._axis, keepdims=True)
loss = lam * loss1 + (1 - lam) * loss2
else:
label1 = _reshape_like(F, label1, pred)
label2 = _reshape_like(F, label2, pred)
loss1 = -F.sum(pred * label1, axis=self._axis, keepdims=True)
loss2 = -F.sum(pred * label2, axis=self._axis, keepdims=True)
loss = lam * loss1 + (1 - lam) * loss2
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
def hybrid_forward(self, F, preds, label, **kwargs):
"""
Compute loss.
"""
if self.aux:
if self.mixup:
return self._aux_mixup_forward(F, *preds, label, **kwargs)
else:
return self._aux_forward(F, *preds, label, **kwargs)
else:
if self.mixup:
return self._mixup_forward(F, *preds, label, **kwargs)
else:
return super(MixSoftmaxCrossEntropyLoss, self).hybrid_forward(F, *preds, label, **kwargs)
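# Minimal smoke test (illustrative sketch, not part of the module): random
# 19-class segmentation logits on a 4x4 map; the shapes follow the
# SoftmaxOutput call above with multi_output=True.
if __name__ == '__main__':
    import mxnet as mx
    loss_fn = SegSoftmaxCrossEntropyLoss(ignore_label=-1)
    pred = mx.nd.random.normal(shape=(2, 19, 4, 4))
    label = mx.nd.zeros(shape=(2, 4, 4))
    print(loss_fn(pred, label))  # one scalar loss value per batch element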
|
[
"osemery@gmail.com"
] |
osemery@gmail.com
|
830510183c1a21c8df0759aa9047cb4d65f415a3
|
639c1f8c8bec57cb49665142ae2985d50da8c757
|
/mysite/admin.py
|
5bf0c8b4754467c15524a98be087b8fd1762af6d
|
[] |
no_license
|
info3g/hospitalevent
|
4c7d66d3af5b1e0e3f65cdd375b99324042f7c9b
|
fdd17bd7ae0828bd5bbdcf8fc61689f5485a571f
|
refs/heads/master
| 2022-11-05T12:47:23.700584
| 2019-04-23T05:51:05
| 2019-04-23T05:51:05
| 166,350,181
| 0
| 1
| null | 2022-10-28T12:29:33
| 2019-01-18T05:39:27
|
Python
|
UTF-8
|
Python
| false
| false
| 464
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(promisAnswers)
admin.site.register(diseases)
admin.site.register(symptoms)
admin.site.register(treatments)
admin.site.register(userProfile)
admin.site.register(userProfileSymptom)
admin.site.register(userProfileSymptomUpdate)
admin.site.register(userProfileTreatment)
admin.site.register(message)
admin.site.register(event)
admin.site.register(promisquestions)
|
[
"infothreeg@gmail.com"
] |
infothreeg@gmail.com
|
3b64b90411be6f00cfcba8c6d834c3c044629f05
|
8bb4a472344fda15985ac322d14e8f4ad79c7553
|
/Python3-Core/src/test/prompto/runtime/o/TestFilter.py
|
2b491c5cd234908a06c3b10241185209545c0ad8
|
[] |
no_license
|
prompto/prompto-python3
|
c6b356f5af30c6826730ba7f2ad869f341983a2d
|
64bd3d97d4702cc912097d41d961f7ab3fd82bee
|
refs/heads/master
| 2022-12-24T12:33:16.251468
| 2022-11-27T17:37:56
| 2022-11-27T17:37:56
| 32,623,633
| 4
| 0
| null | 2019-05-04T11:06:05
| 2015-03-21T07:17:25
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
from prompto.parser.o.BaseOParserTest import BaseOParserTest
from prompto.runtime.utils.Out import Out
class TestFilter(BaseOParserTest):
def setUp(self):
super(type(self), self).setUp()
Out.init()
def tearDown(self):
Out.restore()
def testFilterFromIterable(self):
self.checkOutput("filter/filterFromIterable.poc")
def testFilterFromList(self):
self.checkOutput("filter/filterFromList.poc")
def testFilterFromSet(self):
self.checkOutput("filter/filterFromSet.poc")
|
[
"eric.vergnaud@wanadoo.fr"
] |
eric.vergnaud@wanadoo.fr
|
8a87d3c15006f967d5b0d48dbd228929680398d2
|
1a23cc660649efe857808fef96740b4046f14713
|
/mysite/views.py
|
201c61bd6daca2e61d1d912d5faaef4ddd4cd0ba
|
[] |
no_license
|
itd/djtest
|
b1df94b0651bf94582778338d472d42e583c1497
|
4903d0624892501ca3a361ce2feca18c12d8d082
|
refs/heads/master
| 2021-01-10T10:24:08.491299
| 2015-12-10T13:55:27
| 2015-12-10T13:55:27
| 47,764,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse("Hello world. Polls index.")
|
[
"kurt@tool.net"
] |
kurt@tool.net
|
d0ac59455b0338b38c3c6fb28dd4f59a9259b261
|
6eb35cb8d53ad226de2a6f78e16cb665644fbbdf
|
/orca/topology/alerts/matcher.py
|
b842957d8317f0f69e8a4cdeb3fccd3a67d98a4b
|
[
"Apache-2.0"
] |
permissive
|
openrca/orca
|
631fbc55f72d7dd01563ebc784a259bf0fa75d22
|
3b3ddcb2c14cc550c586f64eb6ca01e827fbc451
|
refs/heads/master
| 2023-05-30T22:38:55.431661
| 2022-09-11T09:33:24
| 2022-09-11T09:33:24
| 218,142,874
| 88
| 18
|
Apache-2.0
| 2023-05-01T21:16:56
| 2019-10-28T20:51:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
# Copyright 2020 OpenRCA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orca.topology import matcher
class Matcher(matcher.Matcher):
"""Base class for Alert matchers."""
class AlertToSourceMatcher(Matcher):
"""Generic matcher for links between Alert and source objects."""
def are_linked(self, alert, obj):
source_mapping = alert.properties.source_mapping
if not source_mapping.origin == obj.origin:
return False
if not source_mapping.kind == obj.kind:
return False
mapping_items = source_mapping.properties.items()
obj_items = obj.properties.items()
return all(item in obj_items for item in mapping_items)
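# Illustrative check of the subset rule above, using plain dicts as stand-ins
# for the real property mappings (hypothetical data):
if __name__ == '__main__':
    mapping_items = {'name': 'api-0'}.items()
    obj_items = {'name': 'api-0', 'node': 'worker-1'}.items()
    print(all(item in obj_items for item in mapping_items))  # True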
|
[
"zurkowski.bartosz@gmail.com"
] |
zurkowski.bartosz@gmail.com
|
7909f7285c70e1cd8f35d44e1e0df1567e7c7104
|
d4a7ed22a20599c2f12a550b782327eea312cdc1
|
/doc/src/tutorial/src-odespy/osc2.py
|
de868146836989337530b80f18b4bb76a3852882
|
[] |
no_license
|
rothnic/odespy
|
dcfca8593d738604fc7b6b66fbe8083c7358fc5c
|
dd50508030ab61047ca885bf0e842f1ad4ef38f4
|
refs/heads/master
| 2021-01-15T10:51:19.854871
| 2015-05-02T03:51:30
| 2015-05-02T03:51:30
| 35,228,290
| 1
| 0
| null | 2015-05-07T15:33:52
| 2015-05-07T15:33:52
| null |
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
"""As osc1.py, but testing several solvers and setting sin(theta) to theta."""
from math import pi, sqrt
class Problem:
def __init__(self, c, Theta):
self.c, self.Theta = float(c), float(Theta)
self.freq = sqrt(c)
self.period = 2*pi/self.freq
def f(self, u, t):
theta, omega = u; c = self.c
return [omega, -c*theta]
problem = Problem(c=1, Theta=pi/4)
import odespy
solvers = [
odespy.ThetaRule(problem.f, theta=0), # Forward Euler
odespy.ThetaRule(problem.f, theta=0.5), # Midpoint method
odespy.ThetaRule(problem.f, theta=1), # Backward Euler
odespy.RK4(problem.f),
odespy.MidpointIter(problem.f, max_iter=2, eps_iter=0.01),
odespy.LeapfrogFiltered(problem.f),
]
N_per_period = 20
T = 3*problem.period # final time
import numpy
import matplotlib.pyplot as plt
legends = []
for solver in solvers:
solver_name = str(solver) # short description of solver
print solver_name
solver.set_initial_condition([problem.Theta, 0])
N = N_per_period*problem.period
time_points = numpy.linspace(0, T, N+1)
u, t = solver.solve(time_points)
theta = u[:,0]
legends.append(solver_name)
plt.plot(t, theta)
plt.hold('on')
plt.legend(legends)
plotfile = __file__[:-3]
plt.savefig(plotfile + '.png'); plt.savefig(plotfile + '.pdf')
plt.show()
|
[
"hpl@simula.no"
] |
hpl@simula.no
|
35c4bdebc781d3d87cdc25b59b881a5ba5da2bed
|
a438748ac89d53b19e7f4130529906896f059b25
|
/Композиция.py
|
8cc2c2abe97d5c1760e95ec575d7544e5ac3e6d1
|
[] |
no_license
|
Alexfordrop/Basics
|
90ead9294727a823eb044e5f2f69d8f29133d150
|
eda400424b2c72bd5e01a6c7cb14ad7ae29477d4
|
refs/heads/master
| 2023-06-08T16:42:26.704163
| 2021-06-27T20:46:27
| 2021-06-27T20:46:27
| 329,421,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
class Salary:
def __init__(self, pay):
self.pay = pay
def getTotal(self):
return (self.pay*12)
class Employee:
def __init__(self, pay, bonus):
self.pay = pay
self.bonus = bonus
self.salary = Salary(self.pay)
def annualSalary(self):
return "Total: " + str(self.salary.getTotal() + self.bonus)
employee = Employee(100, 10)
print(employee.annualSalary())
|
[
"mishechkin.aleksei@mail.ru"
] |
mishechkin.aleksei@mail.ru
|
5aef16b6aeb2d157280392287cf28cad33e25528
|
9ac205e4d8f111608d1abbcfa78b5b6598c17955
|
/33.搜索旋转排序数组.py
|
d447ed77448cc4578fc79c195a6ccb85bf544fb8
|
[] |
no_license
|
oceanbei333/leetcode
|
41ff0666da41750f7d3c82db53ec6f7f27125d3e
|
5d29bcf7ea1a9e489a92bc36d2158456de25829e
|
refs/heads/main
| 2023-03-16T18:17:25.232522
| 2021-02-28T04:56:40
| 2021-02-28T04:56:40
| 319,561,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
#
# @lc app=leetcode.cn id=33 lang=python3
#
# [33] Search in Rotated Sorted Array
#
# @lc code=start
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> int:
return nums.index(target) if target in nums else -1
    def search(self, nums: List[int], target: int) -> int:
        left, right = 0, len(nums)-1
        while left <= right:
            mid = (left+right) >> 1
            if nums[mid] == target:
                return mid
            # binary search is only valid within a sorted half
            # nums[:mid+1] is in ascending order
            if nums[left] <= nums[mid]:
                # target lies in nums[:mid+1]
                if nums[mid] > target >= nums[left]:
                    right = mid - 1
                else:
                    # target lies in nums[mid+1:]
                    left = mid+1
            else:
                # nums[mid:] is in ascending order
                if nums[mid] < target <= nums[right]:
                    # target lies in nums[mid+1:]
                    left = mid+1
                else:
                    # target lies in nums[:mid]
                    right = mid - 1
        return -1
# @lc code=end
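# Quick illustrative check (not part of the LeetCode submission):
if __name__ == '__main__':
    s = Solution()
    assert s.search([4, 5, 6, 7, 0, 1, 2], 0) == 4
    assert s.search([4, 5, 6, 7, 0, 1, 2], 3) == -1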
|
[
"hyram@wudun.net"
] |
hyram@wudun.net
|
61334443dff95bdd7751b514c74720f8be96eb4f
|
1ab788ce84e446a98b085b62e1e17f8a2afa148d
|
/문제풀기/2112. [모의 SW 역량테스트] 보호 필름.py
|
f68fa9c9fa62e32bd2c49165bc5c321e56ed8bda
|
[] |
no_license
|
kimjy392/exception
|
884dd26e1ec6f1c0357c1fe000742b1562adbeaa
|
b37e9c2f70adae6b93b94b86f96512469f431739
|
refs/heads/master
| 2022-12-11T20:33:25.632561
| 2020-08-29T13:26:08
| 2020-08-29T13:26:08
| 195,989,162
| 1
| 0
| null | 2022-12-06T23:20:02
| 2019-07-09T10:43:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,783
|
py
|
# def count():
# global isuse
# isuse = [False] * W
# for j in range(W):
# i, start, cnt = 0, 0, 0
# while i < D:
# if tboard[start][j] == tboard[i][j]:
# cnt += 1
# else:
# cnt = 0
# start = i
# continue
# if cnt == K:
# isuse[j] = True
# break
# i += 1
# if sum(isuse) == W:
# return True
# else:
# return False
# def Cback(k, n):
# global tboard, result, isuse, abc
# if k == n:
# if result <= D - len(Cselect):
# return
# for i in Cselect:
# tboard[i] = board[i]
# if count():
# if (D - len(Cselect)) < result:
# result = (D - len(Cselect))
# tmp = [-1] * D
# for i in range(D):
# if i not in Cselect:
# tmp[i] = Mselect[i]
# abc.append(tmp)
#
# for i in Cselect:
# tboard[i] = [Mselect[i]] * W
# return
#
# Cselect.append(k)
# Cback(k+1, n)
# Cselect.pop()
# Cback(k+1, n)
#
# def Mback(k, n):
# global tboard, abc
# if k == n:
# for j in range(len(abc)):
# for i in range(D):
# if abc[j][i] == Mselect[i]:
# return
# tboard = []
# for i in Mselect:
# tboard.append([i] * W)
# Cback(0, D)
# return
#
#
# Mselect.append(1)
# Mback(k+1, n)
# Mselect.pop()
# Mselect.append(0)
# Mback(k+1, n)
# Mselect.pop()
#
# T = int(input())
#
# for tc in range(1, T+1):
# D, W, K = map(int, input().split())
# board = [list(map(int, input().split())) for _ in range(D)]
# Mselect = []
# result = 0xfff
# Cselect = []
# abc = []
# Mback(0, D)
# print('#{} {}'.format(tc, result))
from collections import deque
def count():
    # scan the current board (patched in place by back()) column by column
    isuse = [False] * W
    for j in range(W):
        i, start, cnt = 0, 0, 0
        while i < D:
            if board[start][j] == board[i][j]:
                cnt += 1
            else:
                cnt = 0
                start = i
                continue
            if cnt == K:
                isuse[j] = True
                break
            i += 1
    if sum(isuse) == W:
        return True
    else:
        return False
# def bfs():
# global result, tboard
# stack = deque([(0, D, 0, [])])
#
# while stack:
# k, n, res, tmp = stack.popleft()
#
# tboard = []
# for i in range(len(tmp)):
# if tmp[i] == -1:
# tboard.append(board[i])
# else:
# tboard.append([tmp[i]] * W)
# if count():
# if res < result:
# result = res
# for i in -1, 0, 1:
# if i == -1:
# stack.append((k+1, n, res, tmp[:]+[-1]))
# else:
# stack.append((k+1, n, res+1, tmp[:]+[i]))
def back(k, n, res):
global result
if res >= result:
return
if count():
if res < result:
result = res
if k == n:
return
if -1 not in visit[k]:
visit[k].append(-1)
back(k+1, n, res)
for i in range(2):
if i not in visit[k]:
tmp, board[k] = board[k], [i] * W
visit[k].append(i)
back(k+1, n, res+1)
board[k] = tmp
T = int(input())
for tc in range(1, T+1):
D, W, K = map(int, input().split())
board = [list(map(int, input().split())) for _ in range(D)]
visit = [[] for _ in range(D)]
result = 0xfff
    back(0, D, 0)
print('#{} {}'.format(tc, result))
|
[
"kimjy392@gmail.com"
] |
kimjy392@gmail.com
|
5f82420827fe3d84a27b93bdb272851e78b8640a
|
2970291ff52e98915abb47848aeb71517ed1fbab
|
/machines/migrations/0028_auto_20200321_2338.py
|
7e985c1f5a57f54e980268faea52817ba7736ccf
|
[] |
no_license
|
dannyswolf/MLShop_Django_Service_boook
|
dd33f4bb0352836897448bc45bbb09b7c49252c2
|
9ac5f85468487a53465e244ba31b9bc968300783
|
refs/heads/master
| 2023-07-15T15:06:53.298042
| 2021-08-29T11:49:42
| 2021-08-29T11:49:42
| 255,998,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
# Generated by Django 3.0.4 on 2020-03-21 21:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('machines', '0027_auto_20200321_2337'),
]
operations = [
migrations.AlterField(
model_name='machines',
name='Μοντέλο',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
|
[
"ntinisiordanis@gmail.com"
] |
ntinisiordanis@gmail.com
|
e68a12ed2dd20f27609111b77d780a6bbe47ed92
|
e72ed9dfc5f90f4772d0b36da249ff7b2d39fd5f
|
/bible/forms.py
|
748bfa871e6d5c9e2b1441ce2ce0f51c7a384224
|
[] |
no_license
|
mparkcode/django-retroplay
|
58b0626bb4c6e80f96232a0e4886d1a6c2805bbd
|
3f76b630469a7105d35708b450eaacb94d384ee4
|
refs/heads/master
| 2022-12-10T23:26:27.842708
| 2019-10-21T13:46:17
| 2019-10-21T13:46:17
| 143,025,309
| 1
| 3
| null | 2022-12-08T02:49:53
| 2018-07-31T14:25:44
|
HTML
|
UTF-8
|
Python
| false
| false
| 186
|
py
|
from django import forms
class IgdbSearchForm(forms.Form):
igdb_search = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'placeholder': 'Search the bible'}), label="")
|
[
"mparkcode@gmail.com"
] |
mparkcode@gmail.com
|
bfa7526cf02028ee81f5be260236d207fd71ada4
|
88cfeb8f7076450e7a38d31ab2d11883c1818c8d
|
/net/densenet.py
|
f37cd38d0ead26afb7480a9c9c2189f1ef9a2c08
|
[] |
no_license
|
ZQPei/Alibaba_Cloud_German_AI_Challenge_for_Earth_Observation
|
4e5a127c12e0c02ed1914ab000a131e1a7f7d844
|
c2efb32763af0a56a3a7ecb9d83c0744f71d5c14
|
refs/heads/master
| 2020-04-26T04:31:57.731178
| 2019-02-17T01:10:55
| 2019-02-17T01:10:55
| 173,305,034
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,159
|
py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
def _bn_function_factory(norm, relu, conv):
def bn_function(*inputs):
concated_features = torch.cat(inputs, 1)
bottleneck_output = conv(relu(norm(concated_features)))
return bottleneck_output
return bn_function
class _DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, efficient=False):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1, bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=1, bias=False)),
self.drop_rate = drop_rate
self.efficient = efficient
def forward(self, *prev_features):
bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
if self.efficient and any(prev_feature.requires_grad for prev_feature in prev_features):
bottleneck_output = cp.checkpoint(bn_function, *prev_features)
else:
bottleneck_output = bn_function(*prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return new_features
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class _DenseBlock(nn.Module):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, efficient=False):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
efficient=efficient,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.named_children():
new_features = layer(*features)
features.append(new_features)
return torch.cat(features, 1)
class DenseNet(nn.Module):
"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 3 or 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
small_inputs (bool) - set to True if images are 32x32. Otherwise assumes images are larger.
efficient (bool) - set to True to use checkpointing. Much more memory efficient, but slower.
"""
def __init__(self, growth_rate=12, block_config=(16, 16, 16), compression=0.5,
num_init_features=24, bn_size=4, drop_rate=0,
num_classes=17, small_inputs=True, efficient=False):
super(DenseNet, self).__init__()
assert 0 < compression <= 1, 'compression of densenet should be between 0 and 1'
# self.avgpool_size = 8 if small_inputs else 7
self.avgpool_size = 8
# First convolution
if small_inputs:
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(10, num_init_features, kernel_size=3, stride=1, padding=1, bias=False)),
]))
else:
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(10, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
]))
self.features.add_module('norm0', nn.BatchNorm2d(num_init_features))
self.features.add_module('relu0', nn.ReLU(inplace=True))
self.features.add_module('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1,
ceil_mode=False))
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
efficient=efficient,
)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features,
num_output_features=int(num_features * compression))
self.features.add_module('transition%d' % (i + 1), trans)
num_features = int(num_features * compression)
# Final batch norm
self.features.add_module('norm_final', nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Initialization
for name, param in self.named_parameters():
if 'conv' in name and 'weight' in name:
n = param.size(0) * param.size(2) * param.size(3)
param.data.normal_().mul_(math.sqrt(2. / n))
elif 'norm' in name and 'weight' in name:
param.data.fill_(1)
elif 'norm' in name and 'bias' in name:
param.data.fill_(0)
elif 'classifier' in name and 'bias' in name:
param.data.fill_(0)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.avg_pool2d(out, kernel_size=self.avgpool_size).view(features.size(0), -1)
out = self.classifier(out)
return out
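# Illustrative smoke test (not part of the module): conv0 expects 10 input
# channels and the classifier defaults to 17 classes; a 64x64 input reaches
# the 8x8 final pooling window.
if __name__ == '__main__':
    net = DenseNet(growth_rate=12, block_config=(4, 4, 4), num_init_features=24)
    x = torch.randn(2, 10, 64, 64)
    print(net(x).shape)  # expected: torch.Size([2, 17])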
|
[
"dfzspzq@163.com"
] |
dfzspzq@163.com
|
e05bcb65006e0ceeb16eb2c70a9ef633d6e7c8b5
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_6404600001200128_0/Python/knabbers/A.py
|
bbd8fd315049627fb82af60f5c2e8854e2c57ff3
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
from collections import defaultdict
with open('in.txt','rb') as fin, open('output.txt','w') as fout:
case = 1
it = iter(fin.readlines())
_ = next(it) # cases
for line in it:
print ("\n")
print ("case " + str(case))
N = int(line)
line=next(it)
xs = [int(c) for c in line.split(" ")]
print xs
m1 = 0
m2 = 0
for i in range(N-1):
if xs[i+1] - xs[i] < 0:
m1 -= (xs[i+1] - xs[i])
if xs[i+1] < xs[i]:
m2 = max(m2,xs[i] - xs[i+1])
m3 = 0
for i in range(N-1):
#how much can she eat of current one
m3 += min(m2,xs[i])
best = 1
fout.write("Case #" + str(case) + ": " + str(m1) + " " + str(m3) + "\n")
case += 1
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
63dfb28677eaa87faeab89b154711257dc907fc9
|
1f9897e86f93438eed2555d6da1716099df54147
|
/2020/jokenpo.py
|
c69ace473e1326c65b70a76621b05b55119baf5f
|
[] |
no_license
|
AfonsoArtoni/PUG-PE-Dojo
|
10371ec321dc11d0280b8ac01dd70f47d29127a3
|
974a5293f58a721491915b2ee4d2e95e2247e745
|
refs/heads/master
| 2020-12-21T00:44:18.466602
| 2020-01-25T22:03:59
| 2020-01-25T22:03:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
"""Jokenpo.
Jokenpo é uma brincadeira japonesa, onde dois jogadores escolhem um dentre
três possíveis itens: Pedra, Papel ou Tesoura. O objetivo é fazer um juiz de
Jokenpo que dada a jogada dos dois jogadores informa o resultado da partida.
As regras são as seguintes:
- Pedra empata com Pedra e ganha de Tesoura
- Tesoura empata com Tesoura e ganha de Papel
- Papel empata com Papel e ganha de Pedra
"""
def jokenpo(entrada1, entrada2):
"""
>>> jokenpo('pedra','pedra')
(0, 'empate')
>>> jokenpo('tesoura', 'tesoura')
(0, 'empate')
>>> jokenpo('papel', 'papel')
(0, 'empate')
>>> jokenpo('tesoura', 'pedra')
(2, 'pedra')
>>> jokenpo('pedra', 'tesoura')
(1, 'pedra')
>>> jokenpo('pedra', 'papel')
(2, 'papel')
>>> jokenpo('papel', 'pedra')
(1, 'papel')
>>> jokenpo('tesoura', 'papel')
(1, 'tesoura')
>>> jokenpo('papel', 'tesoura')
(2, 'tesoura')
"""
d = {
'tesoura': 'papel',
'pedra': 'tesoura',
'papel': 'pedra'
}
if d[entrada1] == entrada2:
return (1, entrada1)
if d[entrada2] == entrada1:
return (2, entrada2)
return (0, 'empate')
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"marcusgabriel.ds@gmail.com"
] |
marcusgabriel.ds@gmail.com
|
f59b0e05422e2f0ed0e20fd76f2efe583c8387d0
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/test/dynamo/test_subclasses.py
|
938215cb807c44ef22ce95b85305d0d6a3a17192
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,096
|
py
|
# Owner(s): ["module: dynamo"]
import contextlib
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._functorch.config
import torch.utils.checkpoint
class MockSubclass(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return func(*args, **kwargs)
@contextlib.contextmanager
def preserve_subclass_config():
old_subclass_set = set(torch._dynamo.config.traceable_tensor_subclasses)
try:
torch._dynamo.config.traceable_tensor_subclasses.add(MockSubclass)
yield
finally:
torch._dynamo.config.traceable_tensor_subclasses.clear()
torch._dynamo.config.traceable_tensor_subclasses.update(old_subclass_set)
class SubclassTests(torch._dynamo.test_case.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._exit_stack.enter_context(preserve_subclass_config())
@classmethod
def tearDownClass(cls):
cls._exit_stack.close()
def test_torch_function_state_graph_break(self):
@torch.compile(backend="eager")
def fn(x):
with torch._C.DisableTorchFunctionSubclass():
torch._dynamo.graph_break()
return torch._C._is_torch_function_enabled(), torch.add(x, 1.0)
input = torch.ones(2, 2)
res, _ = fn(input)
self.assertFalse(res)
def test_torch_function_state_tracing(self):
@torch.compile(backend="eager", fullgraph=True)
def fn(x):
with torch._C.DisableTorchFunctionSubclass():
torch.add(x, 1.0)
input = torch.ones(2, 2)
res = fn(input)
def test_torch_function_state_guards(self):
cnt = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnt, fullgraph=True)
def fn(x):
torch.add(x, 1.0)
input = torch.ones(2, 2)
with torch._C.DisableTorchFunctionSubclass():
res = fn(input)
res = fn(input)
self.assertEqual(cnt.frame_count, 2)
def test_return_subclass(self):
@torch.compile(backend="eager", fullgraph=True)
def fn(x):
return MockSubclass(torch.add(x, 1.0))
input = torch.ones(2, 2)
res = fn(input)
self.assertIsInstance(res, MockSubclass)
def test_return_local_subclass(self):
class LocalSubclass(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return func(*args, **kwargs)
torch._dynamo.config.traceable_tensor_subclasses.add(LocalSubclass)
@torch.compile(backend="eager", fullgraph=True)
def fn(x):
return LocalSubclass(torch.add(x, 1.0))
input = torch.ones(2, 2)
res = fn(input)
self.assertIsInstance(res, LocalSubclass)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
c0546b68f5584ad3b7da2cf791a2c1c65b27dbfe
|
c4e2e1aded20c81fa9ab2a38620cfda71639c4c8
|
/print_updates.py
|
c63e212818cd24e22e2d09f8972571f4a4a7c587
|
[
"MIT"
] |
permissive
|
russss/pydsn
|
118f341191f2ce6c702e9a81b3c0fd4da00f54b4
|
84e3b441effded7cfb4716cfa04e7b69d98d8ac1
|
refs/heads/master
| 2020-12-25T17:25:11.405678
| 2020-09-05T11:51:54
| 2020-09-05T11:51:54
| 21,252,624
| 7
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
# coding=utf-8
from __future__ import division, absolute_import, print_function, unicode_literals
import logging
from dsn import DSN
def to_GHz(freq):
if freq is None:
return None
return str(round(float(freq) / 10 ** 9, 4))
def update_callback(antenna, old, new):
if len(new['down_signal']) == 0:
return
for i in range(0, len(new['down_signal'])):
signal = new['down_signal'][i]
if len(old['down_signal']) > i:
old_signal = old['down_signal'][i]
if (to_GHz(signal['frequency']) == to_GHz(old_signal['frequency']) and
signal['debug'] == old_signal['debug'] and
signal['spacecraft'] == old_signal['spacecraft']):
# No change, don't print anything
return
print("%s channel %s\ttracking %s\tstatus: %s\tinfo: %s\tfrequency: %sGHz" %
(antenna, i, signal['spacecraft'], signal['type'],
signal['debug'], to_GHz(signal['frequency'])))
logging.basicConfig()
dsn = DSN()
dsn.update_callback = update_callback
dsn.run()
|
[
"russ@garrett.co.uk"
] |
russ@garrett.co.uk
|
b4c98948d06b56b3abe16f50d15b2211226c7ba5
|
9e9d1a5b711191f87a849f2ea34eb00e17587080
|
/chalk_line/materials/rhythm/segment_03/rhythm_makers.py
|
f283ec2f8c528fd879e5d10bb8a44e8c6e0db308
|
[] |
no_license
|
GregoryREvans/chalk_line
|
c72e3bbdd383d6032e8afd8eba6f41d895f1c673
|
e333343ccb039b83393690d46d06e4d5225d6327
|
refs/heads/master
| 2022-02-23T17:40:38.569781
| 2022-02-10T13:52:13
| 2022-02-10T13:52:13
| 241,491,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
import evans
rmaker_one = evans.RTMMaker(
rtm=[
"(1 ((2 (1 1 1)) 1 -1))",
"(1 (1 2 3))",
"(1 (1 3))",
"(1 (1 1 2))",
"(1 (1 1))",
"(1 (1))",
"(1 (2 2 1 -1))",
"(1 (1))",
"(1 ((2 (1 1 1)) 1 -1))",
"(1 (1 2 3))",
"(1 (1 3))",
"(1 (1 1 2))",
"(1 (1 1))",
"(1 (1))",
"(1 (2 2 1 -1))",
"(1 (1))",
"(1 (2 1))",
"(1 (3 2 1))",
"(1 (1 2 3 4))",
"(1 (1 2 3 4 5 6))",
"(1 ((2 (1 1 1)) 1 -1))",
"(1 (1 2 3))",
"(1 (1 3))",
"(1 (1 1 2))",
"(1 (1 1))",
"(1 (1))",
"(1 (2 2 1 -1))",
"(1 (1))",
"(1 (2 1))",
"(1 (3 2 1))",
"(1 (1 2 3 4))",
"(1 (1 2 3 4 5 6))",
]
)
|
[
"gregoryrowlandevans@gmail.com"
] |
gregoryrowlandevans@gmail.com
|
c5b24563692c7be59ada5e6c4bae377ad2ee98b4
|
7bf617f77a55d8ec23fa8156c1380b563a5ac7f6
|
/CG/SciPy/circle_1.py
|
58a991f780e853de703aa238f4858b7803f7d0fc
|
[] |
no_license
|
anyatran/school
|
c06da0e08b148e3d93aec0e76329579bddaa85d5
|
24bcfd75f4a6fe9595d790808f8fca4f9bf6c7ec
|
refs/heads/master
| 2021-06-17T10:45:47.648361
| 2017-05-26T12:57:23
| 2017-05-26T12:57:23
| 92,509,148
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
"""
Program name: circle_1.py
Objective: A circle is a special case of an oval.
Keywords: canvas, oval, circle
============================================================================79
Explanation: A circle is a special case of an oval and is defined by the
box it fits inside. The bounding box is specified the same way as for a
rectangle: two opposite corners, (x0, y0) and (x1, y1).
Author: Mike Ohlson de Fine
"""
# circle_1.py
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
from Tkinter import *
root = Tk()
root.title('A circle')
cw = 150 # canvas width
ch = 140 # canvas height
canvas_1 = Canvas(root, width=cw, height=ch, background="white")
canvas_1.grid(row=0, column=1)
# specify two opposite corners of the bounding box as four numbers named 'xy'
xy = 20, 20, 120, 120
canvas_1.create_oval(xy)
root.mainloop()
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
[
"panhtran249@gmail.com"
] |
panhtran249@gmail.com
|
1a2c312399d2472dde204fc5e36017a06ffad7c6
|
6336828aeab3ea2ba3e1cf9452a8a3f3a084b327
|
/fundooNotes-master/virtual-env/bin/nosetests-3.4
|
121dcd17ab6d074ffdd65beef38adefe3c734d61
|
[
"MIT"
] |
permissive
|
kalereshma96/DjangoNewRepository
|
85f2eaed6b689be273af48d328c0a388244bbe2b
|
37fd232c2ac91eb6940300f20118f93d17926f9a
|
refs/heads/master
| 2020-04-12T18:12:15.698279
| 2019-01-21T13:46:37
| 2019-01-21T13:46:37
| 162,672,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
4
|
#!/home/admin1/PycharmProjects/mynewpythonproject/fundooNotes-master/virtual-env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from nose import run_exit
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_exit())
|
[
"kalereshma96@gmail.com"
] |
kalereshma96@gmail.com
|
b9e96932a14e41fe2293414f813539a41dac1547
|
e60487a8f5aad5aab16e671dcd00f0e64379961b
|
/python_stack/Algos/Fundamentals/bracesValid.py
|
1cf44dec62b3abd0f888db3f006ddf5ae829bf98
|
[] |
no_license
|
reenadangi/python
|
4fde31737e5745bc5650d015e3fa4354ce9e87a9
|
568221ba417dda3be7f2ef1d2f393a7dea6ccb74
|
refs/heads/master
| 2021-08-18T08:25:40.774877
| 2021-03-27T22:20:17
| 2021-03-27T22:20:17
| 247,536,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# Determine whether the braces in a string are valid. For example, given the
# input string "w(a{t}s[o(n{c}o)m]e)h[e{r}e]!", return
# true. Given "d(i{a}l[t]o)n{e", return
# false. Given "a(1)s[O(n]0{t)0}k", return
# false.
def bracesValid(str):
mapping={"(":")","{":"}","[":"]"}
myStack=[]
for c in str:
if c in ('(','{','['):
myStack.append(c)
elif c in (')','}',']'):
if myStack:
top=myStack.pop()
if c!=mapping[top]:
return False
else:
return False
if myStack: return False
else: return True
print(bracesValid("w(a{t}s[o(n{c}o)m]e)h[e{r}e]!"))
|
[
"reena.dangi@gmail.com"
] |
reena.dangi@gmail.com
|
912e0ef322d0210628742b89e0e9105897dc42f6
|
1e14e73b66aa4e60a528addf6358d5c009705e9e
|
/scripts/visualize.py
|
8f3dca025adabe11f2e73709dd4ea46d5d23a0ed
|
[
"MIT"
] |
permissive
|
cannin/covid-sicr-test
|
aeaa5935c5ddc99e616bdf1b9527a82eedcaa641
|
f842946357428730265b7d0a6640172dc757ecae
|
refs/heads/master
| 2022-10-08T04:46:31.415917
| 2020-06-11T21:06:30
| 2020-06-11T21:06:30
| 271,634,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,321
|
py
|
#!/usr/bin/env python
# coding: utf-8
import argparse
import logging
from multiprocessing import Pool
import pandas as pd
import papermill as pm
from pathlib import Path
from tqdm import tqdm
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
for lib in ['blib2to3', 'papermill']:
logger = logging.getLogger(lib)
logger.setLevel(logging.WARNING)
from niddk_covid_sicr import get_data_prefix, get_ending, list_rois
notebook_path = Path(__file__).parent.parent / 'notebooks'
# Parse all the command-line arguments
parser = argparse.ArgumentParser(description=('Executes all of the analysis '
'notebooks'))
parser.add_argument('model_name',
help='Name of the Stan model file (without extension)')
parser.add_argument('-dp', '--data_path', default='./data',
help='Path to directory containing the data files')
parser.add_argument('-fp', '--fits_path', default='./fits',
help='Path to directory containing pickled fit files')
parser.add_argument('-rp', '--results_path', default='./results/vis-notebooks',
help=('Path to directory where resulting notebooks '
'will be stored'))
parser.add_argument('-mp', '--models_path', default='./models',
help='Path to directory containing .stan files')
parser.add_argument('-r', '--rois', default=[], nargs='+',
help='Space separated list of ROIs')
parser.add_argument('-n', '--n_threads', type=int, default=16, nargs='+',
help='Number of threads to use for analysis')
parser.add_argument('-f', '--fit_format', type=int, default=1,
help='Version of fit format')
parser.add_argument('-v', '--verbose', type=int, default=0,
help='Verbose error reporting')
args = parser.parse_args()
for key, value in args.__dict__.items():
if '_path' in key and 'results' not in key:
assert Path(value).is_dir(),\
"%s is not a directory" % Path(value).resolve()
# pathlibify some paths
data_path = Path(args.data_path)
fits_path = Path(args.fits_path)
models_path = Path(args.models_path)
results_path = Path(args.results_path)
results_path.mkdir(parents=True, exist_ok=True)
assert any([x.name.endswith('.csv') for x in data_path.iterdir()]),\
"No .csv files found in data_path %s" % (data_path.resolve())
assert any([x.name.endswith('.stan') for x in models_path.iterdir()]),\
"No .stan files found in models_path %s" % (models_path.resolve())
assert any([x.name.endswith('.pkl') or x.name.endswith('.csv')
for x in fits_path.iterdir()]),\
"No .pkl or .csv files found in fits_path %s" % (fits_path.resolve())
ending = get_ending(args.fit_format)
if not args.rois:
data_rois = list_rois(data_path, get_data_prefix(), '.csv')
fit_rois = list_rois(fits_path, args.model_name, ending)
args.rois = list(set(data_rois).intersection(fit_rois))
args.n_threads = min(args.n_threads, len(args.rois))
print("Running visualization notebook for %d rois on model '%s'" %
(len(args.rois), args.model_name))
# Make sure all ROI pickle files exist
for roi in args.rois:
file = fits_path / ('%s_%s%s' % (args.model_name, roi, ending))
assert file.is_file(), "No such %s file: %s" % (ending, file.resolve())
# Function to be execute on each ROI
def execute(model_name, roi, data_path, fits_path, model_path, notebook_path,
results_path, fit_format, verbose=False):
try:
result = pm.execute_notebook(
str(notebook_path / 'visualize.ipynb'),
str(results_path / ('visualize_%s_%s.ipynb' % (model_name, roi))),
parameters={'model_name': model_name,
'roi': roi,
'data_path': str(data_path),
'fits_path': str(fits_path),
'models_path': str(models_path),
'fit_format': fit_format},
nest_asyncio=True)
except pm.PapermillExecutionError as e:
exception = '%s: %s' % (e.ename, e.evalue)
    except Exception as e:
        exception = str(e).split('\n')[-1]
else:
# Possible exception that was raised
# (or `None` if notebook completed successfully)
exception = str(result['metadata']['papermill']['exception'])
if exception and verbose:
print(roi, exception)
return exception
# Top progress bar (how many ROIs have finished)
pbar = tqdm(total=len(args.rois), desc="All notebooks", leave=True)
def update(*a):
pbar.update()
# Execute up to 16 ROIs notebooks at once
pool = Pool(processes=args.n_threads)
jobs = {roi: pool.apply_async(execute,
[args.model_name, roi, data_path, fits_path,
models_path, notebook_path, results_path,
args.fit_format],
{'verbose': args.verbose},
callback=update)
for roi in args.rois}
pool.close()
pool.join()
print('\n')
error_table = pd.Series({roi: job.get() for roi, job in jobs.items()})
error_table = error_table[error_table != 'None']
if len(error_table):
print("Errors:")
print(error_table)
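# Example invocation (illustrative; the model name and thread count are
# placeholders):
#   python scripts/visualize.py my_model -dp ./data -fp ./fits -n 8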
|
[
"rgerkin@asu.edu"
] |
rgerkin@asu.edu
|
809e15e8c97fec14f1187a7f5c9189c43e53ad04
|
a1b8b807a389fd3971ac235e46032c0be4795ff1
|
/testrepo/Zips/script.module.streamhublive/resources/modules/downloader.py
|
97608f2e48e1c7d67e403ad3122dc6daeb436677
|
[] |
no_license
|
sClarkeIsBack/StreamHub
|
0cd5da4b3229592a4e2cf7ce3e857294c172aaba
|
110983579645313b8b60eac08613435c033eb92d
|
refs/heads/master
| 2020-05-23T09:09:54.898715
| 2020-02-29T12:15:32
| 2020-02-29T12:15:32
| 80,440,827
| 9
| 20
| null | 2017-10-04T07:32:52
| 2017-01-30T16:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,512
|
py
|
import xbmcgui
import urllib
import time
from urllib import FancyURLopener
import sys
class MyOpener(FancyURLopener):
version = '[COLOR ffff0000][B]StreamHub[/B][/COLOR]'
myopener = MyOpener()
urlretrieve = MyOpener().retrieve
urlopen = MyOpener().open
def download(url, dest, dp = None):
start_time=time.time()
urlretrieve(url, dest, lambda nb, bs, fs: _pbhook(nb, bs, fs, dp, start_time))
def auto(url, dest, dp = None):
start_time=time.time()
urlretrieve(url, dest, lambda nb, bs, fs: _pbhookauto(nb, bs, fs, dp, start_time))
def _pbhookauto(numblocks, blocksize, filesize, url, dp):
none = 0
def _pbhook(numblocks, blocksize, filesize, dp, start_time):
try:
percent = min(numblocks * blocksize * 100 / filesize, 100)
currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
kbps_speed = numblocks * blocksize / (time.time() - start_time)
if kbps_speed > 0:
eta = (filesize - numblocks * blocksize) / kbps_speed
else:
eta = 0
kbps_speed = kbps_speed / 1024
mbps_speed = kbps_speed / 1024
total = float(filesize) / (1024 * 1024)
mbs = '[COLOR white]%.02f MB[/COLOR] of %.02f MB' % (currently_downloaded, total)
e = 'Speed: [COLOR lime]%.02f Mb/s ' % mbps_speed + '[/COLOR]'
e += 'ETA: [COLOR yellow]%02d:%02d' % divmod(eta, 60) + '[/COLOR]'
except:
percent = 100
def unzip(zip,dest):
import zipfile
zip_ref = zipfile.ZipFile(zip, 'r')
zip_ref.extractall(dest)
zip_ref.close()
def getmodules():
import os,re,xbmc
zip = 'https://github.com/sClarkeIsBack/StreamHub/raw/master/StreamHubLive/rootdownloads.zip'
root = xbmc.translatePath('special://home/addons/script.module.streamhublive/resources/root/')
udata = xbmc.translatePath('special://home/userdata/addon_data/script.module.streamhublive/downloads/')
dest = xbmc.translatePath(os.path.join('special://home/userdata/addon_data/script.module.streamhublive/downloads/', 'root.zip'))
if not os.path.exists(udata):
os.makedirs(udata)
try:
download(zip,dest)
unzip(dest,root)
except:
xbmcgui.Dialog().ok('[COLOR ffff0000][B]StreamHub[/B][/COLOR]','Oops..Something went wrong with our auto update feature, Please Inform us at','http://facebook.com/groups/streamh')
try:
os.remove(dest)
except:
pass
|
[
"mediahubiptv@gmail.com"
] |
mediahubiptv@gmail.com
|
8f91301ee92109eaebdec1ed72f4f25409581a1b
|
9d69d37c930821f4ebf265f3c1f214c2cc558502
|
/scripts/extra/csv_to_coco_json_result.py
|
3fc14bfbe55bbd61d296159e00b5d157b88d6e32
|
[
"Apache-2.0"
] |
permissive
|
mayanks888/mAP
|
255b35e25384659dfaf97e6e3eec53bafb5bb3cc
|
7e6a6c4b916223e737d30c76ebb11a75ed15d984
|
refs/heads/master
| 2023-01-29T11:05:21.099541
| 2020-12-13T20:54:44
| 2020-12-13T20:54:44
| 278,285,070
| 0
| 0
|
Apache-2.0
| 2020-07-09T06:44:56
| 2020-07-09T06:44:55
| null |
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
from collections import namedtuple
import os
import pandas as pd
import json
from utils import *
coco91class = coco80_to_coco91_class()
csv_path='yolo_txt_to_csv.csv'
# csv_path='yolo1.csv'
data = pd.read_csv(csv_path)
print(data.head())
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
# filename='img_name'
# data = namedtuple('data', ['img_name', 'obj_class'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
grouped = split(data, 'filename')
jdict= []
for group in grouped:
# filename = group.filename.encode('utf8')
filename = group.filename
print(filename)
for index, row in group.object.iterrows():
xmin=(row['xmin'])
ymin = (row['ymin'])
width= (row['xmax'])-xmin
height=(row['ymax'])-ymin
# box_=[xmin,ymin,xmax,ymax]
# box2=xyxy2xywh(box_)
# obj_id = obj['category_id']
# print(obj_id)
score=row['conf']
obj_name=row["class"]
obj_cat=row["obj_category"]
        # map the 80-class model index to the 91-class COCO category id
        obj_cat = coco91class[int(obj_cat)]
bbox = ((xmin), (ymin), (width), (height))
# bbox = box2
jdict.append({'image_id': int(filename), 'category_id': obj_cat, 'bbox': [round(x, 3) for x in bbox], 'score': round(score, 5)})
print('\nGenerating json detection for pycocotools...')
with open('results.json', 'w') as file:
json.dump(jdict, file)
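# Illustrative shape of one emitted record (values invented for the example):
# {"image_id": 42, "category_id": 3, "bbox": [10.0, 20.0, 30.0, 40.0], "score": 0.91}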
|
[
"mayank.sati@gwmidc.in"
] |
mayank.sati@gwmidc.in
|
e86947e81e355edde5f00faccb2b3b4b7adfe1b7
|
4de28b1f6d97640834e4a795e68ca9987f9e2cd5
|
/check plugins 2.0/dell_powervault_me4/checks/agent_dellpowervault
|
872a4202350c171f757ac3e0fb517852ad2a04de
|
[] |
no_license
|
Yogibaer75/Check_MK-Things
|
affa0f7e6e772074c547f7b1df5c07a37dba80b4
|
029c546dc921c4157000d8ce58a878618e7bfa97
|
refs/heads/master
| 2023-09-01T15:52:28.610282
| 2023-08-29T06:18:52
| 2023-08-29T06:18:52
| 20,382,895
| 47
| 16
| null | 2023-07-30T15:52:22
| 2014-06-01T18:04:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,216
|
#!/usr/bin/env python3
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# (c) Andreas Doehler <andreas.doehler@bechtle.com/andreas.doehler@gmail.com>
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
def agent_dellpowervault_arguments(params, hostname, ipaddress):
args = ''
if params["user"] != "":
args += " -u " + quote_shell_string(params["user"])
if params["password"] != "":
args += " -p " + quote_shell_string(params["password"])
args += " " + quote_shell_string(ipaddress)
return args
special_agent_info['dellpowervault'] = agent_dellpowervault_arguments
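# Illustrative example (hypothetical values): params = {"user": "monitor",
# "password": "secret"} with ipaddress "10.0.0.1" would yield the argument
# string " -u 'monitor' -p 'secret' '10.0.0.1'".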
|
[
"andreas.doehler@gmail.com"
] |
andreas.doehler@gmail.com
|
|
906e43db8bb1001f90d120dced7b2b11273ffe1e
|
35f7c36a55a98cd4150abe51c24bf6b2313ee9d5
|
/pytestFrame_demon1/TestCase/testmy.py
|
4d60e67571f998cd5a788db480a9d3cbf2532dcf
|
[] |
no_license
|
jingshiyue/zhongkeyuan_workspace
|
58b12e46223d398b184c48c4c6b799e5235e4470
|
aa0749f4a237ee76a61579dc5984635a7127a631
|
refs/heads/master
| 2021-07-15T15:18:40.186561
| 2020-08-12T05:32:45
| 2020-08-12T05:34:20
| 197,749,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
import sys
import os
path0 = os.path.realpath(__file__) #'D:\\workfile\\workspace\\pytestFrame_demon1\\TestCase\\testmy.py'
path1 = os.path.dirname(path0)
GRANDFA = os.path.dirname(path1)
sys.path.append(GRANDFA)  # add the grandparent directory to sys.path
print("ok")
sys.path.append(sys.path[0] + r"\..\..")
sys.path.append(sys.path[0] + r"\..")
|
[
"173302591@qq.com"
] |
173302591@qq.com
|
f3e8686fcdc11a92cf10d7b6bd5f7a314cd2ce1b
|
b94bb6b6e2fac5fb8f02354a2d05374b8f00ff60
|
/mandelbrot/numpy_vector_numexpr/numpy_vector_numexpr.py
|
484ad304db72cf8701216640e58a2e7b8f1d35dc
|
[] |
no_license
|
janus/EuroPython2011_HighPerformanceComputing
|
17ff9e6d7d5634c424983103ad45442acfe2502e
|
1a15b5e66a22bd11422a1bb9ad749c5d906e3f98
|
refs/heads/master
| 2021-01-16T22:41:04.249856
| 2011-06-28T22:32:50
| 2011-06-28T22:32:50
| 1,973,541
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,837
|
py
|
import datetime
import sys
import numpy as np
import numexpr
# area of space to investigate
x1, x2, y1, y2 = -2.13, 0.77, -1.3, 1.3
# use numexpr library to vectorise (and maybe parallelise) the numpy expressions
def calculate_z_numpy(q, maxiter, z):
output = np.resize(np.array(0,), q.shape)
for iteration in range(maxiter):
#z = z*z + q
z = numexpr.evaluate("z*z+q")
#done = nm.greater(abs(z), 2.0)
done = numexpr.evaluate("abs(z).real>2.0")
#q = nm.where(done,0+0j, q)
q = numexpr.evaluate("where(done, 0+0j, q)")
#z = nm.where(done,0+0j, z)
z = numexpr.evaluate("where(done,0+0j, z)")
#output = nm.where(done, iteration, output)
output = numexpr.evaluate("where(done, iteration, output)")
return output
def calculate(show_output):
# make a list of x and y values which will represent q
# xx and yy are the co-ordinates, for the default configuration they'll look like:
# if we have a 1000x1000 plot
# xx = [-2.13, -2.1242, -2.1184000000000003, ..., 0.7526000000000064, 0.7584000000000064, 0.7642000000000064]
# yy = [1.3, 1.2948, 1.2895999999999999, ..., -1.2844000000000058, -1.2896000000000059, -1.294800000000006]
x_step = (float(x2 - x1) / float(w)) * 2
y_step = (float(y1 - y2) / float(h)) * 2
x=[]
y=[]
ycoord = y2
while ycoord > y1:
y.append(ycoord)
ycoord += y_step
xcoord = x1
while xcoord < x2:
x.append(xcoord)
xcoord += x_step
x = np.array(x)
y = np.array(y) * 1j # make y a complex number
print "x and y have length:", len(x), len(y)
# create a square matrix using clever addressing
x_y_square_matrix = x+y[:, np.newaxis] # it is np.complex128
# convert square matrix to a flatted vector using ravel
q = np.ravel(x_y_square_matrix)
# create z as a 0+0j array of the same length as q
# note that it defaults to reals (float64) unless told otherwise
z = np.zeros(q.shape, np.complex128)
start_time = datetime.datetime.now()
print "Total elements:", len(q)
output = calculate_z_numpy(q, maxiter, z)
end_time = datetime.datetime.now()
secs = end_time - start_time
print "Main took", secs
validation_sum = sum(output)
print "Total sum of elements (for validation):", validation_sum
if show_output:
import Image
output = (output + (256*output) + (256**2)*output) * 8
im = Image.new("RGB", (w/2, h/2))
im.fromstring(output.tostring(), "raw", "RGBX", 0, -1)
im.show()
if __name__ == '__main__':
w = int(sys.argv[1]) # e.g. 100
h = int(sys.argv[1]) # e.g. 100
maxiter = int(sys.argv[2]) # e.g. 300
calculate(True)
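# Example invocation (assumed, per the argv comments above):
#   python numpy_vector_numexpr.py 1000 300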
|
[
"ian@ianozsvald.com"
] |
ian@ianozsvald.com
|
dd4afdb6db252146efbf72714ce2914c07933fec
|
20343e8a8435b3f839d5abd0c4063cf735f43341
|
/Experiment/price_with_basic/JQ_Demo.py
|
7f7010abf4084044ea5f6bd90bd0cf54ea8c9477
|
[] |
no_license
|
alading241/MoDeng
|
948f2099e2f7e4548d6e477b6e06b833bdf4f9bb
|
01819e58943d7d1a414714d64aa531c0e99dfe22
|
refs/heads/master
| 2021-05-23T11:39:41.326804
| 2020-04-05T06:06:01
| 2020-04-05T06:06:01
| 253,269,397
| 1
| 0
| null | 2020-04-05T15:38:33
| 2020-04-05T15:38:33
| null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
# encoding=utf-8
# from JQData_Test.auth_info import *
import pandas as pd
from SDK.MyTimeOPT import convert_str_to_date
from matplotlib import pyplot as plt
import seaborn as sns
"""
使用JQ数据进行研究
"""
stk_code = normalize_code('000001')
# query valuation data for 300508
q = query(valuation.pe_ratio,
valuation.pb_ratio,
indicator.eps,
indicator.roe,
indicator.operating_profit,
indicator.net_profit_margin,
indicator.inc_revenue_annual,
indicator.inc_operation_profit_year_on_year,
indicator.inc_operation_profit_annual,
indicator.inc_net_profit_year_on_year,
indicator.inc_net_profit_annual
).filter(valuation.code.in_([stk_code]))
panel = get_fundamentals_continuously(q, end_date='2019-05-12', count=1200)
df_basic = panel.minor_xs(stk_code)
df_basic['date_str'] = df_basic.index
df_basic['date'] = df_basic.apply(lambda x: convert_str_to_date(x['date_str']), axis=1)
df_basic = df_basic.set_index('date')
# query closing prices
df_close = get_price(stk_code, start_date='2017-01-01', end_date='2019-05-12', frequency='daily', fields=None, skip_paused=False, fq='pre')
df_close = df_close.reset_index()
df_close['date'] = df_close.apply(lambda x: convert_str_to_date(str(x['index'])[:10]), axis=1)
df_close = df_close.set_index('date')
df_concat = pd.concat([df_basic, df_close], axis=1)\
.dropna(axis=0)\
.loc[:, [
'close',
'eps',
'pb_ratio',
'pe_ratio',
'roe',
'operating_profit',
'net_profit_margin',
'inc_revenue_annual',
'inc_operation_profit_year_on_year',
'inc_operation_profit_annual',
'inc_net_profit_year_on_year',
'inc_net_profit_annual']]
df_corr = df_concat.corr()
# sns.distplot(df_corr['close'])
df_corr['xlabel'] = df_corr.index
# draw a bar chart
sns.barplot(y='close', x='xlabel', data=df_corr)
plt.xticks(rotation=90)
plt.show()
"""
df_concat.corr()
画图
.corr()
"""
s
"""
#DataFrame的corr和cov方法将以DataFrame的形式返回完整的相关系数或协方差矩阵:
data.corr()
data.cov()
"""
end = 0
|
[
"1210055099@qq.com"
] |
1210055099@qq.com
|
4ede01c900ccfbb8f6ca47a02a125acfb6428bd3
|
be70e130f53c7703f942057923577adf607687a6
|
/src/biotite/file.pyi
|
a2139f3eef3966c7794d5746f092780f16322213
|
[
"BSD-3-Clause"
] |
permissive
|
Dr-Moreb/biotite
|
4043eadb607e9ede13ce049ade554546ce58afe0
|
c34ccb7a7a7de923bf8a238944dfb7e1e635bb28
|
refs/heads/master
| 2020-04-01T19:02:08.086093
| 2018-10-10T16:01:45
| 2018-10-10T16:01:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
pyi
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
from typing import Generic, TypeVar, Union, TextIO, BinaryIO
from .copyable import Copyable
_T_io = TypeVar("_T_io", TextIO, BinaryIO)
class File(Copyable, Generic[_T_io]):
def __init__(self) -> None: ...
def read(self, file: Union[str, _T_io]) -> None: ...
def write(self, file: Union[str, _T_io]) -> None: ...
class TextFile(File[TextIO]):
def __init__(self) -> None: ...
def read(self, file: Union[str, TextIO]) -> None: ...
def write(self, file: Union[str, TextIO]) -> None: ...
def __str__(self) -> str: ...
class InvalidFileError(Exception):
...
|
[
"patrick.kunzm@gmail.com"
] |
patrick.kunzm@gmail.com
|
f692be9989677461e8cb5d3829593c3c761017f4
|
bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75
|
/Design/implement_stack_using_queues.py
|
90c0e731a480e74cfb57531edf2b51ea640c72b6
|
[] |
no_license
|
harvi7/Leetcode-Problems-Python
|
d3a5e8898aceb11abc4cae12e1da50061c1d352c
|
73adc00f6853e821592c68f5dddf0a823cce5d87
|
refs/heads/master
| 2023-05-11T09:03:03.181590
| 2023-04-29T22:03:41
| 2023-04-29T22:03:41
| 222,657,838
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
from queue import Queue
class MyStack:
def __init__(self):
"""
Initialize your data structure here.
"""
self.stack = Queue()
    def push(self, x: int) -> None:
        """
        Push element x onto stack.
        """
        self.stack.put(x)
        # rotate the queue so the newest element comes to the front,
        # giving LIFO order on top of a FIFO queue
        i = 1
        while i < self.stack.qsize():
            i += 1
            self.stack.put(self.stack.get())
def pop(self) -> int:
"""
Removes the element on top of the stack and returns that element.
"""
return self.stack.get()
def top(self) -> int:
"""
Get the top element.
"""
top = self.stack.get()
self.push(top)
return top
def empty(self) -> bool:
"""
Returns whether the stack is empty.
"""
return self.stack.empty()
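# Minimal usage sketch (illustrative; not part of the original solution):
if __name__ == '__main__':
    s = MyStack()
    s.push(1)
    s.push(2)
    assert s.top() == 2      # 2 was pushed last, so it is on top
    assert s.pop() == 2      # ...and is removed first (LIFO)
    assert not s.empty()     # 1 is still on the stack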
|
[
"iamharshvirani7@gmail.com"
] |
iamharshvirani7@gmail.com
|
9fb7ec8bf614b85657848553a6966bddee75bccb
|
795df757ef84073c3adaf552d5f4b79fcb111bad
|
/r8lib/r83col_print_part.py
|
457c5bdb95cdf27c187fd4287858d3c0b6468785
|
[] |
no_license
|
tnakaicode/jburkardt-python
|
02cb2f9ba817abf158fc93203eb17bf1cb3a5008
|
1a63f7664e47d6b81c07f2261b44f472adc4274d
|
refs/heads/master
| 2022-05-21T04:41:37.611658
| 2022-04-09T03:31:00
| 2022-04-09T03:31:00
| 243,854,197
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,243
|
py
|
#! /usr/bin/env python
#
def r83col_print_part ( n, a, max_print, title ):
#*****************************************************************************80
#
## R83COL_PRINT_PART prints "part" of an R83COL.
#
# Discussion:
#
# An R83COL is a (3,N) array of R8's.
#
# The user specifies MAX_PRINT, the maximum number of lines to print.
#
# If N, the size of the vector, is no more than MAX_PRINT, then
# the entire vector is printed, one entry per line.
#
# Otherwise, if possible, the first MAX_PRINT-2 entries are printed,
# followed by a line of periods suggesting an omission,
# and the last entry.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 11 April 2015
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the number of entries of the vector.
#
# Input, real A(N,3), the vector to be printed.
#
# Input, integer MAX_PRINT, the maximum number of lines
# to print.
#
# Input, string TITLE, a title.
#
if ( 0 < max_print ):
if ( 0 < n ):
if ( 0 < len ( title ) ):
print ( '' )
print ( title )
print ( '' )
if ( n <= max_print ):
for i in range ( 0, n ):
print ( ' %4d %14g %14g %14g' % ( i, a[i,0], a[i,1], a[i,2] ) )
elif ( 3 <= max_print ):
for i in range ( 0, max_print - 2 ):
print ( ' %4d %14g %14g %14g' % ( i, a[i,0], a[i,1], a[i,2] ) )
print ( ' .... .............. .............. ..............' )
i = n - 1
print ( ' %4d %14g %14g %14g' % ( i, a[i,0], a[i,1], a[i,2] ) )
else:
for i in range ( 0, max_print - 1 ):
print ( ' %4d %14g %14g %14g' % ( i, a[i,0], a[i,1], a[i,2] ) )
i = max_print - 1
print ( ' %4d %14g %14g %14g ...more entries...' \
% ( i, a[i,0], a[i,1], a[i,2] ) )
return
def r83col_print_part_test ( ):
#*****************************************************************************80
#
## R83COL_PRINT_PART_TEST tests R83COL_PRINT_PART.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 11 April 2015
#
# Author:
#
# John Burkardt
#
import numpy as np
import platform
print ( '' )
print ( 'R83COL_PRINT_PART_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R83COL_PRINT_PART prints part of an R83COL.' )
n = 10
v = np.array ( [ \
[ 11, 12, 13 ], \
[ 21, 22, 23 ], \
[ 31, 32, 33 ], \
[ 41, 42, 43 ], \
[ 51, 52, 53 ], \
[ 61, 62, 63 ], \
[ 71, 72, 73 ], \
[ 81, 82, 83 ], \
[ 91, 92, 93 ], \
[ 101, 102, 103 ] ] )
max_print = 2
r83col_print_part ( n, v, max_print, ' Output with MAX_PRINT = 2' )
max_print = 5
r83col_print_part ( n, v, max_print, ' Output with MAX_PRINT = 5' )
max_print = 25
r83col_print_part ( n, v, max_print, ' Output with MAX_PRINT = 25' )
#
# Terminate.
#
print ( '' )
print ( 'R83COL_PRINT_PART_TEST:' )
print ( ' Normal end of execution.' )
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
r83col_print_part_test ( )
timestamp ( )
|
[
"tnakaicode@gmail.com"
] |
tnakaicode@gmail.com
|
7dda127c3c00949baafaf34b60be20df495fd4e2
|
2f4f6efd1963aa4e8e749f17b078720c437ae9ac
|
/time_process.py
|
b6100d34aff621e09ccb32022c48bbdd9b09ece0
|
[] |
no_license
|
FlashRepo/Flash-Storm
|
d248f4ed615096539048be1ec65f1fcb31f1d2ee
|
2dd953350a5d690c409996512dec30196d8e9199
|
refs/heads/master
| 2021-07-15T02:58:36.421446
| 2017-10-23T07:03:47
| 2017-10-23T07:03:47
| 107,943,717
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
import pickle
import numpy as np
times = pickle.load(open('time.p'))
problems = sorted(times.keys())
print ', '.join(['Problem', 'MOEAD', 'NSGAII', 'SPEA2'])
for problem in problems:
    print problem,
    algorithms = sorted(times[problem].keys())
    for algorithm in algorithms:
        print round(np.mean(times[problem][algorithm]), 3),
    print
|
[
"vivekaxl@gmail.com"
] |
vivekaxl@gmail.com
|
e329f90cf89024a973e4360bf56a43969742fe39
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5658571765186560_1/Python/jfguo/main.py
|
5cdb0cbc380443d34ead8f2010a9a0172066645e
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 999
|
py
|
import os
import sys
import glob
import subprocess
import random
import fileinput
next_line = 0
lines = [line.strip() for line in fileinput.input()]
def get_line():
global next_line
i = next_line
next_line += 1
return lines[i]
def calc():
s = get_line().split(' ')
X = int(s[0])
R = int(s[1])
C = int(s[2])
if R > C:
R, C = C, R
if R*C % X != 0:
return 'RICHARD'
if R < (X + 1)/2:
return 'RICHARD'
if X == 1:
return 'GABRIEL'
if X == 2:
return 'GABRIEL'
if X == 3:
return 'GABRIEL'
if X == 4:
if R == 2:
return 'RICHARD'
if R == 3:
return 'GABRIEL'
if R == 4:
return 'GABRIEL'
if X >= 7:
return 'RICHARD'
if R >= (X + 1)/2 + 1:
return 'GABRIEL'
if R*C <= 2*X:
return 'RICHARD'
return 'GABRIEL'
T = int(get_line())
for i in range(1, T + 1):
print('Case #%d: %s' % (i, calc()))
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
82660f2d91527c4a4c020cc6bd64fd6a5a183eed
|
be6e6d8af85adf044bf79676b7276c252407e010
|
/spec/python/test_params_call_extra_parens.py
|
5035e7de4b9cff4c0b219268de38ae4498a8fa5d
|
[
"MIT"
] |
permissive
|
kaitai-io/kaitai_struct_tests
|
516e864d29d1eccc5fe0360d1b111af7a5d3ad2b
|
3d8a6c00c6bac81ac26cf1a87ca84ec54bf1078d
|
refs/heads/master
| 2023-08-19T19:42:47.281953
| 2023-08-04T20:26:50
| 2023-08-04T20:26:50
| 52,155,797
| 12
| 41
|
MIT
| 2023-07-30T23:30:30
| 2016-02-20T13:55:39
|
Ruby
|
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from params_call_extra_parens import ParamsCallExtraParens
class TestParamsCallExtraParens(unittest.TestCase):
def test_params_call_extra_parens(self):
with ParamsCallExtraParens.from_file('src/term_strz.bin') as r:
self.assertEqual(r.buf1.body, u"foo|b")
|
[
"greycat@altlinux.org"
] |
greycat@altlinux.org
|
9e4a72cfaa5d511cbc35bccab33d6d759a585c40
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2976/49361/276731.py
|
c934f824ceb2d2636866731a7c096aa5606facba
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
import re
lines = []
while True:
try:
lines.append(input())
except:
break
outputStr = ""
for index in range(1, len(lines)):
afterStr = re.sub(lines[0].replace(" ", ""), "", lines[index].replace(" ", ""), flags=re.IGNORECASE)
outputStr += afterStr + "\n"
print(outputStr.strip("\n"))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
3f04bfd5dc6f5a148c9f8957f359a211e6e91bd0
|
0e834094f5e4274b279939b81caedec7d8ef2c73
|
/m2/d05/fork.py
|
0f760582ce1b6de41ccd05f7672b9e7300500889
|
[] |
no_license
|
SpringSnowB/All-file
|
b74eaebe1d54e1410945eaca62c70277a01ef0bf
|
03485c60e7c07352aee621df94455da3d466b872
|
refs/heads/master
| 2020-11-27T23:54:36.984555
| 2020-01-21T08:42:21
| 2020-01-21T08:42:21
| 229,651,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
import os
print("-----------------")
a = 1
pid = os.fork()  # the child process starts executing from here
if pid < 0:
    print("fail")
elif pid == 0:
    print("child a=", a)   # prints 1
    a = 10000
else:
    print("parent a=", a)  # prints 1
print("over a=", a)  # 10000 in the child, 1 in the parent
|
[
"tszxwsb@163.com"
] |
tszxwsb@163.com
|
e36502d092eb4d4f11257c8b70b8846bfef0a973
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/histogram/marker/colorbar/_yanchor.py
|
5d929c6570d7a6ea82df360ef7b79e5066a0b291
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="yanchor", parent_name="histogram.marker.colorbar", **kwargs
):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["top", "middle", "bottom"]),
**kwargs
)
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
a2b750917d590f2cad5dfbac283319542556e13c
|
fd717fe6ca74f6d77210cdd57a8c365d27c5bfc6
|
/pychron/experiment/utilities/mass_spec_utilities.py
|
e439853d44c9878143faa78ab7e9391af8a64b9f
|
[
"Apache-2.0"
] |
permissive
|
stephen-e-cox/pychron
|
1dea0467d904d24c8a3dd22e5b720fbccec5c0ed
|
681d5bfe2c13e514859479369c2bb20bdf5c19cb
|
refs/heads/master
| 2021-01-19T15:40:03.663863
| 2016-07-14T14:37:16
| 2016-07-14T14:37:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,294
|
py
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
# ============= standard library imports ========================
# ============= local library imports ==========================
os.environ['MassSpecDBVersion'] = '16'
from pychron.mass_spec.database.massspec_database_adapter import MassSpecDatabaseAdapter
from pychron.mass_spec.database.massspec_orm import AnalysesTable, IsotopeTable, DetectorTable
db = MassSpecDatabaseAdapter(bind=False)
db.host = '129.138.12.160'
db.name = 'massspecdata'
db.username = 'jross'
db.password = 'Jross40*39'
db.kind = 'mysql'
db.connect(test=False)
def fix_reference_detector(rd, aid):
with db.session_ctx() as sess:
q = sess.query(AnalysesTable)
q = q.filter(AnalysesTable.AnalysisID == aid)
record = q.one()
q = sess.query(DetectorTable)
q = q.join(IsotopeTable)
q = q.join(AnalysesTable)
q = q.filter(AnalysesTable.AnalysisID == aid)
for r in q.all():
if r.Label == rd:
print 'setting refid current={} new={}'.format(record.RefDetID, r.DetectorID)
record.RefDetID = r.DetectorID
def fix_reference_detectors(path):
with open(path) as rfile:
for line in rfile:
line = line.strip()
if line:
aid = int(line)
fix_reference_detector('H2', aid)
# break
path = '/Users/ross/Desktop/Untitled.csv'
fix_reference_detectors(path)
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
351ffee98beaac269739f12103d72257fded664c
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r9/Gen/DecFiles/options/12267141.py
|
fde8c5e02df48c536fc9078ec662b3e30932602c
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/12267141.py generated: Fri, 27 Mar 2015 16:10:10
#
# Event Type: 12267141
#
# ASCII decay Descriptor: [B+ -> (D~0 -> (KS0 -> pi+ pi-) K+ K-) K+ pi- pi+]cc
#
from Configurables import Generation
Generation().EventType = 12267141
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_D0Kpipi,KSKK=addResTuned,TightCut,PHSP.dec"
Generation().SignalRepeatedHadronization.CutTool = "LoKi::GenCutTool/TightCut"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
#
from Configurables import LoKi__GenCutTool
from Gauss.Configuration import *
Generation().SignalRepeatedHadronization.addTool ( LoKi__GenCutTool , 'TightCut' )
tightCut = Generation().SignalRepeatedHadronization.TightCut
tightCut.Decay = '^[B+ ==> ^(D~0 => ^(KS0 ==> ^pi+ ^pi-) ^K+ ^K-) ^K+ ^pi- ^pi+]CC'
tightCut.Preambulo += [
'GVZ = LoKi.GenVertices.PositionZ() ' ,
'from GaudiKernel.SystemOfUnits import millimeter',
'inAcc = (in_range (0.005, GTHETA, 0.400))',
'goodB = (GP > 55000 * MeV) & (GPT > 5000 * MeV) & (GTIME > 0.135 * millimeter)',
'goodD = (GP > 25000 * MeV) & (GPT > 2500 * MeV)',
'goodKS = (GFAEVX(abs(GVZ), 0) < 2500.0 * millimeter)',
'goodDDaugPi = (GNINTREE ((("K+" == GABSID) | ("pi+" == GABSID)) & (GP > 2000 * MeV) & inAcc, 4) > 3.5)',
'goodKsDaugPi = (GNINTREE (("pi+" == GABSID) & (GP > 2000 * MeV) & inAcc, 4) > 1.5)',
'goodBachKPia = (GNINTREE ((("K+" == GABSID) | ("pi+" == GABSID)) & (GP > 2000 * MeV) & (GPT > 100 * MeV) & inAcc, 4) > 4.5)',
'goodBachKPib = (GNINTREE ((("K+" == GABSID) | ("pi+" == GABSID)) & (GP > 2000 * MeV) & (GPT > 300 * MeV) & inAcc, 4) > 1.5)',
]
tightCut.Cuts = {
'[B+]cc' : 'goodB & goodBachKPia & goodBachKPib',
'[D0]cc' : 'goodD & goodDDaugPi',
'[KS0]cc' : 'goodKS & goodKsDaugPi',
'[pi+]cc' : 'inAcc'
}
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12267141
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
91f2f0326c39a0175aa8510fe1f285d347efdc54
|
fb383c3550cdcb1514df17a2e5d87b453240e4a5
|
/baekjoon/13706.py
|
ee0b9f933b5e253d683e4f1962ab3f59ba8fa62b
|
[] |
no_license
|
mingxoxo/Algorithm
|
5ba84afebe1b0125d43011403610619804c107be
|
dc352f591c6c8ed84f7dbbeb37a2df4178270605
|
refs/heads/master
| 2023-07-30T17:38:00.190966
| 2023-07-30T08:51:23
| 2023-07-30T08:51:23
| 205,862,682
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# Square root
# 23.05.11
# https://www.acmicpc.net/problem/13706
def square_root(n: int) -> int:
start, end = 1, n // 2
while start <= end:
mid = (start + end) // 2
if mid * mid == n:
return mid
elif mid * mid < n:
start = mid + 1
else:
end = mid - 1
return 1
N = int(input())
print(square_root(N))
|
[
"noreply@github.com"
] |
mingxoxo.noreply@github.com
|
ea57c7126e4dac9e0f5ce7e56a0f128fa3a91ef5
|
a0dfeb01fd15550961b7e15c504327ea37ce4dea
|
/home_and_login/migrations/0001_initial.py
|
0436e1fa98f1ffce003621db14932c8e78e495f9
|
[] |
no_license
|
theparadoxer02/Colossus
|
34c4d99f7d14caa0c464036d25b776dde31c4ec0
|
7d95024acea42b46b598923aef80080cd7890fa2
|
refs/heads/master
| 2021-01-19T22:59:25.057466
| 2017-09-25T18:03:37
| 2017-09-25T18:03:37
| 88,902,426
| 2
| 1
| null | 2020-07-11T12:45:14
| 2017-04-20T19:20:42
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-26 19:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='user_details',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=50)),
('profile_link', models.URLField(default='/welcome_user')),
('dob', models.DateField(null=True)),
('intro', models.CharField(max_length=200, null=True)),
('photo_link', models.URLField(default='/static/sitewide/anonymous-male.png')),
('followers_total', models.IntegerField(default=0)),
('following_total', models.IntegerField(default=0)),
('projects_total', models.IntegerField(default=0)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"abhimanyu98986@gmail.com"
] |
abhimanyu98986@gmail.com
|
ffd724c24a3e8a69f6d6d092e453a4544c9d2d41
|
311b08b547e51907fe88e177817f10e5754dedbe
|
/tests/test_module.py
|
6e800f1af7fe1dbdbb9dbd206ee1e3478bbd22a4
|
[
"BSD-3-Clause"
] |
permissive
|
kernc/dill
|
a17187545ffd6fa3c410ae4763579744f8238326
|
11effa42b8c486b2139125d094988f6f68595b3c
|
refs/heads/master
| 2020-07-05T14:11:30.681507
| 2016-11-16T12:27:42
| 2016-11-16T12:27:42
| 74,115,619
| 0
| 0
| null | 2016-11-18T09:34:14
| 2016-11-18T09:34:14
| null |
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/pathos/browser/dill/LICENSE
import sys
import dill
import test_mixins as module
try: from imp import reload
except ImportError: pass
dill.settings['recurse'] = True
cached = (module.__cached__ if hasattr(module, "__cached__")
else module.__file__.split(".", 1)[0] + ".pyc")
module.a = 1234
pik_mod = dill.dumps(module)
module.a = 0
# remove module
del sys.modules[module.__name__]
del module
module = dill.loads(pik_mod)
assert hasattr(module, "a") and module.a == 1234
assert module.double_add(1, 2, 3) == 2 * module.fx
# Restart, and test use_diff
reload(module)
try:
dill.use_diff()
module.a = 1234
pik_mod = dill.dumps(module)
module.a = 0
# remove module
del sys.modules[module.__name__]
del module
module = dill.loads(pik_mod)
assert hasattr(module, "a") and module.a == 1234
assert module.double_add(1, 2, 3) == 2 * module.fx
except AttributeError:
pass
# clean up
import os
os.remove(cached)
pycache = os.path.join(os.path.dirname(module.__file__), "__pycache__")
if os.path.exists(pycache) and not os.listdir(pycache):
os.removedirs(pycache)
# test when module is None
import math
def get_lambda(str, **kwarg):
return eval(str, kwarg, None)
obj = get_lambda('lambda x: math.exp(x)', math=math)
assert obj.__module__ is None
assert dill.copy(obj)(3) == obj(3)
# EOF
|
[
"mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df"
] |
mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df
|
9394c9c8fd1b27eac0dd7b4bc2d82db67266668f
|
1d2bbeda56f8fede69cd9ebde6f5f2b8a50d4a41
|
/easy/python3/c0009_28_implement-strstr/00_leetcode_0009.py
|
3f3da727753e9087912c82010101009b9d93cff2
|
[] |
no_license
|
drunkwater/leetcode
|
38b8e477eade68250d0bc8b2317542aa62431e03
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
refs/heads/master
| 2020-04-06T07:09:43.798498
| 2018-06-20T02:06:40
| 2018-06-20T02:06:40
| 127,843,545
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 892
|
py
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#28. Implement strStr()
#Implement strStr().
#Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
#Example 1:
#Input: haystack = "hello", needle = "ll"
#Output: 2
#Example 2:
#Input: haystack = "aaaaa", needle = "bba"
#Output: -1
#Clarification:
#What should we return when needle is an empty string? This is a great question to ask during an interview.
#For the purpose of this problem, we will return 0 when needle is an empty string. This is consistent to C's strstr() and Java's indexOf().
#class Solution:
# def strStr(self, haystack, needle):
# """
# :type haystack: str
# :type needle: str
# :rtype: int
# """
# Time Is Money
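# A minimal sketch of one possible solution (illustrative; the original file
# intentionally leaves the body commented out):
#
# class Solution:
#     def strStr(self, haystack, needle):
#         if not needle:
#             return 0                      # empty needle returns 0, per the problem
#         n, m = len(haystack), len(needle)
#         for i in range(n - m + 1):        # try every alignment
#             if haystack[i:i + m] == needle:
#                 return i                  # index of the first occurrence
#         return -1                         # needle not found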
|
[
"Church.Zhong@audiocodes.com"
] |
Church.Zhong@audiocodes.com
|
80a26aad0a1f115f682e53ed5d47c9cbfd137809
|
03f9666687a147bfd6bace2adfbab6de8879e207
|
/plugins/action/device_credential.py
|
2d88230b22e84c11fad07c6f85152d6c6fde77a6
|
[
"MIT"
] |
permissive
|
robertcsapo/dnacenter-ansible
|
a221c8dc6ab68d6ccbc710e5e5f3061b90b0de59
|
33f776f8c0bc7113da73191c301dd1807e6b4a43
|
refs/heads/main
| 2023-07-17T08:27:59.902108
| 2021-09-06T15:58:05
| 2021-09-06T15:58:05
| 376,349,036
| 0
| 0
|
MIT
| 2021-06-12T17:32:27
| 2021-06-12T17:32:27
| null |
UTF-8
|
Python
| false
| false
| 2,797
|
py
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.dnac.plugins.module_utils.dnac import (
ModuleDefinition,
DNACModule,
dnac_argument_spec,
)
from ansible_collections.cisco.dnac.plugins.module_utils.definitions.device_credential import (
module_definition,
)
IDEMPOTENT = False
# Instantiate the module definition for this module
moddef = ModuleDefinition(module_definition)
# Get the argument spec for this module and add the 'state' param,
# which is common to all modules
argument_spec = moddef.get_argument_spec_dict()
argument_spec.update(dict(dnac_argument_spec(idempotent=IDEMPOTENT)))
# Get the schema conditionals, if applicable
required_if = moddef.get_required_if_list()
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = False
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(required_if=required_if),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
dnac = DNACModule(
moddef=moddef,
params=self._task.args,
verbosity=self._play_context.verbosity,
)
state = self._task.args.get("state")
if state == "query":
dnac.exec("get")
elif state == "delete":
dnac.exec("delete")
elif state == "create":
dnac.disable_validation()
dnac.exec("post")
elif state == "update":
dnac.disable_validation()
dnac.exec("put")
self._result.update(dnac.exit_json())
return self._result
|
[
"rcampos@altus.cr"
] |
rcampos@altus.cr
|
9f5e950d9099755d4c2e22f43dda1ea777edf4d7
|
4a0ed9c079286428e44bf8bcfc82034dac041897
|
/gallery/views.py
|
6afa3b8cfd519dce4b362fd22970809cab865f38
|
[
"MIT"
] |
permissive
|
lilianwaweru/Gallery
|
3b57fed326e1d868c83944037fe203f35204a650
|
de9b02e59dac22f45df8c7cbc0570fe7ac685d3f
|
refs/heads/master
| 2020-05-21T07:06:38.468356
| 2019-05-14T13:03:09
| 2019-05-14T13:03:09
| 185,950,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
from django.shortcuts import render
from django.http import Http404
from .models import Image,Category,Location
# Create your views here.
def welcome(request):
images = Image.objects.all()
return render(request,'welcome.html',{'images':images})
def search_category(request):
if 'category' in request.GET and request.GET["category"]:
search_term = (request.GET.get("category")).title()
searched_images = Image.search_by_category(search_term)
message = f"{search_term}"
return render(request, 'all-gallery/search.html',{"message":message,"images": searched_images})
else:
message = "You haven't searched for any category"
return render(request, 'all-gallery/search.html',{"message":message})
def display_location(request,location_id):
try:
locations = Location.objects.all()
location = Location.objects.get(id = location_id)
images = Image.objects.filter(image_location = location.id)
except:
raise Http404()
return render(request,'location.html',{'location':location,'images':images,'locations':locations})
|
[
"lilowesh.lw@gmail.com"
] |
lilowesh.lw@gmail.com
|
9855c9b0200842716863d96c8193f9f091dcf658
|
70bcdd97318a85acc5bc3f4d47afde696fb7a33b
|
/jqdata/gta_tables/MAC_INDUSTRY_EMPLOYWAGEQ.py
|
2ebaaaaaf34a003becdbd973ddf86f715001b314
|
[] |
no_license
|
Inistlwq/tulipquant-code
|
f78fe3c4238e98014f6d4f36735fb65a8b88f60d
|
5959bfe35b6ae2e0e2a204117bda66a13893c64c
|
refs/heads/master
| 2020-03-31T11:25:12.145593
| 2018-04-22T02:16:16
| 2018-04-22T02:16:16
| 152,175,727
| 2
| 0
| null | 2018-10-09T02:31:34
| 2018-10-09T02:31:33
| null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
# coding: utf-8
from sqlalchemy import BigInteger, Column, DateTime, Integer, Numeric, SmallInteger, String, Table, Text, text
from sqlalchemy.dialects.mysql.base import LONGBLOB
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class MAC_INDUSTRY_EMPLOYWAGEQ(Base):
__tablename__ = "MAC_INDUSTRY_EMPLOYWAGEQ"
SGNQUARTER = Column(String(14, u'utf8_bin'), primary_key=True, nullable=False)
INDUSTRYID = Column(String(20, u'utf8_bin'), primary_key=True, nullable=False)
EMPLOY = Column(Numeric(18, 4))
STAFF = Column(Numeric(18, 4))
EMPLOYPAY = Column(Numeric(18, 4))
STAFFWAGE = Column(Numeric(18, 4))
|
[
"wonghiu45@163.com"
] |
wonghiu45@163.com
|
f35954923107394313f1f954760bca011b8ce868
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp-with-texts/CISCO-EVC-CAPABILITY.py
|
454b6d6567630d64f7fd37b6bc76b1264ec7ba05
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395
| 2019-08-16T15:51:41
| 2019-08-16T15:53:57
| 237,512,469
| 0
| 0
|
Apache-2.0
| 2020-01-31T20:41:36
| 2020-01-31T20:41:35
| null |
UTF-8
|
Python
| false
| false
| 3,608
|
py
|
#
# PySNMP MIB module CISCO-EVC-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-EVC-CAPABILITY
# Produced by pysmi-0.3.4 at Wed May 1 11:57:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
AgentCapabilities, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "AgentCapabilities", "NotificationGroup", "ModuleCompliance")
iso, ModuleIdentity, Bits, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, Integer32, NotificationType, MibIdentifier, TimeTicks, Unsigned32, ObjectIdentity, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "ModuleIdentity", "Bits", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "Integer32", "NotificationType", "MibIdentifier", "TimeTicks", "Unsigned32", "ObjectIdentity", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoEvcCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 568))
ciscoEvcCapability.setRevisions(('2008-08-26 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoEvcCapability.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoEvcCapability.setLastUpdated('200808260000Z')
if mibBuilder.loadTexts: ciscoEvcCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoEvcCapability.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-ethermibs@cisco.com')
if mibBuilder.loadTexts: ciscoEvcCapability.setDescription('Agent capabilities for the CISCO-EVC-MIB.')
ciscoEvcCapabilityV12R02SR = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 568, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEvcCapabilityV12R02SR = ciscoEvcCapabilityV12R02SR.setProductRelease('Cisco IOS 12.2 SR Release')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEvcCapabilityV12R02SR = ciscoEvcCapabilityV12R02SR.setStatus('current')
if mibBuilder.loadTexts: ciscoEvcCapabilityV12R02SR.setDescription('CISCO-EVC-MIB capabilities.')
ciscoEvcCapabilityV12R02XO = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 568, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEvcCapabilityV12R02XO = ciscoEvcCapabilityV12R02XO.setProductRelease('Cisco IOS 12.2 XO Release.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoEvcCapabilityV12R02XO = ciscoEvcCapabilityV12R02XO.setStatus('current')
if mibBuilder.loadTexts: ciscoEvcCapabilityV12R02XO.setDescription('CISCO-EVC-MIB capabilities.')
mibBuilder.exportSymbols("CISCO-EVC-CAPABILITY", ciscoEvcCapability=ciscoEvcCapability, ciscoEvcCapabilityV12R02SR=ciscoEvcCapabilityV12R02SR, PYSNMP_MODULE_ID=ciscoEvcCapability, ciscoEvcCapabilityV12R02XO=ciscoEvcCapabilityV12R02XO)
|
[
"dcwangmit01@gmail.com"
] |
dcwangmit01@gmail.com
|
eb09ae376760615f5b04d20e921dc431942061bc
|
c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105
|
/vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/stdlib/3/posix.pyi
|
d99a4584d6d9a6efe91e80408d29b4bf3a3e89ff
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ryangniadek/.dotfiles
|
ddf52cece49c33664b56f01b17d476cf0f1fafb1
|
be272baf6fb7d7cd4f4db1f6812b710196511ffe
|
refs/heads/master
| 2021-01-14T07:43:12.516127
| 2020-03-22T20:27:22
| 2020-03-22T20:27:22
| 242,632,623
| 0
| 0
|
MIT
| 2020-09-12T17:28:01
| 2020-02-24T02:50:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,650
|
pyi
|
# Stubs for posix
# NOTE: These are incomplete!
from typing import NamedTuple, Tuple
class stat_result:
# For backward compatibility, the return value of stat() is also
# accessible as a tuple of at least 10 integers giving the most important
# (and portable) members of the stat structure, in the order st_mode,
# st_ino, st_dev, st_nlink, st_uid, st_gid, st_size, st_atime, st_mtime,
# st_ctime. More items may be added at the end by some implementations.
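    # For example (illustrative): given st = os.stat("somefile"),
    # st[0] is st.st_mode and st[6] is st.st_size.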
st_mode: int # protection bits,
st_ino: int # inode number,
st_dev: int # device,
st_nlink: int # number of hard links,
st_uid: int # user id of owner,
st_gid: int # group id of owner,
st_size: int # size of file, in bytes,
st_atime: float # time of most recent access,
st_mtime: float # time of most recent content modification,
st_ctime: float # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows)
st_atime_ns: int # time of most recent access, in nanoseconds
st_mtime_ns: int # time of most recent content modification in nanoseconds
st_ctime_ns: int # platform dependent (time of most recent metadata change on Unix, or the time of creation on Windows) in nanoseconds
# not documented
def __init__(self, tuple: Tuple[int, ...]) -> None: ...
# On some Unix systems (such as Linux), the following attributes may also
# be available:
st_blocks: int # number of blocks allocated for file
st_blksize: int # filesystem blocksize
st_rdev: int # type of device if an inode device
st_flags: int # user defined flags for file
# On other Unix systems (such as FreeBSD), the following attributes may be
# available (but may be only filled out if root tries to use them):
st_gen: int # file generation number
st_birthtime: int # time of file creation
# On Mac OS systems, the following attributes may also be available:
st_rsize: int
st_creator: int
st_type: int
uname_result = NamedTuple('uname_result', [('sysname', str), ('nodename', str),
('release', str), ('version', str), ('machine', str)])
times_result = NamedTuple('times_result', [
('user', float),
('system', float),
('children_user', float),
('children_system', float),
('elapsed', float),
])
waitid_result = NamedTuple('waitid_result', [
('si_pid', int),
('si_uid', int),
('si_signo', int),
('si_status', int),
('si_code', int),
])
sched_param = NamedTuple('sched_priority', [
('sched_priority', int),
])
|
[
"ryan@gniadek.net"
] |
ryan@gniadek.net
|
cb5a7ad60c72cc52d78ddfbdca5cecf634886a08
|
539815f896acbc88b72338992f1adcd55bd7700f
|
/demo/movie_svc/app_instance.py
|
d7fce6337abd89e14700e0110df3a57cb570f72d
|
[
"MIT"
] |
permissive
|
talkpython/responder-webframework-minicourse
|
dcb0f38ead081b75a536aca99c6f52fc172c1c0e
|
321d52d8ddb434952f373a127b51ef3bbfbeb6af
|
refs/heads/master
| 2021-06-16T13:39:19.149560
| 2021-03-11T20:29:24
| 2021-03-11T20:29:24
| 178,065,735
| 29
| 21
|
MIT
| 2021-03-11T20:29:25
| 2019-03-27T19:58:30
| null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
import responder
# CORS wasn't demoed in the course, but is required to be used from
# external apps like movie exploder.
cors_params = {
'allow_origins': '*',
'allow_methods': '*',
}
api = responder.API(cors=True, cors_params=cors_params)
|
[
"mikeckennedy@gmail.com"
] |
mikeckennedy@gmail.com
|
64876e9ed6c56a785bda85f43297a2f5c6c1aaa3
|
5f8baed3acceaf7b3127f8fbe0ed417070c0e809
|
/DiSAN/src/utils/logger.py
|
81b83bca491c2b056252a64abcb03365dde710a0
|
[
"MIT"
] |
permissive
|
satwik77/Transformer-Computation-Analysis
|
ead241d848af51fefd85fe365a3ff87b9251bac5
|
82341f5f2f9cd0831e390f44b338165e45cd6413
|
refs/heads/main
| 2022-12-29T01:32:12.081865
| 2020-10-10T07:04:27
| 2020-10-10T07:04:27
| 301,588,833
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
import logging
import pdb
import pandas as pd
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
import json
'''Logging Modules'''
#log_format='%(asctime)s | %(levelname)s | %(filename)s:%(lineno)s - %(funcName)5s() ] | %(message)s'
def get_logger(name, log_file_path='./logs/temp.log', logging_level=logging.INFO, log_format='%(asctime)s | %(levelname)s | %(filename)s: %(lineno)s : %(funcName)s() ::\t %(message)s'):
logger = logging.getLogger(name)
logger.setLevel(logging_level)
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(log_file_path, mode='w')
file_handler.setLevel(logging_level)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging_level)
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
# logger.addFilter(ContextFilter(expt_name))
return logger
def print_log(logger, dict):
string = ''
for key, value in dict.items():
string += '\n {}: {}\t'.format(key.replace('_', ' '), value)
# string = string.strip()
logger.info(string)
def store_results(config, bleu_score, error_score):
#pdb.set_trace()
try:
with open(config.result_path) as f:
res_data =json.load(f)
except:
res_data = {}
#try:
data= {'run_name' : str(config.run_name)
, 'best bleu score' : str(bleu_score)
, 'minimum error' : str(error_score)
, 'dataset' : config.dataset
, 'd_model' : config.d_model
, 'd_ff' : config.d_ff
, 'layers' : config.layers
, 'heads': config.heads
, 'dropout' : config.dropout
, 'lr' : config.lr
, 'batch_size' : config.batch_size
, 'epochs' : config.epochs
}
# res_data.update(data)
res_data[str(config.run_name)] = data
with open(config.result_path, 'w', encoding='utf-8') as f:
json.dump(res_data, f, ensure_ascii= False, indent= 4)
#except:
# pdb.set_trace()
def store_val_results(config, acc_score):
#pdb.set_trace()
try:
with open(config.val_result_path) as f:
res_data = json.load(f)
except:
res_data = {}
try:
data= {'run_name' : str(config.run_name)
, 'acc score': str(acc_score)
, 'dataset' : config.dataset
, 'emb1_size': config.emb1_size
, 'emb2_size': config.emb2_size
, 'cell_type' : config.cell_type
, 'hidden_size' : config.hidden_size
, 'depth' : config.depth
, 'dropout' : config.dropout
, 'init_range' : config.init_range
, 'bidirectional' : config.bidirectional
, 'lr' : config.lr
, 'batch_size' : config.batch_size
, 'opt' : config.opt
, 'use_word2vec' :config.use_word2vec
}
# res_data.update(data)
res_data[str(config.run_name)] = data
with open(config.val_result_path, 'w', encoding='utf-8') as f:
json.dump(res_data, f, ensure_ascii= False, indent= 4)
except:
pdb.set_trace()
|
[
"satwik55@gmail.com"
] |
satwik55@gmail.com
|
4344dd113c53ec44e77b7beb867a74a0a9abcdd1
|
773f6abee91e5368e43b34d8ad179c4ab9056da1
|
/gen/referencegenome.py
|
5733374a02094277fbde4887efd4b26c7b446068
|
[] |
no_license
|
richstoner/aibs
|
3dc9489ee6a1db836d58ec736b13d35a7cffc215
|
bfc7e732b53b4dff55f7c3edccdd0703f4bab25f
|
refs/heads/master
| 2021-01-10T05:11:09.484238
| 2013-03-03T06:19:34
| 2013-03-03T06:19:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
# -*- coding: utf-8 -*-
# Rich Stoner, 2013
class ReferenceGenome(object):
    '''aibs.model.referencegenome (autogen)'''

    def __init__(self, initialData={}):
        # Fields
        self.id = 0
        self.name = ''
        self.build = ''
        self.organism_id = 0
        # Associations
        self.organism = None         # belongs_to Organism
        self.genome_locuses = []     # has_many GenomeLocus
        for k, v in initialData.iteritems():
            setattr(self, k, v)

    # add class methods and private methods here
|
[
"stonerri@gmail.com"
] |
stonerri@gmail.com
|
6dc7210e4f8cd00b9dae94dcc3d074d9cbffc1d3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02686/s844095657.py
|
f1ab4403c5eb0d0f166d68bd4338ad075c24471f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
N = int(input())
S = [input() for _ in range(N)]
def solve() :
T = []
for s in S :
open = 0
close = 0
for c in s :
if c == ')' :
if open > 0 :
open -= 1
else :
close += 1
else :
open += 1
T.append((open, close))
if sum(op - cl for op, cl in T) != 0 :
return 'No'
inc = []
dec = []
for op, cl in T :
if op >= cl :
inc.append((cl, op))
else :
dec.append((op, cl))
inc.sort()
open = 0
for cl, op in inc :
if open >= cl :
open += op - cl
else :
return 'No'
close = 0
dec.sort()
for op, cl in dec :
if close >= op :
close += cl - op
else :
return 'No'
return 'Yes'
print(solve())
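# Illustrative run (assumed input): N=2 with the lines "(" and ")" prints
# "Yes", since the two pieces can be ordered to form "()".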
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fd06fd94704d0b738825aa9fc484c78bdf8ee26e
|
933f2a9f155b2a4f9746bf2020d1b828bfe49e81
|
/python基础/day1/if 语句.py
|
fb1eb4aef52c801e5390c18364af092d427d9f15
|
[] |
no_license
|
WuAlin0327/python3-notes
|
d65ffb2b87c8bb23d481ced100d17cda97aef698
|
1d0d66900f6c4b667b3b84b1063f24ee7823e1bb
|
refs/heads/master
| 2020-03-26T04:49:34.937700
| 2018-12-31T11:12:58
| 2018-12-31T11:12:58
| 144,524,404
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
monny = int(input("How much money do you have: "))
if monny > 5000:
    print("I want to buy a MacBook")
elif monny >= 3000:
    print("I want to buy an iPad")
elif monny >= 2000:
    print("buy a phone")
else:
    print("no money")
|
[
"1032298871@qq.com"
] |
1032298871@qq.com
|
cf37da3f5b81520ea9ba19cc258a0363291042d6
|
89bcfc45d70a3ca3f0f1878bebd71aa76d9dc5e2
|
/scrapy_demo/sina_news/sina_news/middlewares.py
|
819f2c24c10afaf9fbaced6ef0f1b0f49ec5c423
|
[] |
no_license
|
lichao20000/python_spider
|
dfa95311ab375804e0de4a31ad1e4cb29b60c45b
|
81f3377ad6df57ca877463192387933c99d4aff0
|
refs/heads/master
| 2022-02-16T20:59:40.711810
| 2019-09-10T03:13:07
| 2019-09-10T03:13:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,601
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SinaNewsSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class SinaNewsDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
        # Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"64174469@qq.com"
] |
64174469@qq.com
|
e1ee11936044cee591fa34caea14fe7c48692724
|
a990bd26d3a69d1ea6699c85efa2cea99452c3df
|
/pytriplets/pythagoreanTriplets.py
|
9800357dc2720a809abc7bcffed191203f31baa3
|
[] |
no_license
|
abecus/DS-and-Algorithms
|
5f1a948a085465ae165090ec957a9d5307ce729d
|
3259e8183382265a27cf8c91e37d0086175a5703
|
refs/heads/master
| 2022-05-05T07:07:08.194243
| 2022-04-05T16:23:39
| 2022-04-05T16:23:39
| 193,111,610
| 11
| 6
| null | 2020-11-18T16:19:18
| 2019-06-21T14:27:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
from math import ceil, sqrt
def EratosthenesSieve(N:int)-> list:
'''
Calculating SPF (Smallest Prime Factor)
for every number till N.
Time Complexity : O(NloglogN)
'''
N+=1
# stores smallest prime factor for every number
spf = [*range(N)]
# separately marking spf for every even number as 2
for i in range(4, N, 2):
spf[i] = 2
for i in range(3, ceil(sqrt(N))):
# checking if i is prime
if (spf[i] == i):
# marking SPF for all numbers divisible by i
for j in range(i * i, N, i):
# marking spf[j] if it is not previously marked
if (spf[j] == j):
spf[j] = i
return spf
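# Illustrative check (assumed): EratosthenesSieve(10) returns
# [0, 1, 2, 3, 2, 5, 2, 7, 2, 3, 2], e.g. spf[9] == 3 and spf[8] == 2.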
def getReducedFactorization(N:int, spf:list)-> int:
"""
    Counts the multiplicity of each prime in the factorisation of N
    by trial division over the spf list, then multiplies together
    pow(p, ceil(a/2)) for every prime power p**a in that
    factorisation.
"""
gamma = 1
while (N!=1):
# keep a prime in prev variable
prev=spf[N]
# for counting the power
c=0
# counts power of a prime
while spf[N]==prev:
c+=1
N//=spf[N]
# multiplies the half ceil of power on primes
gamma*=pow(prev, ceil(c/2))
prev=spf[N]
return gamma
def pythagoreanTriplets(n):
# calculate spf array
spf=EratosthenesSieve((n - int(sqrt((n<<1) -1)))<<1)
# keeps the triplet count
tripletCount=0
    # loop over every value of 2*b
for b2 in range(4, (n - int(sqrt((n<<1) -1)))<<1, 2):
# calculates reduced factor of 2*b
gamma=getReducedFactorization(b2, spf)
        # find all triplets arising from this value of 2*b
for i in range(1, int(sqrt(b2*((b2>>1)-1)))//gamma+1):
i*=gamma
sqVal = i*i
q=sqVal//b2
            # if z = q+i+(b2>>1) exceeds n, stop; otherwise record the triplet
            if q+i+(b2>>1)>n:
                break
            else:
                # print the triplet and count it (the increment was commented out
                # in the original, which made the function always return 0)
                x=q+i
                print((x, (b2>>1)+i, x+(b2>>1)))
                tripletCount+=1
return tripletCount
if __name__ == "__main__":
n=100
print(pythagoreanTriplets(n))
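# --- Hedged example (editor's addition): an O(n^2) brute-force cross-check for small n,
# counting triplets (x, y, z) with x < y and z <= n; useful for validating the fast path
# above (once its tripletCount increment is enabled).
def brute_force_triplets(n):
    count = 0
    for x in range(1, n + 1):
        for y in range(x + 1, n + 1):
            z2 = x * x + y * y
            z = int(round(sqrt(z2)))  # round before truncating to dodge float error
            if z <= n and z * z == z2:
                count += 1
    return count
# e.g. brute_force_triplets(100) can be compared against pythagoreanTriplets(100)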
|
[
"insaaone@gmail.com"
] |
insaaone@gmail.com
|
4ce7e9375fb540a78e89c6052c9ac31834889e7a
|
90f2cbe1c940a20dcc893837b6033a51d3233931
|
/python 进阶/面向对象5.py
|
e0cdc5256497aaf75db084c7e20d655c6faec438
|
[] |
no_license
|
MaxNcu/Learn_Python
|
71501f38f6442f3ff2a1de1ff685b8975e50af20
|
5a1c6edf353ed7447b2ffd4126ad7668d8c5a407
|
refs/heads/master
| 2022-01-15T18:56:04.814476
| 2019-07-20T03:02:02
| 2019-07-20T03:02:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2018/5/3 0003 17:27
# @Author : Langzi
# @Blog : www.langzi.fun
# @File : 面向对象5.py
# @Software: PyCharm
import sys
import requests
reload(sys)
sys.setdefaultencoding('utf-8')
class gg:
    url = 0
    stat = 0
    # class attributes are declared up front because the classmethod below
    # builds new instances that fill them in
    def __init__(self,url=0,stat=0):
        # an ordinary constructor
        self.url=url
        self.stat=stat
    @classmethod
    # decorator: the method below receives the class itself as its first argument
    def split(cls,info):
        # takes two parameters: cls is the class itself (not an instance),
        # info is the string passed in by the caller
        url,stat=map(str,info.split('-'))
        # parse the string into its two fields
        data = cls(url,stat)
        # cls(url, stat) invokes the constructor, which expects these two arguments
        return data
        # and the freshly built instance is returned
    def outer(self):
        print self.url
        print self.stat
r = gg.split('langzi-200')
r.outer()
# a class method is called just like an instance method
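# --- Hedged example (editor's addition): the same "alternative constructor" pattern
# written more idiomatically; the class name is illustrative, and this also runs
# under the Python 2 used above.
class GgV2(object):
    def __init__(self, url=0, stat=0):
        self.url = url
        self.stat = stat
    @classmethod
    def split(cls, info):
        # cls is the class itself, so this keeps working in subclasses of GgV2
        url, stat = info.split('-')
        return cls(url, stat)
GgV2.split('langzi-200')  # returns a GgV2 instance with url='langzi', stat='200'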
|
[
"982722261@qq.com"
] |
982722261@qq.com
|
9d3d24b465ffb8dc9148555c52358627c3f4e05b
|
3551f1150dee2772b1949a199250e4960a71989e
|
/focusgrouplogs/web.py
|
0391b7ec78d6461332a0c6ec9d81af8e275f140c
|
[
"MIT"
] |
permissive
|
ccpgames/focusgrouplogs-frontend
|
868f4398fb5e965f3a27f66bbba46086dc6906c6
|
42bd2bac04bbdc49d87ed9218f6b32a1d239c1ee
|
refs/heads/master
| 2021-01-17T06:38:04.869762
| 2018-05-08T18:02:28
| 2018-05-08T18:02:28
| 50,437,131
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
"""Web routes for focusgrouplogs."""
import os
import sys
import traceback
from flask import render_template
from flask import Response
from focusgrouplogs import app
from focusgrouplogs import cache
from focusgrouplogs import FOCUS_GROUPS
from focusgrouplogs.datastore import all_content
from focusgrouplogs.datastore import log_content
from focusgrouplogs.datastore import log_metadata
@cache.cached(timeout=None, key_prefix="inline-css")
def get_style():
"""Reads and returns the inline css styling."""
style = os.path.join(os.path.dirname(__file__), "templates", "style.css")
with open(style, "r") as opencss:
return opencss.read().strip()
@app.route("/<regex('({})'):group>/<date>/".format("|".join(FOCUS_GROUPS)),
methods=["GET"])
@cache.memoize(timeout=60)
def group_get(group, date):
"""Displays the most recent day for a group (or specific)."""
if date is None:
group_logs = all_content(group)
else:
group_logs = [log_content(group, date)]
return render_template(
"logs.html",
focus_group=group,
log_days=group_logs,
css=get_style(),
)
@app.route("/", methods=["GET"])
@cache.cached(timeout=3600)
def main_index():
"""Displays links to the focus groups, fairly static."""
return render_template(
"index.html",
groups=[{"name": f, "logs": log_metadata(f)} for f in FOCUS_GROUPS],
css=get_style(),
)
@app.route("/ping", methods=["GET"])
def ping_response():
"""Return a static 200 OK response."""
return Response("ok", status=200)
def traceback_formatter(excpt, value, tback):
"""Catches all exceptions and re-formats the traceback raised."""
sys.stdout.write("".join(traceback.format_exception(excpt, value, tback)))
def hook_exceptions():
"""Hooks into the sys module to set our formatter."""
if hasattr(sys.stdout, "fileno"): # when testing, sys.stdout is StringIO
# reopen stdout in non buffered mode
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
# set the hook
sys.excepthook = traceback_formatter
def paste(*_, **settings):
"""For paste, start and return the Flask app."""
hook_exceptions()
return app
def main():
"""Debug/cmdline entry point."""
paste().run(
host="0.0.0.0",
port=8080,
debug=True,
use_reloader=False,
)
if __name__ == "__main__":
main()
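# --- Hedged example (editor's addition): the routes above use a custom "regex" URL
# converter, which Flask does not ship by default; it is presumably registered on `app`
# elsewhere in the package. A minimal sketch of such a converter:
# from werkzeug.routing import BaseConverter
#
# class RegexConverter(BaseConverter):
#     def __init__(self, url_map, *items):
#         super(RegexConverter, self).__init__(url_map)
#         self.regex = items[0]
#
# app.url_map.converters['regex'] = RegexConverter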
|
[
"github@talsma.ca"
] |
github@talsma.ca
|
39fd5781c172d7c39966c2f8e8ac762b9ae943b6
|
6b64338c3453d896310a381929fdf61cd846bbb7
|
/biaobei-pretrain/tacotron/utils/symbols.py
|
f1e84a10e8d9e07c6bc1ba5b035ec7a4a17c205e
|
[] |
no_license
|
Tubbz-alt/Taco_Collection
|
b0e9234ca8309300783b6a258adb0255d3119f93
|
fb30bab5231c5c22ff03184f428aa43a0700d47d
|
refs/heads/master
| 2022-02-28T21:41:15.275047
| 2019-09-23T14:45:54
| 2019-09-23T14:45:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 892
|
py
|
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run
through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.
'''
import os
import glob
AUTO_DETECT_SYMBOLS=True
train_text_files = glob.glob(os.path.join("../../female_golden_v2","*.corpus"))
if train_text_files and AUTO_DETECT_SYMBOLS:
_characters = set()
for file in train_text_files:
with open(file,"rb") as fin:
for line in fin:
line = line.decode().split("|")[1]
_characters = _characters.union(line)
else:
_characters = "12345abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,。!? #*$%"
print(_characters)
_pad = "_"
_eos = "~"
symbols = [_pad,_eos]+list(_characters)
print("all symbols is {}".format(symbols))
|
[
"ascyx1218@163.com"
] |
ascyx1218@163.com
|
165877fef4819cb2279ba767229c479c1f17b7e2
|
a26c8bbd67c614354c72c3eade71981adea28eea
|
/src/main/resources/devops-as-code/add_ci_to_env.py
|
9f51d350a015afba8071c5eea1e0e26b41597529
|
[
"MIT"
] |
permissive
|
xebialabs-community/xld-ansible-step-plugin
|
481e25350728f50cebcb5a15c64e8d388b2d16ed
|
c05eec5767214ed91f6e42819212bf0bc5d164b6
|
refs/heads/master
| 2021-06-16T01:56:12.030162
| 2021-02-08T12:19:36
| 2021-02-08T12:40:12
| 140,391,653
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,061
|
py
|
#
# Copyright 2021 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import com.xebialabs.deployit.plugin.api.reflect.Type as Type
def query_all_containers(ci_id, results):
# print("query {0}".format(ci_id))
result = repositoryService.query(Type.valueOf('udm.Container'), ci_id, None, '', None, None, 0, -1)
sub_result = []
for sub_ci in result:
results.append(sub_ci)
query_all_containers(sub_ci.id, sub_result)
results.extend(sub_result)
print("environment {0}".format(environment))
print("provisioned_host {0}".format(provisioned_host))
list_of_ci = []
query_all_containers(provisioned_host.id, list_of_ci)
members = environment.members
boundConfigurationItems = deployed.boundConfigurationItems
for ci in list_of_ci:
print("Found {0}".format(ci))
read_ci = repositoryService.read(ci.id)
members.add(read_ci)
boundConfigurationItems.add(read_ci)
environment.members = members
deployed.boundConfigurationItems = boundConfigurationItems
print(environment.members)
repositoryService.update([environment])
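# --- Hedged example (editor's addition): the same recursive-accumulation pattern on a
# plain dict tree, runnable outside XL Deploy (no repositoryService needed):
def _walk(node, results):
    # depth-first: record each child's id, then descend into it
    for child in node.get("children", []):
        results.append(child["id"])
        _walk(child, results)
_found = []
_walk({"id": "host", "children": [{"id": "host/tomcat", "children": []}]}, _found)
assert _found == ["host/tomcat"]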
|
[
"bmoussaud@xebialabs.com"
] |
bmoussaud@xebialabs.com
|
9d5c116670e57e518c30bc5967961c6a87ecc804
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/System/__init___parts/PlatformNotSupportedException.py
|
01352985b0163c59e7d7ded23ed6bdf1af466c25
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
class PlatformNotSupportedException(NotSupportedException,ISerializable,_Exception):
"""
The exception that is thrown when a feature does not run on a particular platform.
PlatformNotSupportedException()
PlatformNotSupportedException(message: str)
PlatformNotSupportedException(message: str,inner: Exception)
"""
def add_SerializeObjectState(self,*args):
""" add_SerializeObjectState(self: Exception,value: EventHandler[SafeSerializationEventArgs]) """
pass
def remove_SerializeObjectState(self,*args):
""" remove_SerializeObjectState(self: Exception,value: EventHandler[SafeSerializationEventArgs]) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,message=None,inner=None):
"""
__new__(cls: type)
__new__(cls: type,message: str)
__new__(cls: type,message: str,inner: Exception)
__new__(cls: type,info: SerializationInfo,context: StreamingContext)
"""
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
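# --- Hedged example (editor's addition): typical IronPython usage of this stubbed
# .NET exception type (illustrative only; requires the CLR, not plain CPython):
# from System import PlatformNotSupportedException
# try:
#     raise PlatformNotSupportedException("not available on this platform")
# except PlatformNotSupportedException as e:
#     print(e.Message)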
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
4287da8a49bf1158ca40e7e1ea59381b3a4f26dd
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/magical-string/286608581.py
|
a40ef5d904e5cc169a0d044c1fefbec7c63a3043
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601
| 2021-04-06T11:30:21
| 2021-04-06T11:30:21
| 201,942,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
# title: magical-string
# detail: https://leetcode.com/submissions/detail/286608581/
# datetime: Tue Dec 17 18:06:50 2019
# runtime: 108 ms
# memory: 27.3 MB
class Solution:
    magical_string = [[1, 1], [2, 1], [2, 1]]
index = 2
def magicalString(self, n: int) -> int:
i = self.index
magical_string = self.magical_string
while i < n:
j = magical_string[i][0]
k = 3 - magical_string[-1][0]
magical_string.append([k, 0])
if j == 2: magical_string.append([k, 0])
if magical_string[i][0] == 1:
magical_string[i][1] = magical_string[i - 1][1] + 1
else:
magical_string[i][1] = magical_string[i - 1][1]
i += 1
self.__class__.index = i
# print(magical_string)
return magical_string[n - 1][1]
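# --- Hedged example (editor's addition): LeetCode's sample case as a quick check.
# The magical string begins "122112", whose first 6 items contain three 1s:
assert Solution().magicalString(6) == 3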
|
[
"ljm51689@gmail.com"
] |
ljm51689@gmail.com
|
7bafee4b562b3ea3a9bc7a61db78cfd37ada3ea3
|
7b74696ff2ab729396cba6c203984fce5cd0ff83
|
/tradeaccounts/migrations/0049_auto_20200607_1333.py
|
46a52b8fab8cd24ab327a01dd31af3e81a8c975e
|
[
"MIT"
] |
permissive
|
webclinic017/investtrack
|
e9e9a7a8caeecaceebcd79111c32b334c4e1c1d0
|
4aa204b608e99dfec3dd575e72b64a6002def3be
|
refs/heads/master
| 2023-06-18T12:57:32.417414
| 2021-07-10T14:26:53
| 2021-07-10T14:26:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
# Generated by Django 3.0.2 on 2020-06-07 05:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tradeaccounts', '0048_auto_20200607_1302'),
]
operations = [
migrations.AlterField(
model_name='tradeaccountsnapshot',
name='applied_period',
field=models.CharField(blank=True, choices=[('m', '月'), ('d', '日'), ('w', '周')], default='d', max_length=1, verbose_name='收益周期'),
),
]
|
[
"jie.han@outlook.com"
] |
jie.han@outlook.com
|
0c2a8e2225a636a1c778cc23813b081284f9b3c5
|
dffc22f1e363172d91c72582f54edf088ca96ea8
|
/lib/ffmpeg/vppbase.py
|
4b49b69842ed9fdd3fbb8bca01a3d3fef7767102
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
FocusLuo/vaapi-fits-1
|
975d0eec398d91dcb904660b7cfda4c7b02fb01f
|
52dbc2decf68cf89cb3ed7a11fa7b86e6903e829
|
refs/heads/master
| 2023-07-26T07:49:59.857414
| 2023-05-09T23:25:00
| 2023-05-11T06:43:06
| 170,789,770
| 0
| 0
|
BSD-3-Clause
| 2019-02-15T02:29:44
| 2019-02-15T02:29:44
| null |
UTF-8
|
Python
| false
| false
| 3,902
|
py
|
###
### Copyright (C) 2022 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
import slash
from ...lib.common import timefn, get_media, call, exe2os, filepath2os
from ...lib.ffmpeg.util import have_ffmpeg, BaseFormatMapper
from ...lib.mixin.vpp import VppMetricMixin
from ...lib import metrics2
@slash.requires(have_ffmpeg)
class BaseVppTest(slash.Test, BaseFormatMapper, VppMetricMixin):
def before(self):
self.refctx = []
self.post_validate = lambda: None
self.hwdevice = f"hw:{get_media().render_device}"
def get_input_formats(self):
return self.caps.get("ifmts", [])
def get_output_formats(self):
return self.caps.get("ofmts", [])
def gen_vpp_opts(self):
raise NotImplementedError
def gen_input_opts(self):
if self.vpp_op in ["deinterlace"]:
opts = "-c:v {ffdecoder}"
elif self.vpp_op in ["stack"]:
opts = ""
else:
opts = "-f rawvideo -pix_fmt {mformat} -s:v {width}x{height}"
opts += " -i {ossource}"
return opts
def gen_output_opts(self):
fcomplex = ["composite", "stack"]
vpfilter = self.gen_vpp_opts()
vpfilter.append("hwdownload")
vpfilter.append("format={ohwformat}")
opts = "-filter_complex" if self.vpp_op in fcomplex else "-vf"
opts += f" '{','.join(vpfilter)}'"
opts += " -pix_fmt {mformat}" if self.vpp_op not in ["csc"] else ""
opts += " -f rawvideo -fps_mode passthrough -an -vframes {frames} -y {osdecoded}"
return opts
@timefn("ffmpeg:vpp")
def call_ffmpeg(self, iopts, oopts):
if vars(self).get("decoded", None) is not None:
get_media()._purge_test_artifact(self.decoded)
self.decoded = get_media()._test_artifact2("yuv")
self.osdecoded = filepath2os(self.decoded)
iopts = iopts.format(**vars(self))
oopts = oopts.format(**vars(self))
call(
f"{exe2os('ffmpeg')} -hwaccel {self.hwaccel}"
f" -init_hw_device {self.hwaccel}={self.hwdevice}"
f" -hwaccel_output_format {self.hwaccel}"
f" -v verbose {iopts} {oopts}"
)
def validate_caps(self):
ifmts = self.get_input_formats()
ofmts = self.get_output_formats()
self.ifmt = self.format
self.ofmt = self.format if "csc" != self.vpp_op else self.csc
self.mformat = self.map_format(self.format)
if self.mformat is None:
slash.skip_test(f"ffmpeg.{self.format} unsupported")
if self.vpp_op in ["csc"]:
self.ihwformat = self.map_format(self.ifmt if self.ifmt in ifmts else None)
self.ohwformat = self.map_format(self.ofmt if self.ofmt in ofmts else None)
else:
self.ihwformat = self.map_best_hw_format(self.ifmt, ifmts)
self.ohwformat = self.map_best_hw_format(self.ofmt, ofmts)
if self.ihwformat is None:
slash.skip_test(f"{self.ifmt} unsupported")
if self.ohwformat is None:
slash.skip_test(f"{self.ofmt} unsupported")
if self.vpp_op in ["composite"]:
self.owidth, self.oheight = self.width, self.height
for comp in self.comps:
self.owidth = max(self.owidth, self.width + comp['x'])
self.oheight = max(self.oheight, self.height + comp['y'])
self.post_validate()
def vpp(self):
self.validate_caps()
iopts = self.gen_input_opts()
oopts = self.gen_output_opts()
self.ossource = filepath2os(self.source)
self.call_ffmpeg(iopts, oopts)
if vars(self).get("r2r", None) is not None:
assert type(self.r2r) is int and self.r2r > 1, "invalid r2r value"
metric = metrics2.factory.create(metric = dict(type = "md5", numbytes = -1))
metric.update(filetest = self.decoded)
metric.expect = metric.actual # the first run is our reference for r2r
metric.check()
for i in range(1, self.r2r):
self.call_ffmpeg(iopts, oopts)
metric.update(filetest = self.decoded)
metric.check()
else:
self.check_metrics()
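# --- Hedged example (editor's addition): a sketch of a concrete subclass showing the
# gen_vpp_opts() contract; the filter string and attributes are assumptions modeled on
# the base class above, not code from the suite.
# class ScaleTest(BaseVppTest):
#     def before(self):
#         super().before()
#         self.vpp_op = "scale"
#     def gen_vpp_opts(self):
#         # hwupload feeds the hardware filter; call_ffmpeg fills the placeholders
#         # via str.format(**vars(self))
#         return ["hwupload", "scale_vaapi=w={scale_width}:h={scale_height}"]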
|
[
"ullysses.a.eoff@intel.com"
] |
ullysses.a.eoff@intel.com
|
3d27b8603e7399f16a976fd41a6dda3461f31a61
|
a1cbe24cb8646e7af91a64d1fbfce4a4d7adce99
|
/teesta/config/desktop.py
|
0b643613f46c5abe037d298f362e72a3fa2b58c6
|
[
"MIT"
] |
permissive
|
mbauskar/teesta
|
a4d32f8ec941be42f08d832ad922f092bf77b2b8
|
004bacefec97759e8abf525a58da2f4b17fb9448
|
refs/heads/master
| 2021-01-24T01:28:06.474339
| 2016-06-22T10:01:29
| 2016-06-22T10:01:29
| 61,936,561
| 1
| 0
| null | 2016-06-25T09:37:11
| 2016-06-25T09:37:11
| null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Teesta",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Teesta")
}
]
|
[
"mbauskar@gmail.com"
] |
mbauskar@gmail.com
|
730fd4d9ec2574b3ee5ec4a4b6f7490e1b36834c
|
e089f2598400d4115f9f1a91c48c7eef40e6d251
|
/vgg16_2.py
|
423a087f0d1ad03d149d1deda559efc8d94e0215
|
[] |
no_license
|
cwarny/flower-teller
|
c261ef10077f0b65d96bdb0e28a3e013ef32cef5
|
36b4350e7257ac11d3e89cb594f047963c65b3ac
|
refs/heads/master
| 2021-01-21T15:49:39.393488
| 2017-06-26T02:13:47
| 2017-06-26T02:13:47
| 95,398,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,280
|
py
|
import json
import csv
import numpy as np
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from matplotlib import pyplot as plt
from PIL import Image
from sklearn.preprocessing import OneHotEncoder
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.preprocessing import image
def ConvBlock(layers, model, filters):
for i in range(layers):
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(filters, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
def FCBlock(model):
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3,1,1))
def vgg_preprocess(x):
    x = x - vgg_mean # subtract the per-channel ImageNet mean
    return x[:, ::-1] # reverse the channel axis rgb->bgr (VGG weights expect BGR)
def VGG_16():
model = Sequential()
model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))
ConvBlock(2, model, 64)
ConvBlock(2, model, 128)
ConvBlock(3, model, 256)
ConvBlock(3, model, 512)
ConvBlock(3, model, 512)
model.add(Flatten())
FCBlock(model)
FCBlock(model)
model.add(Dense(1000, activation='softmax'))
return model
model = VGG_16()
fpath = get_file('vgg16.h5', 'vgg16.h5', cache_subdir='models') # See: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
model.load_weights(fpath)
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical', target_size=(224,224)):
return gen.flow_from_directory(dirname, target_size=target_size, class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
val_batches = get_batches('n11669921/sample/valid', shuffle=False, batch_size=64)
batches = get_batches('n11669921/sample/train', shuffle=False, batch_size=64)
def onehot(x):
return np.array(OneHotEncoder().fit_transform(x.reshape(-1,1)).todense())
val_classes = val_batches.classes
trn_classes = batches.classes
val_labels = onehot(val_classes)
trn_labels = onehot(trn_classes)
# Fine-tuning
model.pop()
for layer in model.layers:
layer.trainable = False
model.add(Dense(121, activation='softmax'))
def fit_model(model, batches, val_batches, nb_epoch=1):
model.fit_generator(batches, samples_per_epoch=batches.N, nb_epoch=nb_epoch, validation_data=val_batches, nb_val_samples=val_batches.N)
opt = RMSprop(lr=0.1)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
fit_model(model, batches, val_batches, nb_epoch=2)
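# --- Hedged fix (editor's addition): `val_data` is never defined in the original
# script; one way to materialize the validation images from the non-shuffled
# generator before the two predict calls below (attribute names follow the
# Keras-1-era iterator already used above):
val_data = np.concatenate(
    [val_batches.next()[0]
     for _ in range(int(np.ceil(val_batches.N / float(val_batches.batch_size))))])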
preds = model.predict_classes(val_data, batch_size=64)
probs = model.predict_proba(val_data, batch_size=64)[:,0]
layers = model.layers
# Get the index of the first dense layer
first_dense_idx = [index for index,layer in enumerate(layers) if type(layer) is Dense][0]
# Set this and all subsequent layers to trainable
for layer in layers[first_dense_idx:]:
layer.trainable = True
K.set_value(opt.lr, 0.0001)
fit_model(model, batches, val_batches, 3)
model.save_weights('models/finetune2.h5')
|
[
"cedric.warny@gmail.com"
] |
cedric.warny@gmail.com
|
456f8a27016afc6e1e5da8a314af1625002e861a
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4352/codes/1649_2445.py
|
c823fb8e8c5b6c16933c41968349e882bb866230
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
escala = (input("digite a escala: (C/F)"))
valor = float(input("digite a temperatura: "))
formula1 = 5/9 * (valor - 32)
formula2 = 9*valor/5 + 32
if escala == "F":
print(round(formula1, 2))
else:
print(round(formula2, 2))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
ad4a165dfc6950d2c638cdd134f1deeef8352d52
|
cde2f83809b89ae0b01a7b30b9caae83183d09a0
|
/correlation.py
|
9825fc5f79db4b3d456117cd1171a2f4a5337ad5
|
[] |
no_license
|
airbaggie/judgmental_eye
|
264fa8f4aaec452f4acbf34dbb99d070408c2c22
|
67cb86e450133d253c74b07950a846bf2bb4f06f
|
refs/heads/master
| 2020-06-08T21:32:54.271881
| 2019-06-24T04:12:58
| 2019-06-24T04:12:58
| 193,310,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
from math import sqrt
def pearson(pairs):
"""Return Pearson correlation for pairs.
Using a set of pairwise ratings, produces a Pearson similarity rating.
"""
series_1 = [float(pair[0]) for pair in pairs]
series_2 = [float(pair[1]) for pair in pairs]
sum_1 = sum(series_1)
sum_2 = sum(series_2)
squares_1 = sum([n * n for n in series_1])
squares_2 = sum([n * n for n in series_2])
product_sum = sum([n * m for n, m in pairs])
size = len(pairs)
numerator = product_sum - ((sum_1 * sum_2) / size)
denominator = sqrt(
(squares_1 - (sum_1 * sum_1) / size) *
(squares_2 - (sum_2 * sum_2) / size)
)
if denominator == 0:
return 0
return numerator / denominator
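# --- Hedged example (editor's addition): quick sanity checks for pearson().
assert pearson([(1, 2), (2, 4), (3, 6)]) == 1.0   # perfectly correlated pairs
assert pearson([(1, 5), (2, 5), (3, 5)]) == 0     # zero variance hits the guard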
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
7dc433e6a444ecf139658897f4b616a313f2c5ee
|
050fc5ca698dfd7612dee42aa980fc7b5eee40a2
|
/tests/plugin/data/sw_kafka/test_kafka.py
|
30f9f02021b73bbc4f9a958cb224e19b9557fff7
|
[
"Apache-2.0"
] |
permissive
|
apache/skywalking-python
|
8ac6ce06630c519f9984a45e74c1fcc88cf5b9d6
|
1a360228c63cd246dd4c5dd8e1f09bdd5556ad7d
|
refs/heads/master
| 2023-09-05T02:45:56.225937
| 2023-08-28T22:19:24
| 2023-08-28T22:19:24
| 261,456,329
| 178
| 122
|
Apache-2.0
| 2023-08-28T22:19:26
| 2020-05-05T12:13:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Callable
import pytest
import requests
from skywalking.plugins.sw_kafka import support_matrix
from tests.orchestrator import get_test_vector
from tests.plugin.base import TestPluginBase
@pytest.fixture
def prepare():
# type: () -> Callable
return lambda *_: requests.get('http://0.0.0.0:9090/users', timeout=5)
class TestPlugin(TestPluginBase):
@pytest.mark.parametrize('version', get_test_vector(lib_name='kafka-python', support_matrix=support_matrix))
def test_plugin(self, docker_compose, version):
self.validate()
|
[
"noreply@github.com"
] |
apache.noreply@github.com
|
695e6a3453693a6839a8274d69b5d35e909f9015
|
a87f87e71d971bc8d6c205400052a47f8d957e5d
|
/psuedo_train.py
|
b0fd34acf099b173fe37b8a78a74967baf39f36e
|
[] |
no_license
|
bcaitech1/p1-img-MaiHon
|
045eef675a92bb9b26532ff930f919fe3c6e6919
|
d17a925e301349e167327c5eab9d3b65f06d61a3
|
refs/heads/master
| 2023-04-08T10:22:25.803079
| 2021-04-19T12:22:12
| 2021-04-19T12:22:12
| 359,440,859
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,604
|
py
|
import os
import torch
import random
import argparse
import numpy as np
import pandas as pd
import albumentations as A
from tqdm import tqdm
from src.models import *
from src.configs.config import InferConfig
from src.dataset import PseudoDataset
import torch.nn.functional as F
from torch.utils.data import DataLoader
def seed_everything(seed=2021):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import imgaug
imgaug.random.seed(seed)
def main():
parser = argparse.ArgumentParser(description='Arguments')
parser.add_argument('--seed', default=43, type=int, help='Reproduction Seed')
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--postfix', required=True)
parser.add_argument('--model_type', required=True)
parser.add_argument('--tta', default=0, type=int)
args = parser.parse_args()
seed_everything(args.seed)
cfg = InferConfig(args)
tta_infer = True if args.tta == 1 else False
if tta_infer:
print("TTA Inference")
tta_tfms = [
            # A.CLAHE(clip_limit=2.0, p=1.0), --> including it produced the same result
A.HorizontalFlip(p=1.0),
]
else:
tta_tfms = None
if tta_infer:
infer_ds = PseudoDataset(cfg, tta_tfms)
else:
infer_ds = PseudoDataset(cfg)
infer_dl = DataLoader(
infer_ds,
batch_size=args.batch_size,
shuffle=False,
num_workers=3,
pin_memory=True
)
models = []
for i in range(len(cfg.ckpts)):
model = Net(cfg)
model = model.to(cfg.device)
save_dict = torch.load(cfg.ckpts[i])
print(f"Epoch: {save_dict['epoch']}")
print(f"Loss : {save_dict['loss']}")
state_dict = save_dict["state_dict"]
model.load_state_dict(state_dict)
models.append(model)
print(f"Total {len(models)} models loaded.")
if tta_infer:
pred_paths = []
predictions = []
with torch.no_grad():
for sample in tqdm(infer_dl, total=len(infer_dl)):
images = sample['image']
paths = np.array(sample['path'])
                pred = 0
                for image in images:
                    for model in models:
                        model.eval()
                        # keep the raw logits in a separate variable so the ensemble
                        # sum is not overwritten (the original reassigned `pred` here)
                        logits = model(image.to(cfg.device))
                        pred += F.log_softmax(logits, dim=-1)
                _, pred = torch.max(pred / (len(models)), -1)
predictions.extend(pred.detach().cpu().numpy())
pred_paths.extend(paths)
else:
pred_paths = []
predictions = []
with torch.no_grad():
for sample in tqdm(infer_dl, total=len(infer_dl)):
images = sample['image'].to(cfg.device)
paths = np.array(sample['path'])
                pred = 0
                for model in models:
                    model.eval()
                    # same fix as above: accumulate into `pred` instead of clobbering it
                    logits = model(images)
                    pred += F.log_softmax(logits, dim=-1)
                _, pred = torch.max(pred / (len(models)), -1)
predictions.extend(pred.detach().cpu().numpy())
pred_paths.extend(paths)
pseudo = pd.DataFrame(data={
'image': pred_paths,
'label': predictions
})
pseudo.to_csv(cfg.submission_dir, index=False)
print("Inference Done.")
if __name__ == "__main__":
main()
|
[
"mai.hong0924@gmail.com"
] |
mai.hong0924@gmail.com
|
f44312a56f753dec7e321a13f2d402666c08d473
|
779af874adf1647461981b0c36530cf9924f5f01
|
/python3/dist-packages/plainbox/impl/exporter/text.py
|
c8889b30876cfff0a422e3a3d37debfa5f7bf396
|
[] |
no_license
|
hitsuyo/Library_Python_3.5
|
8974b5de04cb7780b0a1a75da5cb5478873f08e7
|
374e3f9443e4d5cae862fd9d81db8b61030ae172
|
refs/heads/master
| 2022-11-05T23:46:47.188553
| 2018-01-04T19:29:05
| 2018-01-04T19:29:05
| 116,093,537
| 1
| 2
| null | 2022-10-26T03:07:06
| 2018-01-03T05:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,001
|
py
|
# This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.exporter.text` -- plain text exporter
=========================================================
.. warning::
THIS MODULE DOES NOT HAVE STABLE PUBLIC API
"""
from plainbox.i18n import gettext as _
from plainbox.impl.color import Colorizer
from plainbox.impl.exporter import SessionStateExporterBase
from plainbox.impl.result import outcome_meta
class TextSessionStateExporter(SessionStateExporterBase):
"""Human-readable session state exporter."""
def __init__(self, option_list=None, color=None, exporter_unit=None):
super().__init__(option_list, exporter_unit=exporter_unit)
self.C = Colorizer(color)
def get_session_data_subset(self, session_manager):
return session_manager.state
def dump(self, session, stream):
for job in session.run_list:
state = session.job_state_map[job.id]
if state.result.is_hollow:
continue
if self.C.is_enabled:
stream.write(
" {}: {}\n".format(
self.C.custom(
outcome_meta(state.result.outcome).unicode_sigil,
outcome_meta(state.result.outcome).color_ansi
), state.job.tr_summary(),
).encode("UTF-8"))
if len(state.result_history) > 1:
stream.write(_(" history: {0}\n").format(
', '.join(
self.C.custom(
result.outcome_meta().tr_outcome,
result.outcome_meta().color_ansi)
for result in state.result_history)
).encode("UTF-8"))
else:
stream.write(
"{:^15}: {}\n".format(
state.result.tr_outcome(),
state.job.tr_summary(),
).encode("UTF-8"))
if state.result_history:
print(_("History:"), ', '.join(
self.C.custom(
result.outcome_meta().unicode_sigil,
result.outcome_meta().color_ansi)
for result in state.result_history))
|
[
"nguyentansang3417@gmail.com"
] |
nguyentansang3417@gmail.com
|
3d248b9822e566b434bc50291ba5c73e7f9d7aa3
|
564d6a4d305a8ac6a7e01c761831fb2081c02d0f
|
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations/_virtual_machine_run_commands_operations.py
|
ae68dd96f544a951679e1e4b41833dc0b708fe85
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
paultaiton/azure-sdk-for-python
|
69af4d889bac8012b38f5b7e8108707be679b472
|
d435a1a25fd6097454b7fdfbbdefd53e05029160
|
refs/heads/master
| 2023-01-30T16:15:10.647335
| 2020-11-14T01:09:50
| 2020-11-14T01:09:50
| 283,343,691
| 0
| 0
|
MIT
| 2020-07-28T22:43:43
| 2020-07-28T22:43:43
| null |
UTF-8
|
Python
| false
| false
| 7,966
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineRunCommandsOperations:
"""VirtualMachineRunCommandsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["models.RunCommandListResult"]:
"""Lists all available run commands for a subscription in a location.
:param location: The location upon which run commands is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RunCommandListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2018_06_01.models.RunCommandListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RunCommandListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RunCommandListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands'} # type: ignore
async def get(
self,
location: str,
command_id: str,
**kwargs
) -> "models.RunCommandDocument":
"""Gets specific run command for a subscription in a location.
:param location: The location upon which run commands is queried.
:type location: str
:param command_id: The command id.
:type command_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunCommandDocument, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_06_01.models.RunCommandDocument
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RunCommandDocument"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'commandId': self._serialize.url("command_id", command_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunCommandDocument', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}'} # type: ignore
|
[
"noreply@github.com"
] |
paultaiton.noreply@github.com
|
3a6ccd4f7a0edffa6f93e9687c076417d0a1b0d7
|
7b5828edda7751700ca7002b40a214e39e5f48a8
|
/EA/simulation/__hooks__.py
|
85e30235b7cb2d5c0d2088433d93d3f9f0f6c835
|
[] |
no_license
|
daniela-venuta/Sims-4-Python-Script-Workspace
|
54c33dac02f84daed66f46b7307f222fede0fa62
|
f408b28fb34626b2e3b2953152343d591a328d66
|
refs/heads/main
| 2023-03-29T18:08:39.202803
| 2021-03-30T19:00:42
| 2021-03-30T19:00:42
| 353,111,243
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
RELOADER_ENABLED = False
__enable_gc_callback = True
import gc
try:
import _profile
except:
__enable_gc_callback = False
def system_init(gameplay):
import sims4.importer
sims4.importer.enable()
print('Server Startup')
if __enable_gc_callback:
gc.callbacks.append(_profile.notify_gc_function)
def system_shutdown():
global RELOADER_ENABLED
import sims4.importer
sims4.importer.disable()
RELOADER_ENABLED = False
|
[
"44103490+daniela-venuta@users.noreply.github.com"
] |
44103490+daniela-venuta@users.noreply.github.com
|