blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f91540884b5e4959ef73492c7f863d2922eccf94
|
18aee5d93a63eab684fe69e3aa0abd1372dd5d08
|
/test/collective/fleet/test_fleet_lamb_meta_optimizer.py
|
c32135bafc1922c15f3cde7cad759415d8939996
|
[
"Apache-2.0"
] |
permissive
|
Shixiaowei02/Paddle
|
8d049f4f29e281de2fb1ffcd143997c88078eadb
|
3d4d995f26c48f7792b325806ec3d110fc59f6fc
|
refs/heads/develop
| 2023-06-26T06:25:48.074273
| 2023-06-14T06:40:21
| 2023-06-14T06:40:21
| 174,320,213
| 2
| 1
|
Apache-2.0
| 2022-12-28T05:14:30
| 2019-03-07T10:09:34
|
C++
|
UTF-8
|
Python
| false
| false
| 6,035
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import paddle
from paddle import fluid
from paddle.distributed import fleet
from paddle.distributed.fleet.base import role_maker
paddle.enable_static()
class TestFleetLambMetaOptimizer(unittest.TestCase):
    """Static-graph unit tests for fleet's LAMB meta optimizer."""

    def setUp(self):
        # Simulate trainer #1 of a two-trainer collective job.
        os.environ["PADDLE_TRAINER_ID"] = "1"
        os.environ["PADDLE_TRAINER_ENDPOINTS"] = (
            "127.0.0.1:36001,127.0.0.1:36002"
        )

    def net(self, main_prog, startup_prog):
        """Build a tiny MLP classifier inside the given programs.

        Returns the mean cross-entropy loss and a DistributedStrategy
        with LAMB switched on.
        """
        with fluid.program_guard(main_prog, startup_prog):
            with fluid.unique_name.guard():
                feature = paddle.static.data(
                    name="x", shape=[-1, 32], dtype='float32'
                )
                label = paddle.static.data(
                    name="y", shape=[-1, 1], dtype='int64'
                )
                hidden = paddle.static.nn.fc(
                    x=feature, size=64, activation='tanh'
                )
                hidden = paddle.static.nn.fc(
                    x=hidden, size=256, activation='tanh'
                )
                prediction = paddle.static.nn.fc(
                    x=[hidden], size=2, activation='softmax'
                )
                ce = paddle.nn.functional.cross_entropy(
                    input=prediction,
                    label=label,
                    reduction='none',
                    use_softmax=False,
                )
                loss = paddle.mean(x=ce)

        dist_strategy = paddle.distributed.fleet.DistributedStrategy()
        dist_strategy.lamb = True
        dist_strategy.lamb_configs = {
            'lamb_weight_decay': 0.01,
            'exclude_from_weight_decay': [],
        }
        return loss, dist_strategy

    def test_lamb_optimizer(self):
        """Adam is replaced by a 'lamb' op when strategy.lamb is set."""
        fleet.init(role_maker.PaddleCloudRoleMaker(is_collective=True))
        startup_program = fluid.Program()
        main_program = fluid.Program()
        loss, dist_strategy = self.net(main_program, startup_program)
        inner_opt = paddle.fluid.optimizer.Adam(learning_rate=0.01)
        dist_opt = fleet.distributed_optimizer(
            inner_opt, strategy=dist_strategy
        )
        dist_opt.minimize(loss)

        op_types = [op.type for op in loss.block.ops]
        self.assertIn('lamb', op_types)

    def test_lamb_not_apply_with_momentum(self):
        """LAMB only wraps Adam; a Momentum inner optimizer is untouched."""
        fleet.init(role_maker.PaddleCloudRoleMaker(is_collective=True))
        startup_program = fluid.Program()
        main_program = fluid.Program()
        loss, dist_strategy = self.net(main_program, startup_program)
        inner_opt = paddle.fluid.optimizer.Momentum(
            learning_rate=0.1, momentum=0.9
        )
        dist_opt = fleet.distributed_optimizer(
            inner_opt, strategy=dist_strategy
        )
        dist_opt.minimize(loss)

        op_types = [op.type for op in loss.block.ops]
        self.assertNotIn('lamb', op_types)

    def test_lamb_exclude_fn(self):
        """Params matching exclude_from_weight_decay get weight_decay == 0."""
        fleet.init(role_maker.PaddleCloudRoleMaker(is_collective=True))
        startup_program = fluid.Program()
        main_program = fluid.Program()
        loss, dist_strategy = self.net(main_program, startup_program)
        inner_opt = paddle.fluid.optimizer.Adam(learning_rate=0.01)
        dist_strategy.lamb_configs = {
            'lamb_weight_decay': 0.01,
            'exclude_from_weight_decay': ['.b_0'],
        }
        dist_opt = fleet.distributed_optimizer(
            inner_opt, strategy=dist_strategy
        )
        dist_opt.minimize(loss)

        # lamb ops whose first op_role_var is a bias ('.b_0') parameter.
        bias_lamb_ops = [
            op
            for op in loss.block.ops
            if op.type == 'lamb'
            and op.attr('op_role_var')[0].endswith('.b_0')
        ]
        for op in bias_lamb_ops:
            self.assertEqual(op.attr('weight_decay'), 0)

    def test_lamb_apply_with_amp(self):
        """LAMB composes with AMP: lamb, cast and unscale ops all appear."""
        fleet.init(role_maker.PaddleCloudRoleMaker(is_collective=True))
        feature = paddle.static.data(
            name="x", shape=[-1, 32], dtype='float32'
        )
        label = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
        hidden = paddle.static.nn.fc(x=feature, size=64, activation='tanh')
        hidden = paddle.static.nn.fc(x=hidden, size=64, activation='tanh')
        prediction = paddle.static.nn.fc(
            x=[hidden], size=2, activation='softmax'
        )
        ce = paddle.nn.functional.cross_entropy(
            input=prediction, label=label, reduction='none', use_softmax=False
        )
        loss = paddle.mean(x=ce)

        dist_strategy = paddle.distributed.fleet.DistributedStrategy()
        dist_strategy.amp = True
        dist_strategy.amp_configs = {
            "init_loss_scaling": 32768,
            "decr_every_n_nan_or_inf": 2,
            "incr_every_n_steps": 1000,
            "incr_ratio": 2.0,
            "use_dynamic_loss_scaling": True,
            "decr_ratio": 0.5,
            "custom_white_list": ['softmax'],
            "custom_black_list": ['tanh'],
        }
        dist_strategy.lamb = True
        dist_strategy.lamb_configs = {
            'lamb_weight_decay': 0.01,
            'exclude_from_weight_decay': [],
        }
        inner_opt = paddle.fluid.optimizer.Adam(learning_rate=0.01)
        dist_opt = fleet.distributed_optimizer(
            inner_opt, strategy=dist_strategy
        )
        dist_opt.minimize(loss)

        op_types = [op.type for op in loss.block.ops]
        self.assertIn('lamb', op_types)
        self.assertIn('cast', op_types)
        self.assertIn('check_finite_and_unscale', op_types)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
Shixiaowei02.noreply@github.com
|
21e5c7969a5e8530ef51ea85feca0c3bfffdd174
|
5c1531b47fb4dc4d7e5998d44f7200bf1786b12b
|
/074_search_2d_matrix/search_2d_matrix.py
|
3c4f9a7a01a2733e9c54637410c7700ced156771
|
[] |
no_license
|
Web-Dev-Collaborative/Leetcode-JS-PY-MD
|
d1f560051aad1896a80eccdd4b4fbb389e7033e3
|
675b94fa5da8d40f0ea79efe6d3ef1393221425f
|
refs/heads/master
| 2023-09-01T22:30:32.313793
| 2021-10-26T02:17:03
| 2021-10-26T02:17:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
import bisect
class Solution:
    # @param {integer[][]} matrix
    # @param {integer} target
    # @return {boolean}
    def searchMatrix(self, matrix, target):
        """Return True if *target* occurs in the sorted 2D *matrix*.

        Each row is sorted left-to-right and the first element of each row
        is greater than the last element of the previous row, so two binary
        searches suffice: one over the first column to pick a row, one
        within that row.  O(log m + log n) after the O(m) first-column scan.

        Fix: the original raised IndexError on an empty matrix (or a matrix
        of empty rows); those now return False.
        """
        if not matrix or not matrix[0]:
            return False
        # Last row whose first element is <= target.
        row = bisect.bisect_right([r[0] for r in matrix], target) - 1
        if row < 0:
            # target is smaller than every element in the matrix.
            return False
        col = bisect.bisect_left(matrix[row], target)
        return col < len(matrix[row]) and matrix[row][col] == target
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
ad120a33f5a38c61f5bf51b963ca28fe9bb7181e
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/compute/azure-mgmt-avs/generated_samples/workload_networks_list_virtual_machines.py
|
5c0476b45004e198a88a5953076a48e7cd6a0852
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.avs import AVSClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-avs
# USAGE
python workload_networks_list_virtual_machines.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Print every workload-network virtual machine of private cloud 'cloud1'."""
    avs_client = AVSClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    vm_pages = avs_client.workload_networks.list_virtual_machines(
        resource_group_name="group1",
        private_cloud_name="cloud1",
    )
    # The SDK returns a pageable iterator; iterating fetches each page lazily.
    for workload_vm in vm_pages:
        print(workload_vm)


# x-ms-original-file: specification/vmware/resource-manager/Microsoft.AVS/stable/2022-05-01/examples/WorkloadNetworks_ListVirtualMachines.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
8b6353a0c90e0e69cd17cfbd061e0b152fd0363a
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/util/recognizer/DmgRecognizer.pyi
|
38a1f498d2fec2009f3d8d943ac5a215070df3d4
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
pyi
|
from typing import List
import ghidra.app.util.recognizer
import java.lang
# Auto-generated type stub (.pyi) for the Java class
# ghidra.app.util.recognizer.DmgRecognizer, targeting Ghidra's Jython 2.x
# runtime — hence the Python-2-era `unicode`/`long` names and the bare
# `@overload` marker emitted by the stub generator (not typing.overload).
class DmgRecognizer(object, ghidra.app.util.recognizer.Recognizer):
    def __init__(self): ...
    # java.lang.Object methods inherited by every Java class:
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    def getPriority(self) -> int: ...
    def hashCode(self) -> int: ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    # Recognizer interface: how many leading bytes recognize() needs.
    def numberOfBytesRequired(self) -> int: ...
    # Returns a description string when the byte prefix matches, else null.
    def recognize(self, __a0: List[int]) -> unicode: ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
    @property
    def priority(self) -> int: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
b401270ebad54df08a47715a824853f516267bdc
|
a3746020cf091f433beb41bde1b62818b4de569b
|
/past/rule_analysis/rule/text/check_using_revoke.py
|
b8b835f7f2fc5a709f3ae4af5bdc769296baefa9
|
[] |
no_license
|
kk71/sqlaudit
|
59bab5765a67f56f1dd2f3103812051c5acbbc49
|
747aaa02573a9c2b46a9e14415d27c0ab8e6158c
|
refs/heads/master
| 2023-02-04T18:38:46.125746
| 2020-06-05T09:49:46
| 2020-06-05T09:49:46
| 323,559,338
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# Author: kk.Fang(fkfkbill@gmail.com)
import re
from .utils import judge_if_ddl
def execute_rule(sql, db_model=None, **kwargs):
    """Rule check: return True if *sql* is a DDL statement using REVOKE.

    :param sql: SQL statement text to inspect.
    :param db_model: unused here; kept for the rule-function call contract.
    :return: True when the statement is DDL and contains 'revoke ', else False.
    """
    # Non-DDL statements are out of scope for this rule.
    if not judge_if_ddl(sql):
        return False
    # Raw string fixes the invalid-escape-sequence warning on '\s'
    # (SyntaxWarning/DeprecationWarning on modern Python); the compiled
    # pattern is unchanged.  Case-insensitive to match REVOKE/revoke/etc.
    if re.search(r'revoke\s+', sql, re.I):
        return True
    return False
|
[
"fkfkbill@gmail.com"
] |
fkfkbill@gmail.com
|
d8334689fb752ac8c03db533028e606d863cb0fe
|
ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3
|
/python/baiduads-sdk-auto/test/test_add_segment_bind_response_wrapper.py
|
2ac6343754d5d8b7139947919a07f681b7905ae0
|
[
"Apache-2.0"
] |
permissive
|
baidu/baiduads-sdk
|
24c36b5cf3da9362ec5c8ecd417ff280421198ff
|
176363de5e8a4e98aaca039e4300703c3964c1c7
|
refs/heads/main
| 2023-06-08T15:40:24.787863
| 2023-05-20T03:40:51
| 2023-05-20T03:40:51
| 446,718,177
| 16
| 11
|
Apache-2.0
| 2023-06-02T05:19:40
| 2022-01-11T07:23:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
"""
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.advancedsegmentbind.model.add_segment_bind_response_wrapper_body import AddSegmentBindResponseWrapperBody
from baiduads.common.model.api_response_header import ApiResponseHeader
globals()['AddSegmentBindResponseWrapperBody'] = AddSegmentBindResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
from baiduads.advancedsegmentbind.model.add_segment_bind_response_wrapper import AddSegmentBindResponseWrapper
class TestAddSegmentBindResponseWrapper(unittest.TestCase):
    """Unit test stubs for AddSegmentBindResponseWrapper (generated)."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testAddSegmentBindResponseWrapper(self):
        """Test AddSegmentBindResponseWrapper"""
        # FIXME: construct object with mandatory attributes with example values
        # model = AddSegmentBindResponseWrapper() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
|
[
"jiangyuan04@baidu.com"
] |
jiangyuan04@baidu.com
|
13f1ee4a2434f217b952e800c62824c3622ae5c8
|
c3d4f7e811b39de9d6f8fa8b013ecd1c13ed46b1
|
/2018-01/01_Jan/19/_08_bases_.py
|
e3d2e736ed0920f3ea0979e26cc97440af0999d2
|
[
"Apache-2.0"
] |
permissive
|
z727354123/pyCharmTest
|
b58ebb78c3b51633ed6894009565ec84c8441509
|
577aad45c5bf7bef055db0788b9f480529a04186
|
refs/heads/master
| 2023-03-10T23:59:20.098707
| 2023-03-01T05:12:48
| 2023-03-01T05:12:48
| 99,561,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# _*_ encoding=utf-8 _*__*_ encoding=utf-8 _*_
# NOTE(review): the line above is a garbled duplicate of a utf-8 coding
# declaration; left untouched because it sits on line 1 and still matches
# Python's coding-cookie regex.
# Demo of class attribute lookup along the MRO (original comment: "元组"/tuple).
class A:
    _name = "A"
    @classmethod
    def printName(self):
        # classmethod: `self` is actually the class object, so this reads
        # the _name attribute found first on that class's MRO.
        print(self._name)
class B:
    _name = "B"
class StartA(A, B):
    pass
class StartB(B, A):
    pass
print(StartA._name)  # MRO StartA -> A -> B, so "A"
print(StartB._name)  # MRO StartB -> B -> A, so "B"
StartA.printName()   # prints "A"
StartB.printName()   # prints "B"
print(A.__dict__)
# B is redefined here; BSon subclasses the NEW B, not the one above.
class B:
    _name = "B"
class BSon(B):
    pass
print(B.__dict__)
print(BSon.__dict__)
# BSon defines no __weakref__ slot of its own, so it inherits B's
# descriptor — the identity check prints True.
print(BSon.__weakref__ is B.__weakref__)
|
[
"727354123@qq.com"
] |
727354123@qq.com
|
aff8d834ffd907d4713b0f8caee9d5a834be77ab
|
a1c8731a8527872042bd46340d8d3e6d47596732
|
/programming-laboratory-I/2r9q/jasei.py
|
7aa512c05333853cbb941be1c35fa9fe5bede241
|
[
"MIT"
] |
permissive
|
MisaelAugusto/computer-science
|
bbf98195b0ee954a7ffaf58e78f4a47b15069314
|
d21335a2dc824b54ffe828370f0e6717fd0c7c27
|
refs/heads/master
| 2022-12-04T08:21:16.052628
| 2020-08-31T13:00:04
| 2020-08-31T13:00:04
| 287,621,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# coding: utf-8
# Aluno: Misael Augusto
# Matrícula: 117110525
# Problema: Já sei tocar essa música
def sei_tocar_musica(musica, acordes):
    """Return True if every chord in *musica* appears in *acordes*.

    :param musica: sequence of chords the song requires.
    :param acordes: sequence of chords the player knows.
    :return: True when all required chords are known (an empty song is
             trivially playable, matching the original behavior).
    """
    # Set membership is O(1) per chord, replacing the original
    # O(len(musica) * len(acordes)) nested scan.  Chords are assumed
    # hashable (strings in the original problem).
    conhecidos = set(acordes)
    return all(acorde in conhecidos for acorde in musica)
|
[
"misael.costa@ccc.ufcg.edu.br"
] |
misael.costa@ccc.ufcg.edu.br
|
3ae06584c472949daf17c71997368ef6a6d112a0
|
59ac1d0f09ebfb527701031f3ab2cfbfb8055f51
|
/soapsales/basedata/serializers.py
|
502074108cd066ca5c0ee996f46d869ec1458bb7
|
[] |
no_license
|
DUMBALINYOLO/erpmanu
|
d4eb61b66cfa3704bd514b58580bdfec5639e3b0
|
db979bafcc7481f60af467d1f48d0a81bbbfc1aa
|
refs/heads/master
| 2023-04-28T13:07:45.593051
| 2021-05-12T09:30:23
| 2021-05-12T09:30:23
| 288,446,097
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
from rest_framework import serializers
from .models import (
Note,
Organization,
UnitOfMeasure
)
class NoteSerializer(serializers.ModelSerializer):
    """Serialize every field of the Note model."""
    class Meta:
        model = Note
        fields = "__all__"
class OrganizationSerializer(serializers.ModelSerializer):
    """Serialize every field of the Organization model."""
    class Meta:
        model = Organization
        fields = "__all__"
class UnitOfMeasureSerializer(serializers.ModelSerializer):
    """Serialize UnitOfMeasure with the raw unit_type value.

    Fix: this class was defined twice in a row with the same name and the
    same field set; the first definition was dead code silently shadowed by
    the second.  The two have been merged into one definition — runtime
    behavior is unchanged because the second always won.
    """
    class Meta:
        model = UnitOfMeasure
        fields = [
            'symbol',
            'verbose_name',
            'scale_factor',
            'unit_type'
        ]
class UnitOfMeasureListSerializer(serializers.ModelSerializer):
    """List serializer: unit_type is rendered as its display label."""
    # SerializerMethodField makes DRF call get_unit_type(obj) below instead
    # of reading the raw model attribute.
    unit_type = serializers.SerializerMethodField()
    class Meta:
        model = UnitOfMeasure
        fields = [
            'symbol',
            'verbose_name',
            'scale_factor',
            'unit_type'
        ]
    def get_unit_type(self, obj):
        # Django's auto-generated get_<field>_display() for a choices field.
        return obj.get_unit_type_display()
|
[
"baridzimaximillem@gmail.com"
] |
baridzimaximillem@gmail.com
|
19b006dc67b046dbbd412173f9ecb217d47c117f
|
3c797162b544aba5122c8eb85dddd3089f462065
|
/vgg.py
|
44c590d966b6ad101198d73f5ae3b7309651c99e
|
[
"Apache-2.0"
] |
permissive
|
MorvanZhou/Computer-Vision
|
cf5f6dfbc0dd534172f67d812874c72b8fccb75e
|
f0cb97c099ed4ec363c72ee8aae8c93315bef276
|
refs/heads/main
| 2023-02-06T12:05:29.553579
| 2020-12-24T12:01:09
| 2020-12-24T12:01:09
| 324,119,891
| 32
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,862
|
py
|
# [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
# dependency file: https://github.com/MorvanZhou/Computer-Vision/requirements.txt
from tensorflow import keras
from tensorflow.keras import layers
from utils import load_mnist, save_model_structure, save_model_weights
# get data
(x_train, y_train), (x_test, y_test) = load_mnist()
# define model
# like LeNet with more layers and activations: VGG-style stacked 3x3 convs
# before each pooling step.
model = keras.Sequential([
    layers.Conv2D(filters=8, kernel_size=3, strides=1, padding="same", input_shape=(28, 28, 1)), # [n, 28, 28, 8]
    # NOTE(review): input_shape on a non-first layer is ignored by Keras —
    # confirm it was not meant to be dropped here.
    layers.Conv2D(filters=8, kernel_size=3, strides=1, padding="same", input_shape=(28, 28, 1)), # [n, 28, 28, 8]
    layers.ReLU(),
    layers.MaxPool2D(pool_size=2, strides=2),   # [n, 14, 14, 8]
    layers.Conv2D(16, 3, 1, "same"),            # [n, 14, 14, 16]
    layers.Conv2D(16, 3, 1, "same"),            # [n, 14, 14, 16]
    layers.ReLU(),
    layers.MaxPool2D(2, 2),                     # [n, 7, 7, 16]
    layers.Flatten(),                           # [n, 7*7*16]
    layers.Dense(32),                           # [n, 32]
    layers.ReLU(),
    layers.Dense(10)                            # [n, 10] logits (original comment wrongly said [n, 32])
], name="VGG")
# show model
model.summary()
save_model_structure(model)
# define loss and optimizer
# from_logits=True because the final Dense has no softmax activation.
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
opt = keras.optimizers.Adam(0.001)
accuracy = keras.metrics.SparseCategoricalAccuracy()
model.compile(optimizer=opt, loss=loss, metrics=[accuracy])
# training and validation
model.fit(x=x_train, y=y_train, batch_size=32, epochs=3, validation_data=(x_test, y_test))
# save model
save_model_weights(model)
|
[
"morvanzhou@hotmail.com"
] |
morvanzhou@hotmail.com
|
8473cf96f72a7fa8bc7ff272a5dc599372da20de
|
00c2e8163b2292348ac8337462e71e665039044b
|
/article/migrations/0004_auto_20200730_1618.py
|
c6b277529da7a46020789ec39abb33e7151d9d33
|
[] |
no_license
|
InjiChoi/third_crud
|
6666c8ad4e0c6d40555b0a5c6a5a82fe45b54cc0
|
541f135dc1a328be35aa404ea28ef1583e5ba8f3
|
refs/heads/master
| 2022-11-29T06:13:14.602168
| 2020-08-13T07:28:35
| 2020-08-13T07:28:35
| 287,207,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
# Generated by Django 2.2.9 on 2020-07-30 07:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (see file header): re-declares Comment.article
    # as a CASCADE-deleting FK to article.Article with related_name='comments',
    # so an Article instance exposes its comments as `article.comments`.
    dependencies = [
        ('article', '0003_comment'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='article',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='article.Article'),
        ),
    ]
|
[
"chldlswl1212@gmail.com"
] |
chldlswl1212@gmail.com
|
bea5f7c08bd226ba60f7bb07cbfa676a5538d5bf
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/EFwDXErjDywXp56WG_11.py
|
ccaa28a482253e024a286e75e335e487bfb65c08
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
"""
Create a function that takes a string and returns `True` or `False`, depending
on whether the characters are in order or not.
### Examples
is_in_order("abc") ➞ True
is_in_order("edabit") ➞ False
is_in_order("123") ➞ True
is_in_order("xyzz") ➞ True
### Notes
You don't have to handle empty strings.
"""
def is_in_order(txt):
    """Return True if the characters of *txt* are in sorted order.

    Duplicates are allowed ("xyzz" is in order).  Empty strings need not
    be handled per the problem statement, but this returns True for them.

    Fix: removed the leftover debug print() of the sorted string.
    """
    # A string is "in order" exactly when it equals its own sorted form.
    return "".join(sorted(txt)) == txt
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
c33158dd9ad65177efc24faced5f5bd61a02f722
|
cc9a87e975546e2ee2957039cceffcb795850d4f
|
/HelloAdam/HelloAdam20TeknikLooping/__init__.py
|
1aa7fb69d6a33a47a6b599344069d22eb1132404
|
[] |
no_license
|
CodeHunterDev/Belajar-Python
|
304d3243801b91b3605d2b9bd09e49a30735e51b
|
9dd2ffb556eed6b2540da19c5f206fedb218ae99
|
refs/heads/master
| 2023-03-19T22:12:46.330272
| 2020-02-04T08:02:00
| 2020-02-04T08:02:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
# Copyright (c) 2020. Adam Arthur Faizal
# Demo of Python looping helpers: enumerate, zip, sorted, reversed.
from sys import copyright
print("====== TEKNIK LOOPING ======\n")
hewan = ["Kucing", "Kelinci", "Sapi", "Ular", "Burung"]
print("Daftar hewan :", hewan)
buah = ["Mangga", "Stroberi", "Pepaya", "Melon"]
print("Daftar buah :", buah)
# Enumerate: index + element pairs (1-based display).
print("--- Enumerate ---")
for nomer, nama in enumerate(hewan):
    print(nomer + 1, ":", nama)
# Zip: lockstep iteration; stops at the shorter list (buah, 4 items).
print("--- Zip ---")
for namahewan, namabuah in zip(hewan, buah):
    print(namahewan, ":", namabuah)
# Applied to a set and a dictionary (was: "Jika diterapkan pada set dan dictionary")
# Set: unordered, so sorted() gives a stable display order.
barang = {"TV", "Lemari", "Meja", "Kursi", "Kipas Angin"}
print("Daftar barang :", barang)
for namabarang in sorted(barang):
    print(namabarang)
# Dictionary
playlist = {"Linkin Park": "In The End", "Avenged Sevenfold": "So Far Away", "Maroon 5": "Payphone", "Slipknot": "Snuff", "Asking Alexandria": "Moving On"}
# NOTE(review): enumerate() over .items() makes `band` a 0-based counter and
# `lagu` a (band, song) tuple — printing "0 - ('Linkin Park', 'In The End')".
# Probably `for band, lagu in playlist.items():` was intended; left as-is.
for band, lagu in enumerate(playlist.items()):
    print(band, "-", lagu)
# Miscellaneous (was: "Lain-lain"): reversed over a range counts 9 down to 1.
print("--- Lain-lain ---")
for i in reversed(range(1, 10, 1)):
    print(i)
print('\n')
print(copyright)
# by Mbah Putih Mulyosugito
|
[
"adam.faizal.af6@gmail.com"
] |
adam.faizal.af6@gmail.com
|
479662d20de07396bd35fcfe7f488ed18826fad5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02855/s718736811.py
|
8ddd9b2385d3289e1813e674e14423b272d3ea98
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
import sys
input = sys.stdin.readline  # fast reads; deliberately shadows builtin input()
def main():
    """Competitive-programming grid fill.

    Reads an H x W grid with some cells marked '#' (K presumably counts
    them — TODO confirm; K is read but never used).  Each marked cell gets
    an id, then each id's region is grown: first left/right along its row
    until hitting a claimed cell, then the whole row-strip is extended up
    and down while the rows there are completely unclaimed.  Prints the
    resulting id grid, one row per line.
    """
    H, W, K = map(int, input().split())
    # cake[y][x] == 0 means unclaimed; otherwise the 1-based region id.
    cake = [[0]*W for _ in range(H)]
    sb = []
    # Collect coordinates of all '#' cells in row-major order.
    for y in range(H):
        s = input().strip()
        for x, c in enumerate(s):
            if c == "#":
                sb.append((y, x))
    # Seed each region with its 1-based id.
    for i, (y, x) in enumerate(sb):
        cake[y][x] = i + 1
    for i, s in enumerate(sb):
        i += 1  # 1-based id for this seed
        y = s[0]
        x0 = x1 = s[1]
        # Grow left along the seed's row until a claimed cell blocks us.
        for x in range(s[1]-1, -1, -1):
            if cake[y][x] != 0:
                break
            cake[y][x] = i
            x0 = x
        # Grow right the same way; [x0, x1] is now this region's row span.
        for x in range(s[1]+1, W):
            if cake[y][x] != 0:
                break
            cake[y][x] = i
            x1 = x
        # Extend the span upward while the row above is entirely unclaimed.
        for y in range(s[0]-1, -1, -1):
            if cake[y][x0:x1+1].count(0) != x1-x0+1:
                break
            for x in range(x0, x1+1):
                cake[y][x] = i
        # And downward likewise.
        for y in range(s[0]+1, H):
            if cake[y][x0:x1+1].count(0) != x1-x0+1:
                break
            for x in range(x0, x1+1):
                cake[y][x] = i
    for c in cake:
        print(*c)
if __name__ == '__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a82bee3b593d95974bad78f55ee6156122db8fd3
|
f998a574343292d050777f616b408a74fde05738
|
/eshop_docker/eshop/extra_apps/social_core/pipeline/mail.py
|
0011dff7ec36b703d0f114d0e63bcffcaea7276f
|
[] |
no_license
|
Boomshakal/Django
|
7987e0572fc902bd56360affea0b5087a4cb04a7
|
a149691c472eab3440028bf2460cd992acec0f8a
|
refs/heads/master
| 2023-01-11T06:16:29.283428
| 2022-12-23T08:00:05
| 2022-12-23T08:00:05
| 199,360,433
| 0
| 0
| null | 2020-06-06T09:37:02
| 2019-07-29T02:01:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
from ..exceptions import InvalidEmail
from .partial import partial
@partial
def mail_validation(backend, details, is_new=False, *args, **kwargs):
    """Partial pipeline step: enforce e-mail validation before proceeding.

    Validation is *required* when the backend sets REQUIRES_EMAIL_VALIDATION
    or the FORCE_EMAIL_VALIDATION setting is on; it is actually *sent* only
    when an e-mail is present and the user is new (or PASSWORDLESS is set).

    Two phases share this function (it is a @partial, so the pipeline
    suspends and re-enters here):
    - return trip: request data carries 'verification_code'; the code is
      checked and InvalidEmail raised on mismatch.
    - first pass: a validation e-mail is sent, the address stored in the
      session, and the user redirected to EMAIL_VALIDATION_URL.
    """
    requires_validation = backend.REQUIRES_EMAIL_VALIDATION or \
                          backend.setting('FORCE_EMAIL_VALIDATION', False)
    send_validation = details.get('email') and \
                      (is_new or backend.setting('PASSWORDLESS', False))
    if requires_validation and send_validation:
        data = backend.strategy.request_data()
        if 'verification_code' in data:
            # Return trip: clear the pending address, then verify the code.
            backend.strategy.session_pop('email_validation_address')
            if not backend.strategy.validate_email(details['email'],
                                                   data['verification_code']):
                raise InvalidEmail(backend)
        else:
            # First pass: send the mail and park the pipeline at this step.
            current_partial = kwargs.get('current_partial')
            backend.strategy.send_email_validation(backend,
                                                   details['email'],
                                                   current_partial.token)
            backend.strategy.session_set('email_validation_address',
                                         details['email'])
            return backend.strategy.redirect(
                backend.strategy.setting('EMAIL_VALIDATION_URL')
            )
|
[
"362169885@qq.com"
] |
362169885@qq.com
|
f5003698eeb746238b83debfde8aa260cb48bdfd
|
ec56e3a57fb71f3fc4f19b168d3fa34cebb781ab
|
/tcga_encoder/analyses/old/tsne_from_z_space.py
|
34915972bbefbbd33094bd0c0e2d2d99481dc0c7
|
[
"MIT"
] |
permissive
|
tedmeeds/tcga_encoder
|
64d60148b0c69092cb499abec22618e740ba8b6c
|
805f9a5bcc422a43faea45baa0996c88d346e3b4
|
refs/heads/master
| 2021-01-13T04:50:42.643743
| 2017-08-25T13:09:38
| 2017-08-25T13:09:38
| 78,621,753
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,225
|
py
|
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
def main( data_location, results_location ):
  """Project the VAE latent (Z) space and save plots/CSVs under results/tsne.

  NOTE(review): Python 2 code (print statements).  Despite the name, the
  data is first reduced by a *random linear projection* to 5 components;
  TSNE is then fit on that projection (the LLE call is commented out).
  """
  data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
  results_path = os.path.join( HOME_DIR, results_location )
  data_filename = os.path.join( data_path, "data.h5")
  fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
  tsne_dir = os.path.join( results_path, "tsne" )
  check_and_mkdir(tsne_dir)
  print "HOME_DIR: ", HOME_DIR
  print "data_filename: ", data_filename
  print "fill_filename: ", fill_filename
  print "LOADING stores"
  data_store = pd.HDFStore( data_filename, "r" )
  fill_store = pd.HDFStore( fill_filename, "r" )
  # Latent means for train/val; rows are sample barcodes.
  Z_train = fill_store["/Z/TRAIN/Z/mu"]
  Z_val = fill_store["/Z/VAL/Z/mu"]
  tissue = data_store["/CLINICAL/TISSUE"]
  barcodes_train = Z_train.index.values
  tissue_train = data_store["/CLINICAL/TISSUE"].loc[barcodes_train]
  tissues = tissue_train.columns
  # One-hot tissue matrix -> integer tissue label per sample.
  tissue_idx = np.argmax( tissue_train.values, 1 )
  #pdb.set_trace()
  #class sklearn.manifold.TSNE(n_components=2, perplexity=30.0, early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000, n_iter_without_progress=30, min_grad_norm=1e-07, metric='euclidean', init='random', verbose=0, random_state=None, method='barnes_hut', angle=0.5)
  print "Running TSNE"
  Z_normed = Z_train.values# - Z_train.values.mean(0)
  #Z_normed = Z_normed - Z_train.values.std(0)
  # Keep only the first 100 latent dimensions.
  Z_normed = Z_normed[:,:100]
  perplexity=30
  nbr = 2000
  # Fixed seed: the same random subsample and projection every run.
  np.random.seed(1)
  I = np.random.permutation( len(Z_normed ))[:nbr]
  tsne = TSNE(n_components=3,verbose=1, learning_rate=1000, perplexity=perplexity, method='exact')
  #embedded,dummy = locally_linear_embedding(Z_normed[I,:], n_neighbors=10, n_components=4)
  # Random linear projection to 5 dims before TSNE (cheap pre-reduction).
  n_components =5
  w = np.random.randn( Z_normed.shape[1],n_components)
  embedded = np.dot( Z_normed[I,:], w )
  #pdb.set_trace()
  np.savetxt( tsne_dir + "/z.csv", Z_normed[I,:], fmt='%.3f',delimiter=',')
  labels = [tissues[idx] for idx in tissue_idx[I]]
  np.savetxt( tsne_dir + "/labels.csv", labels, fmt='%s',delimiter=',')
  embedded = tsne.fit_transform( embedded )
  print "DONE!"
  # z_2d = bh_sne(Z_n,perplexity=30)
  colors = "bgrkycmbgrkycmbgrkycmbgrkycmbgrkycmbgrkycmbgrkycmbgrkycmbgrkycm"
  markers = "ooooooosssssssvvvvvvvppppppphhhhhhhDDDDDDDooooooosssssssvvvvvvvppppppphhhhhhhDDDDDDD"
  pp.figure( figsize=(12,12))
  # Plot up to 10 samples per tissue, one color/marker combo per tissue.
  for t_idx in range( len(tissues) ):
    ids = tissue_idx[I] == t_idx
    #'o', mec="r", mew="2",ms=30,fillstyle="none"
    # NOTE(review): `ids` is a boolean mask, so len(ids) == nbr always;
    # this condition was likely meant to be `ids.sum() >= 10` — confirm.
    if len(ids) >=10:
      pp.plot( embedded[ids,:][:10,0], embedded[ids,:][:10,1], markers[t_idx], mec=colors[t_idx], mew="2", ms=10, fillstyle="none", alpha=0.5 )
  #pp.show()
  pp.savefig( tsne_dir + "/tsne_perplexity_%d.png"%(perplexity), format='png', dpi=300 )
  #pdb.set_trace()
if __name__ == "__main__":
  data_location = sys.argv[1]
  results_location = sys.argv[2]
  main( data_location, results_location )
|
[
"tmeeds@gmail.com"
] |
tmeeds@gmail.com
|
9aafc60a18e81c3c46b7f61ce8584b6642ac2b85
|
5f86944bdf1b810a84c63adc6ed01bbb48d2c59a
|
/kubernetes/client/models/v1alpha1_cluster_role_binding.py
|
0a4350d713ee282f409da55e0d78e80cd0581b15
|
[
"Apache-2.0"
] |
permissive
|
m4ttshaw/client-python
|
384c721ba57b7ccc824d5eca25834d0288b211e2
|
4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1
|
refs/heads/master
| 2021-01-13T06:05:51.564765
| 2017-06-21T08:31:03
| 2017-06-21T08:31:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,473
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1ClusterRoleBinding(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, kind=None, metadata=None, role_ref=None, subjects=None):
"""
V1alpha1ClusterRoleBinding - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'role_ref': 'V1alpha1RoleRef',
'subjects': 'list[V1alpha1Subject]'
}
self.attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'role_ref': 'roleRef',
'subjects': 'subjects'
}
self._api_version = api_version
self._kind = kind
self._metadata = metadata
self._role_ref = role_ref
self._subjects = subjects
@property
def api_version(self):
"""
Gets the api_version of this V1alpha1ClusterRoleBinding.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1alpha1ClusterRoleBinding.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1alpha1ClusterRoleBinding.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1alpha1ClusterRoleBinding.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1alpha1ClusterRoleBinding.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1alpha1ClusterRoleBinding.
:rtype: str
"""
return self._kind
    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1alpha1ClusterRoleBinding.

        No validation is performed here; the value is stored as-is.

        :param kind: The kind of this V1alpha1ClusterRoleBinding.
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """
        Gets the metadata of this V1alpha1ClusterRoleBinding.

        Standard object's metadata.

        :return: The metadata of this V1alpha1ClusterRoleBinding.
        :rtype: V1ObjectMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1alpha1ClusterRoleBinding.

        Standard object's metadata. Optional: no None check is performed.

        :param metadata: The metadata of this V1alpha1ClusterRoleBinding.
        :type: V1ObjectMeta
        """
        self._metadata = metadata
    @property
    def role_ref(self):
        """
        Gets the role_ref of this V1alpha1ClusterRoleBinding.

        RoleRef can only reference a ClusterRole in the global namespace.
        If the RoleRef cannot be resolved, the Authorizer must return an
        error.

        :return: The role_ref of this V1alpha1ClusterRoleBinding.
        :rtype: V1alpha1RoleRef
        """
        return self._role_ref
    @role_ref.setter
    def role_ref(self, role_ref):
        """
        Sets the role_ref of this V1alpha1ClusterRoleBinding.

        Required field: a None value is rejected.

        :param role_ref: The role_ref of this V1alpha1ClusterRoleBinding.
        :type: V1alpha1RoleRef
        :raises ValueError: if role_ref is None.
        """
        if role_ref is None:
            raise ValueError("Invalid value for `role_ref`, must not be `None`")
        self._role_ref = role_ref
    @property
    def subjects(self):
        """
        Gets the subjects of this V1alpha1ClusterRoleBinding.

        Subjects holds references to the objects the role applies to.

        :return: The subjects of this V1alpha1ClusterRoleBinding.
        :rtype: list[V1alpha1Subject]
        """
        return self._subjects
    @subjects.setter
    def subjects(self, subjects):
        """
        Sets the subjects of this V1alpha1ClusterRoleBinding.

        Required field: a None value is rejected.

        :param subjects: The subjects of this V1alpha1ClusterRoleBinding.
        :type: list[V1alpha1Subject]
        :raises ValueError: if subjects is None.
        """
        if subjects is None:
            raise ValueError("Invalid value for `subjects`, must not be `None`")
        self._subjects = subjects
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """
        Returns the pretty-printed string representation of the model
        (pformat over to_dict()).
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`; delegates to to_str().
        """
        return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1ClusterRoleBinding):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal (inverse of __eq__).
        """
        return not self == other
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
2a51fc184829e1da5455b36180993db24eb91034
|
d0ca6faf4b672be1d97b6cf6302430a3dc970895
|
/apps/spark/src/spark/migrations/0001_initial.py
|
f94b6466d45d50367513e891f3d02575f4ac7780
|
[
"Apache-2.0"
] |
permissive
|
jesman/hue
|
8aaea0a6134e1624c12145159fae94d6e01e5db4
|
21edfc1b790510e512216ab5cc8aeb1a84255de3
|
refs/heads/master
| 2021-01-14T13:48:06.054767
| 2013-11-26T22:26:32
| 2013-11-26T23:10:41
| 14,733,058
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,825
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create/drop the `spark_sparkscript` table."""
    def forwards(self, orm):
        # Adding model 'SparkScript'
        db.create_table('spark_sparkscript', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('data', self.gf('django.db.models.fields.TextField')(default='{"name": "", "parameters": [], "script": "", "hadoopProperties": [], "type": "python", "properties": [], "resources": [], "job_id": null}')),
        ))
        db.send_create_signal('spark', ['SparkScript'])
    def backwards(self, orm):
        # Deleting model 'SparkScript'
        db.delete_table('spark_sparkscript')
    # Frozen ORM snapshot auto-generated by South — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'desktop.document': {
            'Meta': {'object_name': 'Document'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'extra': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'doc_owner'", 'to': "orm['auth.User']"}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['desktop.DocumentTag']", 'db_index': 'True', 'symmetrical': 'False'}),
            'version': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
        },
        'desktop.documenttag': {
            'Meta': {'object_name': 'DocumentTag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'spark.sparkscript': {
            'Meta': {'object_name': 'SparkScript'},
            'data': ('django.db.models.fields.TextField', [], {'default': '\'{"name": "", "parameters": [], "script": "", "hadoopProperties": [], "type": "python", "properties": [], "resources": [], "job_id": null}\''}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }
    complete_apps = ['spark']
|
[
"romain@cloudera.com"
] |
romain@cloudera.com
|
3180b25bad018eec1119e875c24d615762cfd99d
|
7add1f8fc31b09bb79efd2b25cc15e23666c1d1d
|
/tfx/dsl/context_managers/context_manager_test.py
|
6cd12f340655d602f1e13a0924a240f2424f8509
|
[
"Apache-2.0"
] |
permissive
|
twitter-forks/tfx
|
b867e9fee9533029ca799c4a4c5d1c5430ba05fe
|
cb3561224c54a5dad4d5679165d5b3bafc8b451b
|
refs/heads/master
| 2021-11-19T18:45:09.157744
| 2021-10-19T00:02:34
| 2021-10-19T00:02:34
| 205,426,993
| 2
| 1
|
Apache-2.0
| 2021-10-18T21:03:50
| 2019-08-30T17:21:03
|
Python
|
UTF-8
|
Python
| false
| false
| 7,687
|
py
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.dsl.context_manager."""
import threading
from typing import Dict, Any
import tensorflow as tf
from tfx.dsl.components.base import base_node
from tfx.dsl.context_managers import context_manager
class _FakeNode(base_node.BaseNode):
  """Minimal BaseNode stub: no inputs, outputs, or exec properties."""

  @property
  def inputs(self) -> Dict[str, Any]:
    return {}

  @property
  def outputs(self) -> Dict[str, Any]:
    return {}

  @property
  def exec_properties(self) -> Dict[str, Any]:
    return {}
# Trivial DslContext subclass used as the context type under test.
class _FakeContext(context_manager.DslContext):
  pass
class _FakeContextManager(context_manager.DslContextManager):
  """DslContextManager stub whose enter() returns the context itself."""

  def create_context(self) -> _FakeContext:
    return _FakeContext()

  def enter(self, context: _FakeContext) -> _FakeContext:
    return context
class ContextManagerTest(tf.test.TestCase):
  """Tests for DslContext registration, nesting, and node association.

  NOTE(review): the indentation of the nested ``with`` blocks below was
  reconstructed from the asserted parent/ancestor relationships — confirm
  against the upstream file.
  """

  def reset_registry(self) -> context_manager._DslContextRegistry:
    # Swap in a fresh module-level registry so tests do not interfere.
    result = context_manager._registry = context_manager._DslContextRegistry()
    return result

  def testContext_ContextIdAttrFactory(self):
    # ID is in format <classname>:<num> where <num> is incremental
    # regardless of the depth of the contexts.
    self.reset_registry()
    with _FakeContextManager() as c1:
      self.assertEqual(c1.id, '_FakeContext:1')
      with _FakeContextManager() as c2:
        self.assertEqual(c2.id, '_FakeContext:2')
        with _FakeContextManager() as c3:
          self.assertEqual(c3.id, '_FakeContext:3')
      with _FakeContextManager() as c4:
        self.assertEqual(c4.id, '_FakeContext:4')
    # ID count is reset after registry reset.
    self.reset_registry()
    with _FakeContextManager() as c5:
      self.assertEqual(c5.id, '_FakeContext:1')

  def testContext_ParentAttrFactory(self):
    registry = self.reset_registry()
    bg = registry.background_context
    with _FakeContextManager() as c1:
      self.assertIs(c1.parent, bg)
      with _FakeContextManager() as c2:
        self.assertIs(c2.parent, c1)
        with _FakeContextManager() as c3:
          self.assertEqual(c3.parent, c2)
      with _FakeContextManager() as c4:
        self.assertIs(c4.parent, c1)

  def testContext_Ancestors(self):
    registry = self.reset_registry()
    bg = registry.background_context
    self.assertEqual(list(bg.ancestors), [])
    with _FakeContextManager() as c1:
      self.assertEqual(list(c1.ancestors), [bg])
      with _FakeContextManager() as c2:
        self.assertEqual(list(c2.ancestors), [bg, c1])
        with _FakeContextManager() as c3:
          self.assertEqual(list(c3.ancestors), [bg, c1, c2])
      with _FakeContextManager() as c4:
        self.assertEqual(list(c4.ancestors), [bg, c1])

  def testRegistry_AllContexts(self):
    # all_contexts accumulates every context ever created, active or not.
    registry = self.reset_registry()
    bg = registry.background_context
    self.assertEqual(registry.all_contexts, [bg])
    with _FakeContextManager() as c1:
      self.assertEqual(registry.all_contexts, [bg, c1])
      with _FakeContextManager() as c2:
        self.assertEqual(registry.all_contexts, [bg, c1, c2])
        with _FakeContextManager() as c3:
          self.assertEqual(registry.all_contexts, [bg, c1, c2, c3])
      with _FakeContextManager() as c4:
        self.assertEqual(registry.all_contexts, [bg, c1, c2, c3, c4])

  def testRegistry_ActiveContexts(self):
    # active_contexts contains only contexts whose `with` is still open.
    registry = self.reset_registry()
    bg = registry.background_context
    self.assertEqual(registry.active_contexts, [bg])
    with _FakeContextManager() as c1:
      self.assertEqual(registry.active_contexts, [bg, c1])
      with _FakeContextManager() as c2:
        self.assertEqual(registry.active_contexts, [bg, c1, c2])
        with _FakeContextManager() as c3:
          self.assertEqual(registry.active_contexts, [bg, c1, c2, c3])
      with _FakeContextManager() as c4:
        self.assertEqual(registry.active_contexts, [bg, c1, c4])

  def testRegistry_NodeAndContextAssociations(self):
    registry = self.reset_registry()
    bg = registry.background_context
    n0 = _FakeNode()
    with _FakeContextManager() as c1:
      n1 = _FakeNode()
      with _FakeContextManager() as c2:
        n2 = _FakeNode()
        with _FakeContextManager() as c3:
          n3 = _FakeNode()
      with _FakeContextManager() as c4:
        n4 = _FakeNode()
    # Associated nodes for each context
    self.assertEqual(registry.get_nodes(bg), [n0, n1, n2, n3, n4])
    self.assertEqual(registry.get_nodes(c1), [n1, n2, n3, n4])
    self.assertEqual(registry.get_nodes(c2), [n2, n3])
    self.assertEqual(registry.get_nodes(c3), [n3])
    self.assertEqual(registry.get_nodes(c4), [n4])
    # Convenient property for calling registry.get_nodes()
    self.assertEqual(bg.nodes, [n0, n1, n2, n3, n4])
    self.assertEqual(c1.nodes, [n1, n2, n3, n4])
    self.assertEqual(c2.nodes, [n2, n3])
    self.assertEqual(c3.nodes, [n3])
    self.assertEqual(c4.nodes, [n4])
    # Associated contexts for each node
    self.assertEqual(registry.get_contexts(n0), [bg])
    self.assertEqual(registry.get_contexts(n1), [bg, c1])
    self.assertEqual(registry.get_contexts(n2), [bg, c1, c2])
    self.assertEqual(registry.get_contexts(n3), [bg, c1, c2, c3])
    self.assertEqual(registry.get_contexts(n4), [bg, c1, c4])

  def testContextManager_EnterMultipleTimes(self):
    # Re-entering the same manager creates a distinct context each time.
    cm = _FakeContextManager()
    with cm as c1:
      pass
    with cm as c2:
      self.assertNotEqual(c1, c2)
      with cm as c3:
        self.assertNotEqual(c2, c3)

  def testContextManager_EnterReturnValue(self):
    # The value bound by `with ... as` is whatever enter() returns.
    class UltimateContextManager(_FakeContextManager):

      def enter(self, context: _FakeContext) -> int:
        return 42

    with UltimateContextManager() as captured:
      self.assertEqual(captured, 42)

  def testRegistry_MultiThreads(self):
    # Each thread sees its own context numbering/parenting — presumably the
    # registry is thread-local; verify against context_manager internals.
    num_threads = 2
    b = threading.Barrier(num_threads)

    def test():
      with _FakeContextManager() as c1:
        self.assertEqual(c1.id, '_FakeContext:1')
        b.wait()
        with _FakeContextManager() as c2:
          self.assertEqual(c2.id, '_FakeContext:2')
          self.assertEqual(c2.parent, c1)

    threads = [threading.Thread(target=test) for i in range(num_threads)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()  # Expects no unhandled exceptions.

  def testGetNodes(self):
    self.reset_registry()
    n0 = _FakeNode()
    with _FakeContextManager() as c1:
      n1 = _FakeNode()
      with _FakeContextManager() as c2:
        n2 = _FakeNode()
        with _FakeContextManager() as c3:
          n3 = _FakeNode()
      with _FakeContextManager() as c4:
        n4 = _FakeNode()
    with self.subTest('With argument'):
      self.assertEqual(context_manager.get_nodes(c1), [n1, n2, n3, n4])
      self.assertEqual(context_manager.get_nodes(c2), [n2, n3])
      self.assertEqual(context_manager.get_nodes(c3), [n3])
      self.assertEqual(context_manager.get_nodes(c4), [n4])
    with self.subTest('Without argument'):
      # get_nodes() without argument queries nodes for background context,
      # which works as a node registry
      self.assertEqual(context_manager.get_nodes(), [n0, n1, n2, n3, n4])
# Standard TensorFlow test entry point when run as a script.
if __name__ == '__main__':
  tf.test.main()
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
91cc85e0ffa21fb97bb0ff8de417a2d95386a351
|
2c4648efe8c7e408b8c3a649b2eed8bb846446ec
|
/codewars/Python/6 kyu/EqualSidesOfAnArray/find_even_index_test.py
|
8b6028dd54d750990bb81debe7dec85cc4fd15a3
|
[] |
no_license
|
Adasumizox/ProgrammingChallenges
|
9d79bd1b0ce4794b576124f9874aabb86d5c0713
|
3630fcde088d7991e344eb1b84805e9e756aa1a2
|
refs/heads/master
| 2021-07-16T08:16:57.538577
| 2020-07-19T19:58:28
| 2020-07-19T19:58:28
| 190,159,085
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
from find_even_index import find_even_index
import unittest
from random import randint
class TestFindEvenIndex(unittest.TestCase):
    """Tests for find_even_index: index where left sum equals right sum."""
    def test(self):
        self.assertEqual(find_even_index([1,2,3,4,3,2,1]), 3, 'Basic test')
        self.assertEqual(find_even_index([1,100,50,-51,1,1]), 1, 'Basic test')
        self.assertEqual(find_even_index([1,2,3,4,5,6]), -1, 'Basic test')
        self.assertEqual(find_even_index([20,10,30,10,10,15,35]), 3, 'Basic test')
        self.assertEqual(find_even_index([20,10,-80,10,10,15,35]), 0, 'Basic test')
        self.assertEqual(find_even_index([10,-80,10,10,15,35,20]), 6, 'Basic test')
        self.assertEqual(find_even_index(range(1,100)),-1, 'Basic test')
        self.assertEqual(find_even_index([0,0,0,0,0]), 0, 'Basic test')
        self.assertEqual(find_even_index([-1,-2,-3,-4,-3,-2,-1]), 3, 'Basic test')
        self.assertEqual(find_even_index(range(-100,-1)), -1, 'Basic test')
    def test_rand(self):
        # Recursive reference solution: walks the array keeping a left sum `l`
        # and right sum `r`, returning the first balanced index or -1.
        find_even_sol=lambda arr, l=0, r="null", i=0: (lambda r: -1 if i>=len(arr) else i if r==l else find_even_sol(arr, l+arr[i], r-(0 if i+1>=len(arr) else arr[i+1]), i+1))(r if r!="null" else sum(arr[1:]))
        # Collapses a random suffix into its sum, preserving total.
        contract=lambda arr: (lambda pos: arr[:pos]+[sum(arr[pos:])])(randint(0,len(arr)-1))
        for _ in range(40):
            # Build two related halves, shuffle each, and join with a pivot.
            left=[randint(-20, 20) for qu in range(randint(10,20))]
            right=left[:]
            if randint(0,1): left[randint(0,len(left)-1)]+=randint(-20,20)
            left=sorted(contract(left), key=lambda a: randint(1,1000))
            right=sorted(contract(right), key=lambda a: randint(1,1000))
            arr=([]+left[:]+[randint(-20,20)]+right[:])[:]
            self.assertEqual(find_even_index(arr[:]), find_even_sol(arr), "It should work for random inputs too")
# Run this module's tests when executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"darkdan099@gmail.com"
] |
darkdan099@gmail.com
|
645e0aebaab4b939d2f1eaa75c24879619c3c55e
|
73d5f8918e7933f31a1ead5f23000989ff8d1445
|
/buildbuild/api/tests/test_api_team_list_search.py
|
f6b2e34370b2cc2fb16426f7cd16ea1c0462076e
|
[
"BSD-3-Clause"
] |
permissive
|
wikibootup/buildbuild
|
c8f1d3f241131059315ba1ca368bfa173449ee9a
|
7629b3ac172803d474af312f22eb4a9631342ed3
|
refs/heads/master
| 2021-01-18T07:22:25.530180
| 2014-11-20T19:03:23
| 2014-11-20T19:03:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
from django.test import TestCase
from django.test.client import Client
from teams.models import Team
class TestAPITeamListSearch(TestCase):
    """The team list endpoint's ?search= filter returns only matching teams."""
    def setUp(self):
        self.test_string = "test_string"
        # One team whose name contains the search term...
        self.team_with_test_string = Team.objects.create_team(
            name="team_name_with_" + self.test_string,
            # prefix 'test_' is excluded in this test case
            # because of model validation ( max_length=30 on Team.name )
        )
        # ...and one whose name does not.
        self.team_without_test_string = Team.objects.create_team(
            name="team_name_without_",  # + self.test_string,
        )
        self.client = Client()
        self.response = self.client.get("/api/teams/?search=" + self.test_string)
    def test_api_team_list_search_should_return_valid_result(self):
        # Matching team is present, non-matching team is filtered out.
        self.assertContains(self.response, self.team_with_test_string.name)
        self.assertNotContains(self.response, self.team_without_test_string.name)
|
[
"dobestan@gmail.com"
] |
dobestan@gmail.com
|
8542295dc0339d09a52885dcaa9e153216812568
|
ffb05b145989e01da075e2a607fb291955251f46
|
/mtraits/strait.py
|
8a9a1b7d9c0d49bab9a31ca2e20f96ba6118265e
|
[] |
no_license
|
micheles/papers
|
a5e7f2fa0cf305cd3f8face7c7ecc0db70ce7cc7
|
be9070f8b7e8192b84a102444b1238266bdc55a0
|
refs/heads/master
| 2023-06-07T16:46:46.306040
| 2018-07-14T04:17:51
| 2018-07-14T04:17:51
| 32,264,461
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,528
|
py
|
# Package version and explicit public API of the strait module.
__version__ = '0.5.3'
__all__ = ['__version__', 'include', 'MetaTOS']
import inspect
import types
import warnings
# Raised when two traits (or a trait and the class) define the same name.
class OverridingError(NameError):
    pass
# Warning counterpart of OverridingError, used when raise_='warning'.
class OverridingWarning(Warning):
    pass
class Super(object):
    # this is needed to fix a shortcoming of unbound super objects,
    # i.e. this is how the unbound version of super should work
    def __init__(self, thisclass):
        # The class to which this super-proxy is anchored.
        self.__thisclass__ = thisclass
    def __get__(self, obj, objcls):
        # Descriptor protocol: return a super object bound to the instance
        # (or, for class-level access, to the class itself).
        return super(self.__thisclass__, obj or objcls)
def oldstyle(*bases):
    "Return True if there are no bases or all bases are old-style"
    if not bases:
        return True
    return {type(base) for base in bases} == {types.ClassType}
class Namespace(dict):
    "A named dictionary containing the attribute of a class and its ancestors"
    @classmethod
    def from_cls(klass, cls):
        # Collect attributes along the MRO; ancestors first so that
        # subclasses win in the merge.
        if oldstyle(cls):
            mro = inspect.getmro(cls)
        else:
            mro = cls.__mro__[:-1] # all except object
        dic = merge(subc.__dict__ for subc in reversed(mro))
        return klass(cls.__name__, dic)
    def __init__(self, name, attrs):
        # __name__ mirrors the originating class name for error messages.
        self.__name__ = name
        self.update(attrs)
def merge(dicts):
    """Merge a sequence of dictionaries. In case of name clashes,
    the last dict in the sequence wins."""
    return {key: value for mapping in dicts for key, value in mapping.items()}
class MetaTOS(type):
    "The metaclass of the Trait Object System"
    def __new__(mcl, name, bases, dic):
        # TOS classes are restricted to single inheritance.
        if len(bases) > 1:
            raise TypeError(
                'Multiple inheritance of bases %s is forbidden for TOS classes'
                % str(bases))
        elif oldstyle(*bases): # converts into new-style
            bases += (object,)
        # mcl.__super mangles to mcl._MetaTOS__super, installed just after
        # this class body (see the assignment below the class).
        cls = mcl.__super.__new__(mcl, name, bases, dic)
        # Give the new class its own name-mangled unbound-super helper.
        setattr(cls, '_%s__super' % name, Super(cls))
        return cls
# Install the mangled __super helper that MetaTOS.__new__ relies on.
MetaTOS._MetaTOS__super = Super(MetaTOS)
def find_common_names(namespaces):
    """Perform n*(n-1)/2 namespace overlapping checks on a set of n namespaces.

    Yields (common_names, ns_i, ns_j) for every unordered pair of namespaces
    that share at least one key. Yields nothing for fewer than 2 namespaces.
    """
    n = len(namespaces)
    if n <= 1:
        return
    # Bug fix: the original used `names = map(set, namespaces)` and then
    # indexed `names[i]` — map() returns a non-subscriptable iterator on
    # Python 3. A list comprehension is identical on Python 2 and also
    # avoids recomputing the sets inside the double loop.
    names = [set(ns) for ns in namespaces]
    for i in range(0, n):
        for j in range(i + 1, n):
            ci, cj = namespaces[i], namespaces[j]
            common = names[i] & names[j]
            if common:
                yield common, ci, cj
def check_overridden(namespaces, exclude=frozenset(), raise_='error'):
    "Raise an OverridingError for common names not in the exclude set"
    for common, n1, n2 in find_common_names(namespaces):
        # Names present in both namespaces, minus the allowed overrides.
        overridden = ', '.join(common - exclude)
        if overridden:
            msg = '%s overrides names in %s: {%s}' % (
                n1.__name__, n2.__name__, overridden)
            if raise_ == 'error':
                raise OverridingError(msg)
            elif raise_ == 'warning':
                # stacklevel=2 points the warning at the caller's call site.
                warnings.warn(msg, OverridingWarning, stacklevel=2)
# Registry of metaclasses compatible with the TOS; grown by get_right_meta.
known_metas = set([MetaTOS])
def get_right_meta(metatos, bases):
    # there is only one base because of the single-inheritance constraint
    try:
        base = bases[0]
    except IndexError:
        base = object
    meta = type(base)
    # NOTE: types.ClassType is Python 2 only (old-style classes).
    if meta in (types.ClassType, type): # is a builtin meta
        return metatos
    elif any(issubclass(meta, m) for m in known_metas):
        return meta
    # meta is independent from all known_metas, make a new one with
    # __new__ method coming from MetaTOS
    newmeta = type(
        '_TOS' + meta.__name__, (meta,), dict(__new__=metatos.__new__))
    # Install the mangled __super helper expected by metatos.__new__.
    setattr(newmeta, '_%s__super' % metatos.__name__, Super(newmeta))
    known_metas.add(newmeta)
    return newmeta
# Dunder attributes that may always "clash" between traits without error.
exclude_attrs = set('__doc__ __module__ __dict__ __weakref__'.split())
def new(metatos, name, bases, attrs, traits):
    # traits as in Squeak take the precedence over the base class
    # but they are overridden by attributes in the class
    # NOTE: Python 2 assumption — map() must return a list here, since it is
    # concatenated with `+ [Namespace(...)]` below.
    namespaces = map(Namespace.from_cls, traits)
    check_overridden(namespaces, exclude=set(attrs) | exclude_attrs)
    meta = get_right_meta(metatos, bases)
    cls = meta(name, bases, merge(namespaces + [Namespace(name, attrs)]))
    # Record the traits and give each one a name-mangled super helper.
    cls.__traits__ = traits
    for t in traits:
        setattr(cls, '_%s__super' % t.__name__, Super(cls))
    return cls
def include(*traits, **kw):
    "Returns a class factory usable as a __metaclass__ to mix in *traits*"
    metatos = kw.get('MetaTOS', MetaTOS) # other kw free for future extensions
    def makecls(name, bases, dic):
        # Delegates class creation to new(), injecting the chosen traits.
        return new(metatos, name, bases, dic, traits)
    makecls.__name__ = 'include_%s' % '_'.join(m.__name__ for m in traits)
    return makecls
|
[
"michele.simionato@gmail.com"
] |
michele.simionato@gmail.com
|
ceee035fae6adfbdcf3ec2a990ba6b0f0ef0fa01
|
7e145d1fff87cdabf7c9ae9c08637f299fbd3849
|
/8. String to Integer (atoi).py
|
e2c542ddf189458ca878044d1437b0bbd14f4fa0
|
[] |
no_license
|
dundunmao/LeetCode2019
|
2b39ef181a7f66efc9de7d459b11eb1a4a7f60a8
|
9b38a7742a819ac3795ea295e371e26bb5bfc28c
|
refs/heads/master
| 2020-09-16T16:46:50.482697
| 2020-06-07T08:01:16
| 2020-06-07T08:01:16
| 223,833,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
class Solution:
    # @return an integer
    def myAtoi(self, str):
        """Convert a string to a 32-bit signed integer (atoi semantics).

        Skips surrounding whitespace, honours a single optional leading
        '+'/'-', reads the longest digit prefix, and clamps the result to
        [-2147483648, 2147483647]. Returns 0 when no digits are found.

        Bug fix: the original fell off the end and returned None (instead
        of 0) for inputs with no digits, e.g. "abc", "+", "+-2".
        """
        MAX_INT = 2147483647
        MIN_INT = -2147483648
        s = str.strip()  # remove leading/trailing whitespace only
        if not s:
            return 0
        sign = 1
        i = 0
        if s[0] == "-":  # optional sign
            sign = -1
            i = 1
        elif s[0] == "+":
            i = 1
        digits = ""
        while i < len(s) and s[i].isdigit():  # longest digit prefix
            digits += s[i]
            i += 1
        if not digits:
            return 0  # fixed: was an implicit `return None`
        result = sign * int(digits)
        if result > MAX_INT:  # clamp overflow
            return MAX_INT
        if result < MIN_INT:
            return MIN_INT
        return result
# class Solution(object):
# def myAtoi(self, str):
# """
# :type str: str
# :rtype: int
# """
# if str == '' or str is None:
# return 0
# str.strip()
# i = 0
# while str[0] == '-' or str[0] == '+' or str[i] == '0' or not str[i].isdigit():
# if str[0] == '-' or str[0] == '+' or str[i] == '0':
# i += 1
# if i > len(str)-1:
# break
# if i < len(str) and not str[i].isdigit():
# return 0
#
# le = 0
# j = i
# nums = 0
# while j < len(str):
# if str[j].isdigit():
# le += 1
# j += 1
# else:
# break
#
# for k in range(i, i + le):
# nums += int(str[k]) * 10 ** (le - 1)
# # print nums
# le -= 1
#
# if str[0] == '-':
# return -nums
# else:
# return nums
# Ad-hoc manual check: digits stop at the '.', so the expected output is -3.
if __name__ == "__main__":
    # s = "12"
    s = "-3.14159"
    x = Solution()
    print(x.myAtoi(s))
|
[
"dundunmao@gmail.com"
] |
dundunmao@gmail.com
|
cb02c0864c123cdf0d4f5cb9a32740ba3ceb4d35
|
a3926c09872e1f74b57431fbb3e711918a11dc0a
|
/python/array/1403_minimum_subsequence_in_non_increasing_order.py
|
569a2d60dcf2044789138831a3d5a708757a6cfa
|
[
"MIT"
] |
permissive
|
linshaoyong/leetcode
|
e64297dc6afcebcee0614a153a566323bf223779
|
57080da5fbe5d62cbc0b8a34e362a8b0978d5b59
|
refs/heads/main
| 2022-09-15T00:05:36.476268
| 2022-08-16T14:09:11
| 2022-08-16T14:09:11
| 196,914,051
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
class Solution(object):
    def minSubsequence(self, nums):
        """Return the shortest, largest-first subsequence whose sum is
        strictly greater than the sum of the remaining elements.

        :type nums: List[int]
        :rtype: List[int]
        """
        threshold = sum(nums) // 2
        ordered = sorted(nums, reverse=True)
        picked = []
        running = 0
        for value in ordered:
            picked.append(value)
            running += value
            if running > threshold:
                break
        return picked
def test_min_subsequence():
    # Spot-check minSubsequence against known expected subsequences.
    sol = Solution()
    cases = [
        ([4, 3, 10, 9, 8], [10, 9]),
        ([4, 4, 7, 6, 7], [7, 7, 6]),
        ([6], [6]),
    ]
    for nums, expected in cases:
        assert expected == sol.minSubsequence(nums)
|
[
"linshaoyong@gmail.com"
] |
linshaoyong@gmail.com
|
84e3b4bc849e6265d6bf69e8105cdf2fc4a7b2fc
|
35b0a18a89516ec84806ccb4662246673f109311
|
/homework/utils/utils_hw4.py
|
8ad12f3734ea88def8ee88e8468fd773013a1718
|
[] |
no_license
|
yqdch/MLDS-Note
|
5db53236411c9f1f730f02734edc42437dd868d9
|
d2800930ce501b4e12a439ac8cd6a75f0217c46d
|
refs/heads/master
| 2021-09-22T01:20:03.243647
| 2018-09-04T15:32:14
| 2018-09-04T15:32:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
from torchvision import transforms
# Preprocessing pipeline: image -> PIL RGB -> grayscale -> 84x84 -> tensor.
transform = transforms.Compose([transforms.ToPILImage(mode='RGB'),
                                transforms.Grayscale(1),
                                transforms.Resize((84, 84)),
                                transforms.ToTensor()])
# process the image..
def pre_process(x):
    """Apply the module-level `transform` pipeline to image *x*."""
    return transform(x)
|
[
"tinyshine@yeah.net"
] |
tinyshine@yeah.net
|
74fe39e241501dac412c523f88a7df4820066301
|
50d10939f20e140a116ef222671bc2abb56fa5fd
|
/daemon/test.py
|
8673809400aada1d9fa3662d43dad8d965d3ea67
|
[
"CC-BY-4.0"
] |
permissive
|
mattvenn/energy-wristband
|
27d82c6c30bc8156aea42705b4ae2b17d9c1f55d
|
d5afa1f3b86fd9e1f323f82f7af7fb2adb28d1c6
|
refs/heads/master
| 2020-04-08T07:40:58.606406
| 2017-03-23T10:31:52
| 2017-03-23T10:31:52
| 26,497,101
| 3
| 3
| null | 2017-03-23T10:31:53
| 2014-11-11T17:59:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,822
|
py
|
import unittest
import time
from diff import diff_energy
import os
# Constants mirrored into diff_energy's constructor in setUp below.
sens=50  # minimum energy change treated as significant
max_energy=3000  # energy mapped to the top divider
max_time=30  # seconds after which history is considered stale
class Test_Diff(unittest.TestCase):
    """Tests for diff_energy: divider conversion and change filtering."""
    def setUp(self):
        import logging
        logging.basicConfig(level=logging.INFO)
        self.d = diff_energy(logging, max_energy=max_energy, sens=sens, max_time=max_time)
    def test_convert(self):
        # max_energy maps to the top divider (4); a fifth of it maps to 1.
        self.assertEqual(self.d.energy_to_div(max_energy),4)
        self.assertEqual(self.d.energy_to_div(max_energy/5),1)
    def test_convert_limits(self):
        # Out-of-range energies are clamped into the 1..4 divider range.
        self.assertEqual(self.d.energy_to_div(-2*max_energy),1)
        self.assertEqual(self.d.energy_to_div(2*max_energy),4)
    def test_no_history(self):
        # With no history, the new reading is accepted as-is.
        self.assertEqual(self.d.get_last_valid(1),1)
    def test_time_too_long(self):
        # History older than max_time is ignored.
        now = time.time()
        self.d.hist = {"t": now - max_time - 1, "e": 100}
        self.assertEqual(self.d.get_last_valid(1),1)
    def test_large_change_up(self):
        # A big upward jump within max_time is rejected: old value returned.
        now = time.time()
        self.d.hist = {"t": now - max_time / 2, "e": 100}
        self.assertEqual(self.d.get_last_valid(1500),100)
    def test_large_change_down(self):
        # A big downward jump within max_time is likewise rejected.
        now = time.time()
        self.d.hist = {"t": now - max_time / 2, "e": 2000}
        self.assertEqual(self.d.get_last_valid(100),2000)
    def test_ignore_large_slow_change(self):
        # Gradual drift of one `sens` step per second is always accepted.
        e = 200
        self.assertEqual(self.d.get_last_valid(e),e)
        time.sleep(1.0)
        e += sens
        self.assertEqual(self.d.get_last_valid(e),e)
        time.sleep(1.0)
        e += sens
        self.assertEqual(self.d.get_last_valid(e),e)
        time.sleep(1.0)
        e += sens
        self.assertEqual(self.d.get_last_valid(e),e)
        time.sleep(1.0)
        e += sens
        self.assertEqual(self.d.get_last_valid(e),e)
    def test_ignore_small_change(self):
        # Sub-threshold changes are accepted directly.
        e = 200
        self.assertEqual(self.d.get_last_valid(e),e)
        time.sleep(0.5)
        e += sens / 4
        self.assertEqual(self.d.get_last_valid(e),e)
        time.sleep(0.5)
        e += sens / 4
        self.assertEqual(self.d.get_last_valid(e),e)
        time.sleep(0.5)
        e += sens / 4
        self.assertEqual(self.d.get_last_valid(e),e)
    def test_small_change_over_boundary(self):
        # A small change that crosses a divider boundary is still accepted.
        boundary = self.d.energy_per_div
        now = time.time()
        self.d.hist = {"t": now - 10, "e": boundary - 10}
        time.sleep(1)
        self.assertEqual(self.d.get_last_valid(boundary + 10),boundary + 10)
    def test_no_repetition(self):
        # A rejected spike is returned once the next reading confirms it.
        self.assertEqual(self.d.get_last_valid(200),200)
        time.sleep(1)
        self.assertEqual(self.d.get_last_valid(250),250)
        time.sleep(1)
        self.assertEqual(self.d.get_last_valid(3000),250)
        time.sleep(1)
        self.assertEqual(self.d.get_last_valid(2800),3000)
# Run this module's tests when executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"matt@mattvenn.net"
] |
matt@mattvenn.net
|
cbde5fdd7190d1955e4101bdcf828544eb7389e6
|
145129d75d8255e609cdaf1bc9aa66424e4de0d1
|
/pattern10.py
|
a3b75c166d4c647c813af35e6a4366f65cae4fe9
|
[] |
no_license
|
Neeraj-kaushik/coding_ninja
|
7ca2d004b149ff0193400569864c48749e331aca
|
23eb74bb8d4d80032b58c1408ac445aa87037a49
|
refs/heads/master
| 2022-11-07T19:43:05.869574
| 2020-06-19T17:31:34
| 2020-06-19T17:31:34
| 267,366,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
# Print n rows; row i (1-based) holds the digits 1..n-i+1 concatenated,
# so each successive row is one number shorter than the previous one.
n = int(input())
for row in range(1, n + 1):
    line = "".join(str(value) for value in range(1, n - row + 2))
    print(line)
|
[
"nkthecoder@gmail.com"
] |
nkthecoder@gmail.com
|
bb2496b1a827a445a662dae77a03705dc4c98659
|
852003f278d597a4a5e4cddfc12a480d563fb7db
|
/tests/settings.py
|
df4231c5aa2515f0bfd25444b2a2eafece753338
|
[
"MIT"
] |
permissive
|
City-of-Helsinki/helsinki-profile-gdpr-api
|
376faef774b673eaea543e92ca82eefb0b2c1a4a
|
808dcd30a745f6d18cdf36ccaf07b0cd25844ab0
|
refs/heads/main
| 2023-04-29T12:20:32.747297
| 2023-04-18T12:18:34
| 2023-04-18T12:26:44
| 346,269,690
| 4
| 0
|
MIT
| 2023-04-19T05:28:03
| 2021-03-10T07:29:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
# Minimal Django settings used only by the GDPR API test suite.
SECRET_KEY = "secret"
# In-memory SQLite keeps test runs fast and fully isolated.
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = (
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "helusers.apps.HelusersConfig",
    "helusers.apps.HelusersAdminConfig",
    "helsinki_gdpr",
    "tests",
)
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "tests.urls"
AUTH_USER_MODEL = "tests.User"
# GDPR API configuration: the profile model and the OAuth scopes required
# for the query and delete operations.
GDPR_API_MODEL = "tests.Profile"
GDPR_API_QUERY_SCOPE = "testprefix.gdprquery"
GDPR_API_DELETE_SCOPE = "testprefix.gdprdelete"
DEBUG = True
USE_TZ = True
# Token-auth settings for the fake OIDC issuer used in tests.
OIDC_API_TOKEN_AUTH = {
    "AUDIENCE": "test_audience",
    "ISSUER": "https://test_issuer_1",
    "REQUIRE_API_SCOPE_FOR_AUTHENTICATION": False,
    "API_AUTHORIZATION_FIELD": "",
    "API_SCOPE_PREFIX": "",
}
|
[
"juha.louhiranta@anders.fi"
] |
juha.louhiranta@anders.fi
|
755da0bc4ea001022621c3901a91605255fac548
|
f75609812d20d46a9f94ee0cfdb91c321d26b63d
|
/flask/flask_fundamentals/Dojo_Survey/server.py
|
019a971b2dca48d2d6f92ada60c9697c8990bd46
|
[] |
no_license
|
IanAranha/Python2021
|
eff47a20451f61b144b17f48321a7b06308aadca
|
d9769b8b387b77753b77f6efe3a9a270a1f158d3
|
refs/heads/main
| 2023-04-02T08:20:24.382913
| 2021-04-10T22:27:10
| 2021-04-10T22:27:10
| 345,918,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
from flask import Flask, render_template, redirect, request
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/users", methods={"POST"})
def results():
return render_template("result.html")
@app.route("/back", methods=["post"])
def back():
return redirect("/")
@app.route("/danger")
def danger():
print('A user tried to visit /danger. We have redirected the user to /')
return redirect("/")
if __name__ == "__main__":
app.run(debug=True)
|
[
"ianorama@gmail.com"
] |
ianorama@gmail.com
|
9dd52a5dbaf126ed8780d8d3fabfc6508c0af245
|
32226e72c8cbaa734b2bdee081c2a2d4d0322702
|
/visualization/grill/pusher_reward_type_ablation.py
|
745f1dc8766a01ca39a8d6d7bf1d0f8fa7e1bc90
|
[
"MIT"
] |
permissive
|
Asap7772/rail-rl-franka-eval
|
2b1cbad7adae958b3b53930a837df8a31ab885dc
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
refs/heads/master
| 2022-11-15T07:08:33.416025
| 2020-07-12T22:05:32
| 2020-07-12T22:05:32
| 279,155,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
import matplotlib
from visualization.grill.config import (
output_dir,
ashvin_base_dir,
format_func,
configure_matplotlib,
)
import matplotlib.pyplot as plt
from railrl.visualization import plot_util as plot
configure_matplotlib(matplotlib)
f = plot.filter_by_flat_params(
{'replay_kwargs.fraction_goals_are_env_goals': 0.5})
exps = plot.load_exps([
ashvin_base_dir + 's3doodad/share/steven/pushing-multipushing/pusher-reward-variants'],
f, suppress_output=True)
plot.tag_exps(exps, "name", "dsae")
plot.comparison(exps,
["Final puck_distance Mean", "Final hand_distance Mean"],
figsize=(6, 4),
vary=["vae_wrapped_env_kwargs.reward_params.type"],
default_vary={"reward_params.type": "unknown"},
smooth=plot.padded_ma_filter(10),
xlim=(0, 250000), ylim=(0.15, 0.22), method_order=None)
plt.gca().xaxis.set_major_formatter(plt.FuncFormatter(format_func))
plt.xlabel("Timesteps")
plt.ylabel("")
plt.title("Visual Pusher")
plt.legend([])
plt.tight_layout()
plt.savefig(output_dir + "pusher_reward_type_ablation.pdf")
|
[
"asap7772@berkeley.edu"
] |
asap7772@berkeley.edu
|
164292f20f8de66cf509569b2cdaffd15af4baee
|
b9a6440766ac6d09cbe5bcb0dd9ec035e79b68de
|
/0x0F-python-object_relational_mapping/1-filter_states.py
|
6f2411d250a3d57fbab1662f954bca9cb3995269
|
[] |
no_license
|
zulsb/holbertonschool-higher_level_programming
|
aa684ce2bad9f583dd54224e7cb1d60d2189b229
|
0a23d2ffc4ec5810213b6fcd82732f221c97a553
|
refs/heads/master
| 2021-06-25T15:16:48.849508
| 2021-05-23T00:07:13
| 2021-05-23T00:07:13
| 226,905,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
#!/usr/bin/python3
"""
Script that lists states with a name starting with N (upper N)
from the database hbtn_0e_0_usa.
"""
import MySQLdb
from sys import argv
if __name__ == "__main__":
"""Connect to database."""
conec_db = MySQLdb.connect(
host="localhost",
port=3306,
user=argv[1],
passwd=argv[2],
db=argv[3])
"""Create cursor to exec queries using SQL."""
cursor = conec_db.cursor()
cursor.execute("""SELECT * FROM states
WHERE name LIKE BINARY 'N%' ORDER BY id ASC""")
for row in cursor.fetchall():
print(row)
cursor.close()
conec_db.close()
|
[
"zulsb2093@gmail.com"
] |
zulsb2093@gmail.com
|
d7024372dfb48700aef449f128ccb72330a581de
|
77d6ae92c38d56f2aa7a57fd24dd97bec6fa6cc4
|
/blog/admin.py
|
a94a9be0c5919ee1f095e4b3bb1ae4e28bfb1c13
|
[] |
no_license
|
Gentility01/my-project
|
aaab030abad105094aa1c0206995a95a756448be
|
9cd0805ffc7a02b811f6481ad79bda8e4b14a786
|
refs/heads/master
| 2023-06-17T13:39:01.353408
| 2021-07-07T01:20:59
| 2021-07-07T01:20:59
| 383,476,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from django.contrib import admin
from .models import Post,Like,Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Like)
admin.site.register(Comment)
# admin.site.register(Post_pictures)
|
[
"mastergentility5@gmail.com"
] |
mastergentility5@gmail.com
|
5d1e60ce7008a10f3d47c22c09d40c60ab591b0f
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/surface/looker/instances/export.py
|
5b33b981ed98210dc6b0a5035626ffad0122cc93
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,190
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a Looker instance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.looker import instances
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.looker import flags
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.GA)
class Export(base.Command):
"""Export a Looker instance.
This command can fail for the following reasons:
* The instance specified does not exist.
* The active account does not have permission to access the given
instance.
* The Google Cloud Storage bucket does not exist.
"""
detailed_help = {'EXAMPLES': """\
To export an instance with the name `my-looker-instance` in the default
region, run:
$ {command} my-looker-instance --target-gcs-uri='gs://bucketName/folderName'
--kms-key='projects/my-project/locations/us-central1/keyRings/my-key-ring/cryptoKeys/my-key'
Note that the kms-key flag should be the full name of the kms key.
"""}
@staticmethod
def Args(parser):
"""Register flags for this command."""
flags.AddExportInstanceArgs(parser)
def Run(self, args):
instance_ref = args.CONCEPTS.instance.Parse()
op = instances.ExportInstance(instance_ref, args, self.ReleaseTrack())
log.status.Print(
'Export request issued for: [{}]\n'
'Check operation [{}] for status.'.format(args.instance, op.name)
)
return op
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
e6d17a0f40ceb577e98f6ae04e3eae4ea842f9db
|
be51250bcf59e1b47ed417c45e203c50aa233aae
|
/dojo-python-flask-mysql/pr1/servererr.py
|
fb918ee40f7bd663d312ead3bde46ed6a7560cc1
|
[] |
no_license
|
shoredata/dojo-python
|
4f064c76632bf94a385bb9f552562eb5640398b2
|
e7b7a542fa086088252ce92257f37c4b5eedd0c4
|
refs/heads/master
| 2020-05-02T22:07:31.547469
| 2019-03-28T17:44:06
| 2019-03-28T17:44:06
| 178,242,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
from flask import Flask
# import the function connectToMySQL from the file mysqlconnection.py
from mysqlconnerr import connectToMySQL
app = Flask(__name__)
# invoke the connectToMySQL function and pass it the name of the database we're using
# connectToMySQL returns an instance of MySQLConnection, which we will store in the variable 'mysql'
mysql = connectToMySQL('mydb')
# now, we may invoke the query_db method
query = "SELECT * FROM users;"
sqldata = mysql.query_db(query)
# print("all the users", sqldata)
print("mysql>>> mydb::users::"+query)
for i in sqldata:
print(i)
# for k in i:
# print(i,k,i[k])
if __name__ == "__main__":
app.run(debug=True)
|
[
"31630233+shoredata@users.noreply.github.com"
] |
31630233+shoredata@users.noreply.github.com
|
d623fe68c6326fee1aba92885d989524184c5f2d
|
87d5b21265c381104de8f45aa67842a4adc880eb
|
/486. Predict the Winner.py
|
447e9a702208ffb5145c18af27a90a00db128a70
|
[] |
no_license
|
MYMSSENDOG/leetcodes
|
ac047fe0d951e0946740cb75103fc94aae967166
|
8a52a417a903a0742034161471a084bc1e494d68
|
refs/heads/master
| 2020-09-23T16:55:08.579319
| 2020-09-03T19:44:26
| 2020-09-03T19:44:26
| 225,543,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
class Solution:
def PredictTheWinner(self, nums):
n = len(nums)
dp = [[0] * (n+1) for _ in range(n+1)]
for i in range(n-1,-1,-1):
for j in range(i+1):
l = j
r = i-j
dp[l][r] = max(nums[l] - dp[l+1][r], nums[-r-1] - dp[l][r + 1])
return dp[0][0] >= 0
sol = Solution()
nums = [1,5,233,7]
print(sol.PredictTheWinner(nums))
|
[
"fhqmtkfkd@naver.com"
] |
fhqmtkfkd@naver.com
|
46d2e6cb4678a7bb86f2a93e3287a37554d642b4
|
0466559817d3a1be9409da2c83db99c4db3bacfe
|
/hubcheck/pageobjects/widgets/tags_view_form.py
|
9dfbebcaaa5bac24ee44bdb846f38977e4b932c8
|
[
"MIT"
] |
permissive
|
ken2190/hubcheck
|
955cf9b75a1ee77e28256dfd3a780cfbc17de961
|
2ff506eb56ba00f035300862f8848e4168452a17
|
refs/heads/master
| 2023-03-20T15:17:12.949715
| 2015-09-29T16:11:18
| 2015-09-29T16:11:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,682
|
py
|
from hubcheck.pageobjects.basepagewidget import BasePageWidget
import re
class TagsViewForm(BasePageWidget):
def __init__(self, owner, locatordict={}):
super(TagsViewForm,self).__init__(owner,locatordict)
# load hub's classes
TagsViewForm_Locators = self.load_class('TagsViewForm_Locators')
TagSearchBox = self.load_class('TagSearchBox')
SortOrderOptions = self.load_class('SortOrderOptions')
SearchResults = self.load_class('SearchResults')
TagsViewResultsRow = self.load_class('TagsViewResultsRow')
ListPageNav = self.load_class('ListPageNav')
# update this object's locator
self.locators.update(TagsViewForm_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.searchbox = TagSearchBox(self,
{
'base' : 'searchbox',
'tags' : 'tags',
'tagsac' : 'tagsac',
'tagsacchoices' : 'tagsacchoices',
'tagsactoken' : 'tagsactoken',
'tagsacdelete' : 'tagsacdelete',
'submit' : 'submit',
})
self.sortoptions = SortOrderOptions(self,
{
'base' : 'sortoptions',
'date' : 'sortbydate',
'title' : 'sortbytitle',
})
self.footer = ListPageNav(self,{'base':'footer'})
self.search_results = SearchResults(self,
{
'base' : 'searchresults',
'counts' : 'sr_counts',
'row' : 'sr_row',
'substrow' : 'sr_substrow',
}, TagsViewResultsRow,
{
'src_title' : 'title',
'src_text' : 'text',
'src_href' : 'href',
})
# update the component's locators with this objects overrides
self._updateLocators()
def search_for(self,terms):
return self.searchbox.search_for(terms)
def goto_page_number(self,pagenumber):
return self.footer.goto_page_number(pagenumber)
def goto_page_relative(self,relation):
return self.footer.goto_page_relative(relation)
def get_caption_counts(self):
return self.search_results.header_counts()
def get_pagination_counts(self):
return self.footer.get_pagination_counts()
def get_current_page_number(self):
return self.footer.get_current_page_number()
def get_link_page_numbers(self):
return self.footer.get_link_page_numbers()
def search_result_rows(self):
return iter(self.search_results)
class TagsViewForm_Locators_Base(object):
"""locators for TagsViewForm object"""
locators = {
'base' : "css=#main form",
'searchbox' : "css=.data-entry",
'tags' : "css=#actags",
'tagsac' : "css=#token-input-actags",
'tagsacchoices' : "css=.token-input-dropdown-act",
'tagsactoken' : "css=.token-input-token-act",
'tagsacdelete' : "css=.token-input-delete-token-act",
'submit' : "css=.entry-search-submit",
'sortoptions' : "css=.entries-menu",
'sortbytitle' : "css=.entries-menu a[title='Sort by title']",
'sortbydate' : "css=.entries-menu a[title='Sort by newest to oldest']",
'footer' : "css=.list-footer",
# 'searchresults' : "css=#search .results",
'searchresults' : "css=.container-block",
'sr_substrow' : "css=#search .results li:nth-of-type({row_num})",
'sr_row' : "css=#search .results li",
'sr_counts' : "css=#rel-search span",
'src_title' : "css=#search .results li:nth-of-type({row_num}) .title",
'src_text' : "css=#search .results li:nth-of-type({row_num}) p:nth-of-type(2)",
'src_href' : "css=#search .results li:nth-of-type({row_num}) .href",
}
|
[
"telldsk@gmail.com"
] |
telldsk@gmail.com
|
ef45fa15c61956b4c8f11b18890b9b8cf99f1ac7
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_tremble.py
|
c57447f66e5255d1408b3edd615cc3538b9fba11
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
#calss header
class _TREMBLE():
def __init__(self,):
self.name = "TREMBLE"
self.definitions = [u'to shake slightly, usually because you are cold, frightened, or very emotional: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f22f021684ba14c9aea722e7b575fa83cc9e8627
|
907eecc07842e12cfef8d7dcc367052c528a7e45
|
/python/test_builder_of_things.py
|
c3c788e8bf65bc9f1c47bb67bc6c1db7aadd6d01
|
[] |
no_license
|
desertSniper87/codewars
|
0126a6962a7e22f9fd2e649c8a4abe2649d1988d
|
03f918e818a1c5a8b68ff23e24ad09698c32346b
|
refs/heads/master
| 2021-05-05T04:41:20.424603
| 2021-04-24T16:31:52
| 2021-04-24T16:31:52
| 118,615,045
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,648
|
py
|
import unittest
from builder_of_things import *
class TestClass(unittest.TestCase):
def testName(self):
jane = Thing('Jane')
self.assertEqual(jane.name, 'Jane')
self.assertEqual(jane.is_a_woman, True)
self.assertEqual(jane.is_a_man, False)
def testArms(self):
jane = Thing('Jane')
self.assertEqual(isinstance(jane.arms, tuple), True)
self.assertEqual(len(jane.arms), 2)
self.assertEqual(all(isinstance(v, Thing) for v in jane.arms), True)
self.assertEqual(all(v.name=="arm" for v in jane.arms), True)
self.assertEqual(all(v.is_arm for v in jane.arms), True)
self.assertEqual(len(jane.arms), 2)
self.assertEqual(all(isinstance(v, Thing) for v in jane.arms), True)
def testHead(self):
jane = Thing('Jane')
self.assertEqual(isinstance(jane.head, Thing), True)
self.assertEqual(jane.head.name, "head")
def testEyes(self):
jane = Thing('Jane')
self.assertEqual(len(jane.head.eyes), 2)
self.assertEqual(all(isinstance(v, Thing) for v in jane.head.eyes), True)
self.assertEqual(all(v.name=='eye' for v in jane.head.eyes), True)
def testFingers(self):
jane = Thing('Jane')
self.assertEqual(all(len(v.fingers)==5 for v in jane.arms), True)
def testParent(self):
jane = Thing('Jane')
self.assertEqual(jane.parent_of, "joe")
def testEyeColor(self):
jane = Thing('Jane')
self.assertEqual(all(v.color=='blue' for v in jane.head.eyes), True)
def testEyeShape(self):
jane = Thing('Jane')
self.assertEqual(all(v.color=='blue' for v in jane.eyes), True)
self.assertEqual(all(v.shape=='round' for v in jane.eyes), True)
def testEyesColor(self):
jane = Thing('Jane')
self.assertEqual(all(v.color=='green' for v in jane.eyes), True)
self.assertEqual(all(v.pupil.color=='black' for v in jane.eyes), True)
# def testSpeech(self):
# jane = Thing('Jane')
# def fnc(phrase):
# return "%s says: %s" % (name, phrase)
# jane.can.speak(fnc)
# self.assertEqual(jane.speak('hi'), "Jane says: hi")
# def testSpeech2(self):
# jane = Thing('Jane')
# fnc = lambda phrase: "%s says: %s" % (name, phrase)
# jane.can.speak(fnc, 'spoke')
# jane.speak('hi')
# self.assertEqual(jane.spoke, ["Jane says: hi"])
# jane.speak('goodbye')
# self.assertEqual(jane.spoke, ["Jane says: hi", "Jane says: goodbye"])
def main():
unittest.main()
if __name__ == '__main__':
main()
|
[
"torshobuet@gmail.com"
] |
torshobuet@gmail.com
|
b1041408c8d0b9d51353afa02be2a9407d84e133
|
edb9dce04a0e1c7cae6a4fe54c8dc89cef69d8f0
|
/Chapter-05/pandas_handlemissingdata.py
|
8c1dc0ad66881643d36a5514b833e4bfacaa3926
|
[] |
no_license
|
lovejing0306/PythonforDataAnalysis
|
a4a38dbc0d24e69a5469c662be7e5cfdeba057cb
|
be8d4862acc7538b52379cec36047b1c5bd48b3e
|
refs/heads/master
| 2021-06-09T20:40:04.157547
| 2016-11-20T02:23:07
| 2016-11-20T02:23:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
# coding=utf-8
from pandas import DataFrame, Series
import pandas as pd
import numpy as np
# 测试
if __name__ == '__main__':
string_data = Series(['aardvark', 'artichoke', np.nan, 'avocado'])
print(string_data)
print(string_data.isnull())
string_data[0] = None # python内置的None值也会被当作NaN来处理
print(string_data.isnull())
print(string_data.notnull())
## 滤除缺失数据
# Series
data = Series([1, np.nan, 3.5, np.nan, 7])
print(data)
print(data.dropna())
print(data[data.notnull()]) # 通过布尔索引达到相同的效果
# DataFrame
data = DataFrame([[1., 6.5, 3.],
[1., np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 6.5, 3.]])
print(data)
print(data.dropna()) # 默认丢弃含有NaN的行
print(data.dropna(how='all')) # 丢弃全部都是NaN的行
data[4] = np.nan
print(data)
print(data.dropna(axis=1)) # 丢弃列
print(data.dropna(axis=1, how='all'))
df = DataFrame(np.random.randn(7,3))
df.ix[:4, 1] = np.nan
df.ix[:2, 2] = np.nan
print(df)
print(df.dropna())
print(df.dropna(thresh=3))
## 填充缺失数据
print(df.fillna(0))
print(df.fillna({1:0.5, 2:-1})) # 通过字典实现对不同列不同值的填充
dfInplace = df.fillna(0, inplace=True)
print(dfInplace) # fillna默认返回新对象,但也可以对现有对象进行就地修改
print(df)
df = DataFrame(np.random.randn(6,3))
print(df)
df.ix[2:, 1] = np.nan
df.ix[3:, 2] = np.nan
print(df)
print(df.fillna(method='ffill'))
print(df.fillna(method='ffill', limit=2))
data = Series([1., np.nan, 3.5, np.nan, 7])
print(data.fillna(data.mean()))
|
[
"lovejing0306@gmail.com"
] |
lovejing0306@gmail.com
|
c8ef1d94e1d70ae6da235286cc8081214dd6866f
|
e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7
|
/flask_api/venv/lib/python3.7/site-packages/vsts/release/v4_1/models/release_definition_gate.py
|
45dbf84e28f1c28054605140573590d08557e663
|
[] |
no_license
|
u-blavins/secret_sasquatch_society
|
c36993c738ab29a6a4879bfbeb78a5803f4f2a57
|
0214eadcdfa9b40254e331a6617c50b422212f4c
|
refs/heads/master
| 2020-08-14T00:39:52.948272
| 2020-01-22T13:54:58
| 2020-01-22T13:54:58
| 215,058,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ReleaseDefinitionGate(Model):
"""ReleaseDefinitionGate.
:param tasks:
:type tasks: list of :class:`WorkflowTask <release.v4_1.models.WorkflowTask>`
"""
_attribute_map = {
'tasks': {'key': 'tasks', 'type': '[WorkflowTask]'}
}
def __init__(self, tasks=None):
super(ReleaseDefinitionGate, self).__init__()
self.tasks = tasks
|
[
"usama.blavins1@gmail.com"
] |
usama.blavins1@gmail.com
|
ac54da57e3eea33615811d6513ec2872ea1f784b
|
72af42076bac692f9a42e0a914913e031738cc55
|
/01, 특강_210705_0706/02, source/CookData(2021.01.15)/Code14-01.py
|
ba3157038d2415dc09aa47adb6edb5f1ec20f74f
|
[] |
no_license
|
goareum93/Algorithm
|
f0ab0ee7926f89802d851c2a80f98cba08116f6c
|
ec68f2526b1ea2904891b929a7bbc74139a6402e
|
refs/heads/master
| 2023-07-01T07:17:16.987779
| 2021-08-05T14:52:51
| 2021-08-05T14:52:51
| 376,908,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
## 함수 선언 부분 ##
def knapsack():
print('## 메모이제이션 배열 ##')
array = [[0 for _ in range(maxWeight+1)] for _ in range(rowCount+1)] # 빈 배열을 만들고 모두 0으로
for row in range(1, rowCount+1) : # 1~4개 (4회)
print(row, '개 -->', end = ' ')
for col in range(1, maxWeight+1) : # 1colg ~ 7colg
if weight[row] > col : # 물건의 무게가 열보다 크면 == 물건이 가방에 안 들어가면
array[row][col] = array[row-1][col]
else : # 물건의 부피가 s보다 작거나 같으면
value1 = money[row] + array[row-1][col-weight[row]] # 각 그림의 1-1
value2 = array[row-1][col] # 각 그림의 1-2
array[row][col] = max(value1, value2)
print('%2d' % array[row][col], end = ' ')
print()
return array[rowCount][maxWeight]
## 전역 변수 선언 부분 ##
maxWeight = 7 # 배낭 최대 무게
rowCount = 4 # 보석 숫자
weight = [0, 6, 4,3, 5] # 보석 무게 (0, 금괴, 수정, 루비, 진주)
money = [0,13, 8, 6, 12] # 보석 가격 (0, 금괴, 수정, 루비, 진주)
## 메인 코드 부분 ##
maxValue = knapsack()
print()
print('배낭에 담을 수 있는 보석의 최대 가격 -->', maxValue, '억원')
|
[
"goareum7@gmail.com"
] |
goareum7@gmail.com
|
381c39adca87bdc4c70516ba84929b4bbb345c7f
|
73cacd0f22036bec4aa147f7c26961b4b991af22
|
/castero/datafile.py
|
99b25f7030014950516c1844bb0c4d828d3fd6fa
|
[
"MIT"
] |
permissive
|
kyokley/castero
|
cb15fe45fc84547ad1e6dcb1afb2181bdae86146
|
6998e3cbdd722efe53fdc23bb4bb46750dad2d8d
|
refs/heads/master
| 2022-11-06T01:48:06.925790
| 2020-06-11T01:53:50
| 2020-06-11T01:53:50
| 272,463,362
| 0
| 0
|
MIT
| 2020-06-15T14:39:10
| 2020-06-15T14:39:09
| null |
UTF-8
|
Python
| false
| false
| 4,245
|
py
|
import collections
import os
import requests
from shutil import copyfile
import castero
from castero.net import Net
class DataFile:
"""Extendable class for objects with filesystem data.
Used when handling files with data that can reasonably be stored in a
dictionary. Particularly used in the Config class and the Feeds class.
Extended by classes which are based on a data file.
"""
PACKAGE = os.path.dirname(__file__)
HOME = os.path.expanduser('~')
XDG_CONFIG_HOME = os.getenv('XDG_CONFIG_HOME',
os.path.join(HOME, '.config'))
XDG_DATA_HOME = os.getenv('XDG_DATA_HOME',
os.path.join(HOME, '.local', 'share'))
CONFIG_DIR = os.path.join(XDG_CONFIG_HOME, castero.__title__)
DATA_DIR = os.path.join(XDG_DATA_HOME, castero.__title__)
DEFAULT_DOWNLOADED_DIR = os.path.join(DATA_DIR, "downloaded")
def __init__(self, path, default_path) -> None:
"""
Args:
path: the path to the data file
default_path: the path to the default data file
"""
assert os.path.exists(default_path)
self.data = collections.OrderedDict()
self._path = path
self._default_path = default_path
# if path doesn't exit, create it based on default_path
if not os.path.exists(self._path):
DataFile.ensure_path(self._path)
copyfile(self._default_path, self._path)
def __iter__(self) -> iter:
"""Iterator for the keys of self.data
In order to iterate over data values, you should use something like:
for key in file_instance:
value = file_instance[key]
"""
return self.data.__iter__()
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, item):
if item in self.data:
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
self.data.pop(key, None)
@staticmethod
def ensure_path(filename):
"""Ensure that the path to the filename exists, creating it if needed.
"""
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path)
@staticmethod
def download_to_file(url, file, name, download_queue, display=None):
"""Downloads a URL to a local file.
Args:
url: the source url
file: the destination path
name: the user-friendly name of the content
download_queue: the download_queue overseeing this download
display: (optional) the display to write status updates to
"""
chunk_size = 1024
chuck_size_label = "KB"
try:
response = Net.Get(url, stream=True)
except requests.exceptions.RequestException as e:
if display is not None:
display.change_status("RequestException: %s" % str(e))
download_queue.next()
return
else:
handle = open(file, "wb")
downloaded = 0
for chunk in response.iter_content(chunk_size=chunk_size):
if display is not None:
status_str = "Downloading \"%s\": %d%s" % (
name, downloaded / chunk_size, chuck_size_label
)
if download_queue.length > 1:
status_str += " (+%d downloads in queue)" % \
(download_queue.length - 1)
display.change_status(status_str)
if chunk:
handle.write(chunk)
downloaded += len(chunk)
if display is not None:
display.change_status("Episode successfully downloaded.")
display.menus_valid = False
download_queue.next()
def load(self) -> None:
"""Loads the data file.
Should be implemented by classes which extend this class.
"""
pass
def write(self) -> None:
"""Writes to the data file.
Should be implemented by classes which extend this class.
"""
pass
|
[
"jake@faltro.com"
] |
jake@faltro.com
|
bab08a4477751c2f1fc761d6c0504c5f4dfaba39
|
1e9c67785cd2a07fbd12b63bd93a2eba2272f237
|
/image_task_kg/make_KG.py
|
22d5463b6fa171d09e5cc20e81cda93da2ed656a
|
[] |
no_license
|
monisha-jega/mmd
|
2975d0f77bce4db38795fa201f515f35498f0eb3
|
d4f9d2c94409c2877ff5a5a2242e7e7ed2f87921
|
refs/heads/master
| 2022-07-20T17:01:39.043859
| 2020-05-16T23:31:35
| 2020-05-16T23:31:35
| 264,543,426
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,850
|
py
|
from __future__ import print_function
import json, os, pickle
from parameters import *
def convert_json(the_json):
new_json = {}
for key, val in the_json.items():
new_json[key.lower()] = val
return new_json
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
domain_features = [
['price'],
#['style'],
['type'],
['fit'],
#['brand'],
['gender'],
#['neck'],
['material', 'fabric'],
#['length'],
#['sleeves'],
#['model_worn'],
['currency'],
['color']
]
num_features = len(domain_features)
feature_existence_count = [0 for i in range(num_features)]
root_dir = "../../raw_catalog/"
dirlist = [root_dir + name for name in ['public_jsons', 'public_jsons (2)', 'public_jsons (3)', 'public_jsons (4)']]
count = 0
KG = {}
for diri in dirlist[:]:
print(len(os.listdir(diri)))
for json_file in os.listdir(diri):
#print("ok")
the_json = convert_json(json.load(open(diri +"/" + json_file)))
feature_vec = ["" for i in range(num_features)]
for l in range(num_features):
feature_names = domain_features[l]
for feature_name in feature_names:
if feature_name in the_json:
if the_json[feature_name] == "" or (l == 0 and (not is_int(the_json[feature_name]) or int(the_json[feature_name]) == 0)):
pass
else:
feature_vec[l] = the_json[feature_name]
feature_existence_count[l] += 1
KG[the_json["image_filename"]] = feature_vec
for orientation, links in the_json['image_filename_all'].items():
for link in links:
KG[link] = feature_vec
count += 1
if count%20000 == 0:
print(count, end="")
print(" ")
print()
print(feature_existence_count)
json.dump(KG, open(data_dump_dir+"image_kg.json", "wb"))
pickle.dump(KG, open(data_dump_dir+"image_kg.pkl", "wb"))
|
[
"monishaj@Monishas-MacBook-Pro.local"
] |
monishaj@Monishas-MacBook-Pro.local
|
da7b653eafb429de4d61b697f9d80f9c7895357e
|
075ccb25d98e2e55adbbaf0723b99158747bf172
|
/nickles_and_dimes.py
|
cc7764bfda7d891fcabc1e2ed4c5b18154d36da3
|
[] |
no_license
|
Th3Lourde/Mathematical_Modeling
|
6863694d7a1526cfb665ecf32189522da1d2ee83
|
f890273305263a90ac1b18c9fc12ad1aa70b23cf
|
refs/heads/master
| 2020-04-24T01:23:26.447512
| 2019-04-08T02:38:56
| 2019-04-08T02:38:56
| 171,593,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
'''
11 coins
70 cents
nickles and dimes
'''
total = 70
coin_key = []
n = 5
d = 10
# Start with one dime and all nickles
# Hypothesis: 3 dimes, 8 nickles
# Done
# Equation: 5x+10y = 70
# Equation: x + y = 11
# .
# .
# .
|
[
"th3sylvia.lourde@gmail.com"
] |
th3sylvia.lourde@gmail.com
|
47fb4ad7eb88aa972c8ada25f83b2c9c0ba6d233
|
eebacbc58a1c99fb6e32f8cd56cac6e18947d3e7
|
/3.advanced_features/1.advanced_features.py
|
3dd7fc0bc7d1f5260a24d7e40a67c53d99fe69b8
|
[] |
no_license
|
fzingithub/LearnPythonFromLiao
|
ad7f959d7e667a464f2b9a6b1cedfd0f08baaf8e
|
fcb0f2e7f905aca253b3986c4a1ceab6b82b7cae
|
refs/heads/master
| 2020-03-29T19:37:32.831341
| 2018-09-27T10:39:11
| 2018-09-27T10:39:11
| 150,273,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 18:43:50 2018
@author: FZ
"""
#make a list 1...99
L = list(range(1,100,2))
print (L)
#高级特性,1行代码能实现的功能,决不写5行代码。请始终牢记,代码越少,开发效率越高。
|
[
"1194585271@qq.com"
] |
1194585271@qq.com
|
3fc3f033f79febb3ec41acc993e537e72912483a
|
15373eaa353e8aece47a26741b7fb27795268bf6
|
/medium/833_find_and_replace_in_string.py
|
7ef69a2ccd6f43a06373517a1eba3a6f382b014a
|
[] |
no_license
|
esddse/leetcode
|
e1a9bacf04c68a8d642a1e53c90e6c2dda2c1980
|
0ceccdb262149f7916cb30fa5f3dae93aef9e9cd
|
refs/heads/master
| 2021-06-08T19:15:14.346584
| 2020-01-09T01:41:23
| 2020-01-09T01:41:23
| 109,675,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
class Solution:
def findReplaceString(self, S: str, indexes: List[int], sources: List[str], targets: List[str]) -> str:
replacements = sorted(zip(indexes, sources, targets), key=lambda item:item[0])
length = len(S)
start = 0
new_S = ""
for idx, source, target in replacements:
if idx >= length:
break
new_S += S[start:idx]
sl = len(source)
if S[idx:idx+sl] == source:
new_S += target
start = idx+sl
else:
start = idx
new_S += S[start:]
return new_S
|
[
"tjz427@sina.cn"
] |
tjz427@sina.cn
|
018d690f2b09a7839e2233446c5334d07ba7e40a
|
6c5ce1e621e0bd140d127527bf13be2093f4a016
|
/ex073/venv/Scripts/easy_install-3.7-script.py
|
04b8ee8d192cd98ecbf9446220d1339ffc006211
|
[
"MIT"
] |
permissive
|
ArthurAlesi/Python-Exercicios-CursoEmVideo
|
124e2ee82c3476a5a49baafed657788591a232c1
|
ed0f0086ddbc0092df9d16ec2d8fdbabcb480cdd
|
refs/heads/master
| 2022-12-31T13:21:30.001538
| 2020-09-24T02:09:23
| 2020-09-24T02:09:23
| 268,917,509
| 0
| 0
| null | null | null | null |
ISO-8859-2
|
Python
| false
| false
| 508
|
py
|
#!C:\Users\User\Documents\github-MeusRepositórios\Python-Exercicios-CursoEmVideo\ex073\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"54421573+ArthurAlesi@users.noreply.github.com"
] |
54421573+ArthurAlesi@users.noreply.github.com
|
2706a914a8a9b70a04385ebad0180e6781fbffb5
|
07af05141f371ad1c2ab11634d4f5fad20ede2e0
|
/python/src/nnabla/backward_function/bc_add2.py
|
b95e0477b113afd762dead0d62af1338523c465d
|
[
"Apache-2.0"
] |
permissive
|
chunxiaosz/nnabla
|
a9c9b30140def0bdf91dea24d70cfa9400258d66
|
9f4249313129d0fd23d304453830157fee96a2e5
|
refs/heads/master
| 2020-12-03T05:11:24.724125
| 2019-09-04T06:39:41
| 2019-09-04T06:39:41
| 95,741,841
| 0
| 0
|
Apache-2.0
| 2019-09-04T06:39:42
| 2017-06-29T05:29:11
|
C++
|
UTF-8
|
Python
| false
| false
| 1,839
|
py
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class BcAdd2Backward(BackwardFunction):
def name(self):
return 'BcAdd2Backward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
# Inputs on the forward graph
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
# Outputs on the forward graph
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
# inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
# [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
raise NotImplementedError(
"The backward method of BcAdd2Backward class is not implemented.")
|
[
"Kazuki.Yoshiyama@sony.com"
] |
Kazuki.Yoshiyama@sony.com
|
b6eebc3348f8b8c588194824b12e99245fc3b3e3
|
72ea510ceaa5a4aa1918ea0cf2bb699439d2587b
|
/Python/problem0082.py
|
66f7113ebe2a267d3c64be861a1d3e5db40e7244
|
[
"MIT"
] |
permissive
|
1050669722/LeetCode-Answers
|
f18680e0fe74199a630fff214977e91fe428c550
|
c8f4d1ccaac09cda63b60d75144335347b06dc81
|
refs/heads/master
| 2023-03-08T01:25:19.720931
| 2021-02-22T00:34:43
| 2021-02-22T00:34:43
| 270,304,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
if not (head and head.next):
return head
d = {}
tmp = head
while tmp:
if tmp.val in d:
d[tmp.val] += 1
else:
d[tmp.val] = 1
tmp = head
head = ListNode(None)
head.next = tmp
pre, cur = head, head.next
while cur:
if d[cur.val] == 1:
cur = cur.next
pre = pre.next
else:
for _ in range(d[cur.val]):
cur = cur.next
pre.next = cur
return head.next
|
[
"1050669722@qq.com"
] |
1050669722@qq.com
|
8f04c0f007f92b6b025b75fbb32b9204d00f39d6
|
67d95b72da34dcfb3bf0224e66c3f3d345c7c5be
|
/src/spaceone/inventory/connector/aws_secrets_manager_connector/schema/service_type.py
|
d671de6dd8ac7a3aae846cdd457597fc2dd8e8f5
|
[
"Apache-2.0"
] |
permissive
|
khl6235/plugin-aws-cloudservices
|
10971267902a000f3965bbf606283ab71c15823a
|
e13554df78ff97daefa7011559c00adc44fa10ea
|
refs/heads/master
| 2022-12-12T03:12:10.744727
| 2020-09-11T06:15:42
| 2020-09-11T06:15:42
| 288,607,364
| 0
| 0
|
Apache-2.0
| 2020-08-19T01:54:13
| 2020-08-19T01:54:12
| null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
from spaceone.inventory.libs.schema.dynamic_field import TextDyField, DateTimeDyField, SearchField
from spaceone.inventory.libs.schema.resource import CloudServiceTypeResource, CloudServiceTypeResponse, \
CloudServiceTypeMeta
cst_secret = CloudServiceTypeResource()
cst_secret.name = 'Secret'
cst_secret.provider = 'aws'
cst_secret.group = 'SecretsManager'
cst_secret.labels = ['Security']
cst_secret.tags = {
'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/aws/AWS-Secrets-Manager.svg',
'spaceone:is_major': 'true',
}
cst_secret._metadata = CloudServiceTypeMeta.set_meta(
fields=[
TextDyField.data_source('Name', 'data.name'),
TextDyField.data_source('Description', 'data.description'),
DateTimeDyField.data_source('Last Retrieved', 'data.last_accessed_date'),
],
search=[
SearchField.set(name='Name', key='data.name'),
SearchField.set(name='ARN', key='data.arn'),
SearchField.set(name='Last Changed Time', key='data.last_changed_date', data_type='datetime'),
SearchField.set(name='Last Accessed Time', key='data.last_accessed_date', data_type='datetime'),
SearchField.set(name='Rotation Enabled', key='data.rotation_enabled', data_type='boolean'),
SearchField.set(name='Region', key='data.region_name'),
SearchField.set(name='AWS Account ID', key='data.account_id'),
]
)
CLOUD_SERVICE_TYPES = [
CloudServiceTypeResponse({'resource': cst_secret}),
]
|
[
"bluese@megazone.com"
] |
bluese@megazone.com
|
c51f6f32ab60667f3221123353d8004395f8d50b
|
7585c77f49d4a3643e4740b2ceae081c20fc4183
|
/example06-06-01.py
|
06ef28969f2a708ee44808db2e680530b77f4953
|
[] |
no_license
|
Minari766/study_python
|
c4243df47f23e8fda5bcdf16b65f3b3af97f888c
|
b1e48e750126f377a15f8be8b3c2547687416ec4
|
refs/heads/master
| 2022-12-15T10:29:05.306690
| 2020-09-03T13:05:21
| 2020-09-03T13:05:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
# coding:utf-8
import tkinter as tk
import tkinter.messagebox as tmsg
import random
#ボタンがクリックされたときの処理
def ButtonClick():
b = editbox1.get() #テキスト入力欄に入力された文字列を取得。変数.getメソッドを使用
#Lesson 5-4のプログラム。4桁の数字かどうかを判定
#4桁の数字かどうかを判断する
isok = False
if len(b)!= 4:
tmsg.showerror("エラー","4桁の数字を入れてください")
else:
kazuok = True
for i in range(4):
if (b[i] < "0") or (b[i] > "9"):
tmsg.showerror("エラー", "数字ではありません")
kazuok = False
break
if kazuok:
isok = True
if isok:
#4桁の数字だった場合。ヒットを判定
hit = 0
for i in range(4):
if a[i] == int(b[i]):
hit = hit +1
#ブローを判定
blow = 0
for j in range(4):
for i in range(4):
if (int(b[j]) == a[i]) and (a[i] != int(b[i])) and (a[j] != int(b[j])):
blow = blow + 1
break
#ヒットが4なら正解となり、終了
if hit == 4:
tmsg.showinfo("正解!", "おめでとうございます!")
#終了
root.destroy()
else: #ヒット数とブロー数を表示。
rirekibox.insert(tk.END, b + " /H:" + str(hit) + " B:" + str(blow) + "\n")#ウィンドウ向けに新たに調整した部分
#メインのプログラム
#予めランダムな4つの数字を設定
a = [random.randint(0, 9),
random.randint(0, 9),
random.randint(0, 9),
random.randint(0, 9)]
#ウィンドウを作る
root = tk.Tk()
root.geometry("600x400")
root.title("数当てゲーム") #ウィンドウを作る
#履歴表示のテキストボックスを作る
rirekibox = tk.Text(root, font = ("Meiryo UI", 14))
rirekibox.place(x=400, y=0, width=200, height=400)
#ラベルを作る
label1 = tk.Label(root, text="数を入力してください", font=("Meiryo UI", 14)) #文章を入れる
label1.place(x = 20, y = 20) #変数の入力欄を動かす
#テキストボックス(プレイヤーの入力欄)を作る
editbox1 = tk.Entry(width =4, font = ("Meiryo UI", 28)) #テキスト入力欄を作る
editbox1.place(x = 160, y = 20)
#ボタンを作る
button1 = tk.Button(root, text = "チェック", font = ("Meiryo UI", 14), command = ButtonClick)
button1.place(x = 240, y = 20)
#ウィンドウを表示する
root.mainloop() #ウィンドウを表示する
|
[
"mina3.ryu0728@gmail.com"
] |
mina3.ryu0728@gmail.com
|
afc4588addf41ced43e5b3c252a496aba4eb81b9
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/1400.py
|
835aa64a29d9a0037854dec8a6d7fb866cca78a5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
'''Qualification Round Problem A. Speaking in Tongues'''
import sys
SIZE = 4
def checkLine( line ):
o = 0
x = 0
for cell in line:
if cell == 'O':
o += 1
elif cell == 'X':
x += 1
elif cell == '.':
return None
if o == 0:
return 'X won'
elif x == 0:
return 'O won'
else:
return None
def checkResult( board ):
# horizontal
for line in board:
result = checkLine( line )
if result is not None:
return result
# vertical
for i in range( SIZE ):
line = ( row[ i ] for row in board )
result = checkLine( line )
if result is not None:
return result
# diagonal
line = ( row[ i ] for i, row in enumerate( board ) )
result = checkLine( line )
if result is not None:
return result
line = ( row[ SIZE - i - 1 ] for i, row in enumerate( board ) )
result = checkLine( line )
if result is not None:
return result
# completion-check
for line in board:
if line.find( '.' ) >= 0:
return 'Game has not completed'
return 'Draw'
def main( input ):
count = int( input.readline() )
for index in range( 1, count + 1 ):
board = []
for rowIndex in range( SIZE ):
line = input.readline().strip()
board.append( line )
input.readline()
result = checkResult( board )
print( 'Case #{}: {}'.format( index, result ) )
main( sys.stdin )
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
65f4ae8993271aeb836d270b7b7b32681944b932
|
46f8860e0e8d4252ad85d2121bb2387c74287a14
|
/problem/p_1346_check_if_N_and_its_double_exist/solutions.py
|
243895d2c72cb967c256c1796b9de2a4eb65c7cd
|
[] |
no_license
|
Lee-W/leetcode
|
2a11c853cf0a7f8bca39f94f93cc75401e5a1afa
|
b66e5a6016525bec98e7865d6e31f1dc9f0b4be6
|
refs/heads/master
| 2023-09-01T15:52:56.570276
| 2022-08-02T07:22:11
| 2022-08-02T07:22:11
| 180,076,811
| 3
| 1
| null | 2023-08-11T19:57:00
| 2019-04-08T05:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 370
|
py
|
from typing import List
class Solution:
def checkIfExist(self, arr: List[int]) -> bool:
for m_i, m in [(num_i, num) for num_i, num in enumerate(arr) if not num % 2]:
for n_i, num in enumerate(arr):
if m_i == n_i:
continue
if num * 2 == m:
return True
return False
|
[
"weilee.rx@gmail.com"
] |
weilee.rx@gmail.com
|
976e54ad71a17887bafad148b1c31dc652196bf7
|
ac4b9385b7ad2063ea51237fbd8d1b74baffd016
|
/.history/s1_3_getHtml_20210209182455.py
|
29f07839537a635652c10e242644034e7aa175eb
|
[] |
no_license
|
preethanpa/ssoemprep
|
76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f
|
ce37127845253c768d01aeae85e5d0d1ade64516
|
refs/heads/main
| 2023-03-09T00:15:55.130818
| 2021-02-20T06:54:58
| 2021-02-20T06:54:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,757
|
py
|
# This module is called from 3R Automation Component.
import os
import sys
# pdftotree is available as part of the virtual environment for 3R Python processing
import pdftotree
import json
from pprint import pprint
import pdfminer
import ocr_extract as imgpdf
from utils.ocr.handle_image import *
# pdf_doc = json.loads(sys.argv[1])['doc_name']
# html_path = json.loads(sys.argv[1])['html_path']
# Use the following for testing
pdf_doc = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/Sri_khyati_CV.pdf'
html_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/Sri_khyati_CV.html'
def create_hocr(pdf_doc='', html_path='', model_path='./model/model.pkl'):
return pdftotree.parse(pdf_doc, html_path=html_path, model_type=None, model_path=model_path, visualize=False)
create_hocr_output = None
try:
create_hocr_output = create_hocr(pdf_doc=pdf_doc, html_path=html_path)
except pdfminer.pdfparser.PDFSyntaxError as pdfException:
create_hocr_output = pdfException
# Use the following for testing non PDF files
# print(f'{os.path.basename(pdf_doc).split(".")[0]+".pdf"}')
# print(f'{os.path.abspath(pdf_doc).split(".")[0]+".pdf"}')
# try:
# # imgpdf.convert_image_to_pdf(pdf_doc, os.path(pdf_doc)+os.path.basename(pdf_doc).split('.')[0]+'.pdf')
# imgpdf.convert_image_to_pdf(pdf_doc, os.path.dirname(pdf_doc), os.path.abspath(pdf_doc).split(".")[0]+".pdf")
# except Exception as exc:
# print(exc)
# Output of "print" statement is passed to the calling program
proc_status = "OK" if create_hocr_output == None else "Not a PDF document. Please provide a PDF file for processing."
json_out = {"pdf_doc": pdf_doc, "process_status": proc_status}
print(json_out)
|
[
"{abhi@third-ray.com}"
] |
{abhi@third-ray.com}
|
9b245ffc9a5b5714555bcedb7a015c8e4e6c6d80
|
8ec05f1d5800e0b98afa92367f74bed9f95e0ee9
|
/venv/Scripts/rst2man.py
|
9ef84bd9b4cc305a5aa6ffa93a6cc0aafa425c88
|
[] |
no_license
|
ayanchyaziz123/ecom-final-year-project
|
28362922a88c71aba29d22f29c7f34e1cad6189f
|
d21fdd885b3b768935dc29171c5a6761c4b88e9c
|
refs/heads/master
| 2023-08-12T17:10:23.826744
| 2021-10-06T12:36:17
| 2021-10-06T12:36:17
| 405,435,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
#!f:\proshop_django-master\venv\scripts\python.exe
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
|
[
"aaziz9642@gmail.com"
] |
aaziz9642@gmail.com
|
769ca7862fc492c3162d45ff8ce8222afda2829c
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/hvs/adj.py
|
fb48fc6714990d8e24798555483c62576a4eb77b
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231
| 2018-12-04T01:46:53
| 2018-12-04T01:46:53
| 159,911,666
| 0
| 0
| null | 2022-12-07T23:53:02
| 2018-12-01T05:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 7,640
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Adj(Mo):
"""
The connectivity to an external network.
"""
meta = ClassMeta("cobra.model.hvs.Adj")
meta.moClassName = "hvsAdj"
meta.rnFormat = "adj-[%(nbrKey)s]"
meta.category = MoCategory.REGULAR
meta.label = "Adjacency"
meta.writeAccessMask = 0x5
meta.readAccessMask = 0x5
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.hvs.RtNicAdj")
meta.childClasses.add("cobra.model.hvs.RsLsNode")
meta.childNamesAndRnPrefix.append(("cobra.model.hvs.RtNicAdj", "rtcompNicAdj-"))
meta.childNamesAndRnPrefix.append(("cobra.model.hvs.RsLsNode", "rsLsNode"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.parentClasses.add("cobra.model.comp.Hv")
meta.rnPrefixes = [
('adj-', True),
]
prop = PropMeta("str", "addr", "addr", 234, PropCategory.REGULAR)
prop.label = "Neighbor Address"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("addr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "configIssues", "configIssues", 238, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("missing-mgmtip", "management-address-is-not-configured-on-loosenode-(unmanaged-switch)", 1)
prop._addConstant("none", "none", 0)
meta.props.add("configIssues", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "ifId", "ifId", 236, PropCategory.REGULAR)
prop.label = "Interface Id"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("ifId", prop)
prop = PropMeta("str", "ifName", "ifName", 235, PropCategory.REGULAR)
prop.label = "Neighbor Interface"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("ifName", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 13777, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 229, PropCategory.REGULAR)
prop.label = "Name"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nbrDesc", "nbrDesc", 232, PropCategory.REGULAR)
prop.label = "Neighbor Description"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("nbrDesc", prop)
prop = PropMeta("str", "nbrId", "nbrId", 231, PropCategory.REGULAR)
prop.label = "Neighbor Identifier"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("nbrId", prop)
prop = PropMeta("str", "nbrKey", "nbrKey", 233, PropCategory.REGULAR)
prop.label = "Neighbor Key"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 512)]
meta.props.add("nbrKey", prop)
prop = PropMeta("str", "nbrName", "nbrName", 230, PropCategory.REGULAR)
prop.label = "Name"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("nbrName", prop)
prop = PropMeta("str", "nbrType", "nbrType", 237, PropCategory.REGULAR)
prop.label = "Neighbor Type, Leaf or Loosenode"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "leaf"
prop._addConstant("leaf", "leaf", 1)
prop._addConstant("loosenode", "loosenode", 2)
meta.props.add("nbrType", prop)
prop = PropMeta("str", "proto", "proto", 228, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("CDP", "cdp", 2)
prop._addConstant("LLDP", "lldp", 1)
prop._addConstant("none", "none", 0)
meta.props.add("proto", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "nbrKey"))
getattr(meta.props, "nbrKey").needDelimiter = True
def __init__(self, parentMoOrDn, nbrKey, markDirty=True, **creationProps):
namingVals = [nbrKey]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"collinsctk@qytang.com"
] |
collinsctk@qytang.com
|
ef356e1edaae73bf881c1937ec46f1ac044ffbe1
|
ec56e3a57fb71f3fc4f19b168d3fa34cebb781ab
|
/tcga_encoder/models/regularizers.py
|
7326b08e9ac4b6ace69433dc138071b247dc3932
|
[
"MIT"
] |
permissive
|
tedmeeds/tcga_encoder
|
64d60148b0c69092cb499abec22618e740ba8b6c
|
805f9a5bcc422a43faea45baa0996c88d346e3b4
|
refs/heads/master
| 2021-01-13T04:50:42.643743
| 2017-08-25T13:09:38
| 2017-08-25T13:09:38
| 78,621,753
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,136
|
py
|
import tensorflow as tf
from scipy import stats
def Drop( X, keep_rate ):
keep_mask = stats.bernoulli( keep_rate).rvs( X.shape )
Y = X*keep_mask + (1-keep_mask)*0
return Y
def DropColumns( X, cols2drop ):
Y = X.copy()
Y[:,cols2drop] = 0
return Y
class Regularizer(object):
def __init__( self, lam_value ):
self.lam = lam_value
def Apply( self, w ):
raise NotImplemented, "Must derive class"
class L2Regularizer(Regularizer):
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.square(w) )
class L1Regularizer(Regularizer):
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.abs(w) )
class LqRegularizer(Regularizer):
def __init__( self, lam_value, q ):
self.lam = lam_value
self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.pow( tf.abs(w), self.q ) + self.eps )
class SortedL1RegularizerAxis2(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.abs(w[:,:,1:]-w[:,:,:-1]) + self.eps )
class SortedL1RegularizerAxis1(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.abs(w[:,1:,:]-w[:,:-1,:]) + self.eps )
class SortedL1RegularizerAxis0(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.abs(w[1:,:,:]-w[:-1,:,:]) + self.eps )
class SortedAbsL1RegularizerAxis2(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.abs(aw[:,:,1:]-aw[:,:,:-1]) + self.eps )
class SortedAbsL1RegularizerAxis1(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.abs(aw[:,1:,:]-aw[:,:-1,:]) + self.eps )
class SortedAbsL1RegularizerAxis0(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.abs(aw[1:,:,:]-aw[:-1,:,:]) + self.eps )
# ------------------
class SortedL2RegularizerAxis2(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.square(w[:,:,1:]-w[:,:,:-1]) + self.eps )
class SortedL2RegularizerAxis1(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.square(w[:,1:,:]-w[:,:-1,:]) + self.eps )
class SortedL2RegularizerAxis0(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
return self.lam*tf.reduce_sum( tf.square(w[1:,:,:]-w[:-1,:,:]) + self.eps )
class SortedAbsL2RegularizerAxis2(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.square(aw[:,:,1:]-aw[:,:,:-1]) + self.eps )
class SortedAbsL2RegularizerAxis1(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.square(aw[:,1:,:]-aw[:,:-1,:]) + self.eps )
class SortedAbsL2RegularizerAxis0(Regularizer):
def __init__( self, lam_value ):
self.lam = lam_value
#self.q = q
self.eps = 1e-6
def Apply( self, w ):
aw = tf.abs(w)
return self.lam*tf.reduce_sum( tf.square(aw[1:,:,:]-aw[:-1,:,:]) + self.eps )
|
[
"tmeeds@gmail.com"
] |
tmeeds@gmail.com
|
ba780aaf42ac35517e278f877cdb859dab20abd9
|
0bf6ecbdebc7424a8946b29127d55c5bc1e7442e
|
/wetLab/migrations/0017_auto_20161107_1626.py
|
a4121e0b72612c717750f8d9b822000923594343
|
[] |
no_license
|
dekkerlab/cLIMS
|
2351a9c81f3e3ba982e073500a4a5cf2fd38ed51
|
e76731032a5707027b53746a8f2cc9b01ab7c04e
|
refs/heads/master
| 2021-03-27T06:28:49.718401
| 2017-10-10T19:22:33
| 2017-10-10T19:22:33
| 71,837,345
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-07 16:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wetLab', '0016_auto_20161107_1556'),
]
operations = [
migrations.RenameField(
model_name='treatmentrnai',
old_name='treatmentRnai_target_sequence',
new_name='treatmentRnai_targetNucleotide_seq',
),
]
|
[
"nanda@ankitas-mbp.ad.umassmed.edu"
] |
nanda@ankitas-mbp.ad.umassmed.edu
|
77bf44bd368a70e3933cfcf5f32ec64eab7ecdd9
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/detail_placement_view_service/transports/base.py
|
85ed85c711e8a89a9ba736c1cc3efae58586a2a2
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,807
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v7.resources.types import detail_placement_view
from google.ads.googleads.v7.services.types import detail_placement_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class DetailPlacementViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for DetailPlacementViewService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_detail_placement_view: gapic_v1.method.wrap_method(
self.get_detail_placement_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_detail_placement_view(self) -> typing.Callable[
[detail_placement_view_service.GetDetailPlacementViewRequest],
detail_placement_view.DetailPlacementView]:
raise NotImplementedError
__all__ = (
'DetailPlacementViewServiceTransport',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
220ca96c3e7cbb881449a5efc32e58889f288fbc
|
239eafa1bdf684ae8b8663c1f34f8f60df5f523e
|
/20180305_emeson_rnaediting/findWord.py
|
98024d4dce24ad435493433bc73ad27ca88798c9
|
[
"Apache-2.0"
] |
permissive
|
shengqh/vangard
|
83069b0e2ff2951a8afe6a0e70ec542bb071d2f0
|
8ee611d7eaab2a8fac37aa756921fee2e195c86a
|
refs/heads/master
| 2021-06-29T00:42:43.953492
| 2019-01-02T17:50:15
| 2019-01-02T17:50:15
| 114,392,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,173
|
py
|
import argparse
import sys
import logging
import os
import gzip
sys.path.insert(0, '/home/shengq2/program/ngsperl/lib/RNAediting')
from WordRoleUtils import WordRole
roles = {}
roles["Turnee_word"] = [
WordRole("TurneeA", "AACCAT", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeB", "AACGTC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeC", "AACTCA", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeD", "AAGACT", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeE", "ACTATT", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeH", "ATATGA", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeI", "CAATAT", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeJ", "CCTCGG", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeK", "CGCTTC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeN", "GCAGAA", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeP", "GCGTCC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeQ", "GGAGTC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeT", "GTTGCC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeU", "TACCGG", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeV", "TCAGCC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeW", "TTCGGC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
WordRole("TurneeX", "TTGACC", 6, "GCTGGACCGGTATGTAGCA", 0.9, 25, "RTRCGTRRTCCTRTTGAGCATAGCCGGTTCAATTCGCGGACTAAGGCCATCATGAA", ['A', 'G']),
]
logger = logging.getLogger('findWord')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
wordCounts = {}
discardCounts = {}
rootFolder = "/scratch/VANGARD/20180305_turnee_rnaediting/"
fileName = rootFolder + "1364-TM-1_S1_R1_001.fastq.gz"
gzipped = fileName.endswith(".gz")
if gzipped:
f = gzip.open(fileName, 'rt')
else:
f = open(fileName, 'r')
def findWord(sequence, roles, wordCounts):
    """Match a read `sequence` against every user's WordRole list.

    Returns the first role whose extracted word is non-empty, updating the
    per-user/per-sample word tallies in `wordCounts` as a side effect.
    Returns None when a role discards the read or nothing matches.

    NOTE(review): `roles` is iterated with .iteritems() (Python 2), so it must
    be a dict {userName: [WordRole, ...]} at the call site — confirm, since a
    list literal of WordRole objects also appears earlier in this file.
    Also reads/updates the module-level `discardCounts` dict.
    """
    for userName, userRoles in roles.iteritems():
        # Lazily create the per-user counting dicts on first sight of a user.
        if not userName in wordCounts:
            wordCounts[userName] = {}
        userCounts = wordCounts[userName]
        if not userName in discardCounts:
            discardCounts[userName] = {}
        userDiscardCounts = discardCounts[userName]
        for role in userRoles:
            curWord = role.getWord(sequence)
            if curWord.discarded:
                # The read hit this role but failed its acceptance criteria:
                # count the discard for the sample and stop searching entirely.
                if role.SampleName in userDiscardCounts:
                    userDiscardCounts[role.SampleName] = userDiscardCounts[role.SampleName] + 1
                else:
                    userDiscardCounts[role.SampleName] = 1
                return(None)
            if len(curWord.word) > 0:
                # First role that yields a word wins; tally it per sample.
                if not role.SampleName in userCounts:
                    userCounts[role.SampleName] = {}
                sampleCounts = userCounts[role.SampleName]
                if not curWord.word in sampleCounts:
                    sampleCounts[curWord.word] = 1
                else:
                    sampleCounts[curWord.word] = sampleCounts[curWord.word] + 1
                return(role)
    return(None)
fastqMap = {}
try:
count = 0
while True:
header = f.readline()
if '' == header:
break
if not header.startswith("@"):
continue
sequence = f.readline().strip()
line3 = f.readline()
line4 = f.readline()
role = findWord(sequence, roles, wordCounts)
if role != None:
if not role.SampleName in fastqMap:
fastqFile = rootFolder + role.SampleName + ".fastq.gz"
fastqMap[role.SampleName] = gzip.open(fastqFile, "wt")
sw = fastqMap[role.SampleName]
sw.write("%s\n" % header.strip())
sw.write("%s\n" % sequence)
sw.write("%s\n" % line3.strip())
sw.write("%s\n" % line4.strip())
count = count+1
if count % 100000 == 0:
logger.info("%d reads processed" % count)
#if count % 20000 == 0:
# break
finally:
f.close()
logger.info("total %d reads processed" % count)
for sw in fastqMap.values():
sw.close()
for userName, userCounts in wordCounts.iteritems():
samples = sorted(userCounts.iterkeys())
totalCounts = {sample:sum(userCounts[sample].values()) for sample in samples}
words = sorted(set( val for dic in userCounts.values() for val in dic.keys()))
with open(rootFolder + userName + ".count.txt", "w") as swCount:
with open(rootFolder + userName + ".perc.txt", "w") as swPerc:
header = "Word\t%s\n" % "\t".join(samples)
swCount.write(header)
swPerc.write(header)
for word in words:
swCount.write(word)
swPerc.write(word)
for sample in samples:
sampleCounts = userCounts[sample]
if word in sampleCounts:
swCount.write("\t%d" % sampleCounts[word])
swPerc.write("\t%.4f" % (sampleCounts[word] * 1.0 / totalCounts[sample]))
else:
swCount.write("\t0")
swPerc.write("\t0")
swCount.write("\n")
swPerc.write("\n")
swCount.write("TotalWord\t%s\n" % "\t".join([str(totalCounts[sample]) for sample in samples]))
swCount.write("DiscardRead\t%s\n" % "\t".join([str(discardCounts[userName][sample]) for sample in samples]))
gPerc = {}
for sample in samples:
gPerc[sample] = {}
sampleCounts = userCounts[sample]
for word in sampleCounts:
maxChrInd = len(word)
wCount = sampleCounts[word]
for chrInx in range(0, len(word)) :
if chrInx in gPerc[sample]:
chrMap = gPerc[sample][chrInx]
else:
chrMap = {'G':0, 'R':0}
gPerc[sample][chrInx] = chrMap
if word[chrInx] == 'G':
chrMap['G'] = chrMap['G'] + wCount
else:
chrMap['R'] = chrMap['R'] + wCount
with open(rootFolder + userName + ".G.txt", "w") as swG:
swG.write("Word\t%s\n" % "\t".join(samples))
for chrInd in range(0, maxChrInd):
word = 'R' * chrInd + 'G' + 'R' *(maxChrInd - chrInd - 1)
swG.write(word)
for sample in samples:
chrMap = gPerc[sample][chrInd]
perc = chrMap['G'] * 1.0 / (chrMap['G'] + chrMap['R'])
swG.write('\t%.4f' % perc)
swG.write('\n')
logger.info("done")
|
[
"shengqh@gmail.com"
] |
shengqh@gmail.com
|
7d62f9b9485d0a086b982af2224380bc8381d6ae
|
52e2064daa678499d3d48f5704d68c2eeb549156
|
/Facturacion/urls.py
|
f1fdb7159af18a2548897044a26bb76ac5ae4dcd
|
[] |
no_license
|
matheo97/ERP
|
159723cb42fba179561834d89c45af00163173df
|
7ff4f452c6c84c85759a32351fc25cc111dd0c1f
|
refs/heads/master
| 2021-11-12T12:00:55.403003
| 2019-03-06T14:58:02
| 2019-03-06T14:58:02
| 174,161,863
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
from django.conf.urls import url, include
from .views import *
urlpatterns = [
    # --- Document creation (quotes, invoices, delivery notes) ---
    url(r'^generar_cotizacion/$', generarCotizacion.as_view(), name="generar_cotizacion"),
    url(r'^generar_factura/$', generarFactura.as_view(), name="generar_factura"),
    url(r'^editar_factura/(?P<id_factura>\w+)$', editarFactura.as_view(), name="editar_factura"),
    url(r'^generar_factura_cotizacion/(?P<id_cotizacion>\d+)$', generarFacturadeCotizacion.as_view(), name="generar_factura_cotizacion"),
    url(r'^generar_remision/$', generarRemision.as_view(), name="generar_remision"),
    # NOTE(review): the next two routes share the name
    # 'eliminar_presentacion_carrito'; reverse() resolves only the last one
    # registered — confirm that is intended.
    url(r'^generar_cotizacion/eliminar_presentacion_carrito$', eliminarPresentacionCarrito.as_view(), name="eliminar_presentacion_carrito"),
    url(r'^generar_factura/eliminar_presentacion_carrito$', eliminarPresentacionCarrito.as_view(), name="eliminar_presentacion_carrito"),
    # --- Per-client listings ---
    url(r'^listar_facturacion/(?P<id_cliente>.+)$', listarFacturacion.as_view(), name="listar_facturacion"),
    url(r'^listar_facturas/(?P<id_cliente>.+)$', listar_facturas, name="listar_facturas"),
    url(r'^listar_cotizaciones/(?P<id_cliente>.+)$', listar_cotizaciones, name="listar_cotizaciones"),
    url(r'^listar_remisiones/(?P<id_cliente>.+)$', listar_remisiones, name="listar_remisiones"),
    # --- PDF rendering ---
    url(r'^generar_cotizacion_pdf/(?P<id_cotizacion>\d+)$', generarCotizacionPDF.as_view(), name="generarCotizacion_pdf"),
    url(r'^generar_factura_pdf/(?P<id_factura>\w+)$', generarFacturaPDF.as_view(), name="generarFactura_pdf"),
    # --- AJAX helpers and misc actions ---
    url(r'^generar_factura/validar_consecutivo/', validar_consecutivo, name="validar_consecutivo"),
    url(r'^generar_factura_cotizacion/validar_consecutivo/', validar_consecutivo, name="generar_factura_cotizacion_1"),
    url(r'^generar_remision_pdf/(?P<id_remision>\d+)$', generarRemisionPDF.as_view(), name="generarRemision_pdf"),
    url(r'^ver_factura/(?P<id_factura>\d+)$', verFactura.as_view(), name="ver_factura"),
    url(r'^limpiar_carrito/(?P<documento>\w+)$', limpiar_carrito, name="limpiar_carrito"),
    url(r'^eliminar_factura/$', eliminar_factura.as_view(), name="eliminar_factura"),
]
|
[
"mateo.salazar@correounivalle.edu.co"
] |
mateo.salazar@correounivalle.edu.co
|
5752a80b9c4d569351b2dea27135216d006cfe5a
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_focusing.py
|
3bfe426f06c2ae27b809740c8cfc639425e5f31f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
# class header
class _FOCUSING():
    """Word-base entry for the word 'FOCUSING' (other-form of 'focus')."""

    def __init__(self,):
        self.name = "FOCUSING"
        # BUG FIX: the original read `self.definitions = focus`, a reference to
        # an undefined name that raised NameError as soon as the class was
        # instantiated.  Store the base form as a string instead (mirroring
        # `self.basic` below) — TODO(review): confirm against the schema used
        # by the sibling wordbase files.
        self.definitions = 'focus'
        self.parents = []
        self.childen = []  # sic: original attribute name kept for compatibility
        self.properties = []
        self.jsondata = {}

        self.basic = ['focus']
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
81652859f613988bdd9753ef3a50a151f8e9cdf2
|
462682b3b29304b561eaea3833c29e84d1e95c0e
|
/PythonLoops/03-Patterns.py
|
1d0b2cb5c40e17317ac93192942cee55c45f54a2
|
[] |
no_license
|
ravi4all/PythonDecMorning
|
4452b8340ce0b4ab067bd769725c5a6f831b7f45
|
1e20da3c90d407dbef714770ad54e72f16be0eec
|
refs/heads/master
| 2021-09-02T11:01:28.686860
| 2018-01-02T05:05:06
| 2018-01-02T05:05:06
| 113,133,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
Python 3.6.2 (v3.6.2:5fd33b5, Jul 8 2017, 04:57:36) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> a = 2
>>> a = "*"
>>> print(a*5)
*****
>>> for i in range(1,6):
print('*' * ((i*2)-1)))
SyntaxError: invalid syntax
>>> for i in range(1,6):
print('*' * ((i*2)-1))
*
***
*****
*******
*********
>>> for i in range(1,6):
print('*' * i)
*
**
***
****
*****
>>> for i in range(1,6):
print(' ' * (6-i) + '*' * (2*i + 1))
***
*****
*******
*********
***********
>>> for i in range(1,6):
print(' ' * (6-i) + '*' * (2*i - 1))
*
***
*****
*******
*********
>>> for i in reversed(range(1,6)):
print(' ' * (6-i) + '*' * (2*i - 1))
*********
*******
*****
***
*
>>>
|
[
"noreply@github.com"
] |
ravi4all.noreply@github.com
|
50c7515de17781b2aa3264982c36d11d47c5e5bd
|
4f408d65db60911f56110c351cb3b64835e0c5fb
|
/caffe2/python/operator_test/video_input_op_test.py
|
f447305f341c0f4a1633c9d321e6e10d3adb13eb
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
KeyKy/caffe2_SSD
|
a02c065aef2dbcfd00faae8be0440d7a4ff0fb76
|
7235688ea5e212dbe8609d780dd94c8c7d9fef54
|
refs/heads/master
| 2021-09-18T14:36:11.247427
| 2018-07-10T09:59:35
| 2018-07-10T09:59:35
| 89,928,918
| 8
| 5
| null | 2018-07-27T02:14:38
| 2017-05-01T14:04:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,796
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import shutil
import lmdb
import unittest
import tempfile
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, cnn
import numpy as np
class VideoInputOpTest(unittest.TestCase):
    """Round-trip test for caffe2's VideoInput operator reading from LMDB."""

    def create_a_list(self, output_file, line, n):
        # create a list that repeat a line n times
        # used for creating a list file for simple test input
        with open(output_file, 'w') as file:
            for _i in range(n):
                file.write(line)

    def create_video_db(self, list_file, output_file, use_list=False):
        """Build an LMDB database from a `<file> <start_frame> <label>` list.

        Each entry is a serialized TensorProtos holding three protos: the raw
        video bytes (or, when use_list is True, just the file name), the int
        label and the int start frame.  Returns the approximate payload size.
        """
        # Write to lmdb database...
        LMDB_MAP_SIZE = 1 << 40   # MODIFY
        env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)
        total_size = 0

        file_name = []
        start_frame = []
        label = []
        index = 0

        with env.begin(write=True) as txn:
            with open(list_file, 'r') as data:
                for line in data:
                    p = line.split()
                    file_name = p[0]
                    start_frame = int(p[1])
                    label = int(p[2])

                    if not use_list:
                        # Store the encoded video bytes inline in the DB.
                        with open(file_name, mode='rb') as file:
                            video_data = file.read()
                    else:
                        # Store only the path; the op loads the file itself.
                        video_data = file_name

                    tensor_protos = caffe2_pb2.TensorProtos()
                    video_tensor = tensor_protos.protos.add()
                    video_tensor.data_type = 4  # string data
                    video_tensor.string_data.append(video_data)

                    label_tensor = tensor_protos.protos.add()
                    label_tensor.data_type = 2  # int32 data
                    label_tensor.int32_data.append(label)

                    start_frame_tensor = tensor_protos.protos.add()
                    start_frame_tensor.data_type = 2  # int32 data
                    start_frame_tensor.int32_data.append(start_frame)

                    # Keys are the decimal record index as ASCII bytes.
                    txn.put(
                        '{}'.format(index).encode('ascii'),
                        tensor_protos.SerializeToString()
                    )
                    index = index + 1
                    total_size = total_size + len(video_data) + sys.getsizeof(int)
        return total_size

    def test_read_from_db(self):
        """Write one clip 16 times into LMDB, read a batch back through
        VideoInput and check the label and the (N, C, T, H, W) output shape."""
        random_label = np.random.randint(0, 100)
        # NOTE(review): environment-specific absolute path — this test can
        # only run on hosts where the sample clip exists.
        VIDEO = "/mnt/vol/gfsdataswarm-oregon/users/trandu/sample.avi"
        temp_list = tempfile.NamedTemporaryFile(delete=False).name
        line_str = '{} 0 {}\n'.format(VIDEO, random_label)
        self.create_a_list(
            temp_list,
            line_str,
            16)
        video_db_dir = tempfile.mkdtemp()

        self.create_video_db(temp_list, video_db_dir)
        model = cnn.CNNModelHelper(name="Video Loader from LMDB")
        reader = model.CreateDB(
            "sample",
            db=video_db_dir,
            db_type="lmdb")
        # Decode 8-frame clips, resize to 171x128 and random-crop to 112.
        model.VideoInput(
            reader,
            ["data", "label"],
            name="data",
            batch_size=10,
            width=171,
            height=128,
            crop=112,
            length=8,
            sampling_rate=2,
            mirror=1,
            use_local_file=0,
            temporal_jitter=1)

        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        data = workspace.FetchBlob("data")
        label = workspace.FetchBlob("label")

        # Every record carries the same label, so the whole batch must match.
        np.testing.assert_equal(label, random_label)
        np.testing.assert_equal(data.shape, [10, 3, 8, 112, 112])
        os.remove(temp_list)
        shutil.rmtree(video_db_dir)
if __name__ == "__main__":
unittest.main()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
bd7bf40d30fa7f4cbeda69eb2b1844a862c81a84
|
341c7c9a8a8482c02c8427db3560b85427f1a7df
|
/regexlist.py
|
9b7d231832c0ede1093ab8c3878927849cda8f23
|
[] |
no_license
|
amitks815/pycode
|
7d20df97ce423e0a521658c8d9e1929ed04a0992
|
0d03598cd09821f38284fd48510fae236348dc29
|
refs/heads/master
| 2020-04-08T19:21:19.193705
| 2018-11-29T10:52:58
| 2018-11-29T10:52:58
| 159,651,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
import re

# Read every line of the input file; each line may contain an email address.
with open("file.txt") as f:
    lines = f.readlines()
print(lines)

# BUG FIX: the original class `[A-za-z...]` spans ASCII 'A'..'z', which also
# matches '[', '\\', ']', '^', '_' and '`'; the intended class is [A-Za-z].
# The '.' before the top-level domain is escaped so it matches only a literal
# dot instead of any character.
pattern = r'[A-Za-z0-9.]+@[A-Za-z0-9]+\.[A-Za-z]+'
for line in lines:
    # Print the first email-like substring found on the line, if any.
    match = re.search(pattern, line)
    if match:
        print(match.group(0))
|
[
"42370714+amitks815@users.noreply.github.com"
] |
42370714+amitks815@users.noreply.github.com
|
e3e353b9df75f077853dcb92e5f69f8cec164206
|
13a7859b59e401c83e12fd97ab93db9577f87dee
|
/forH4G/h4gFittingTools/MyCMSStyle.py
|
d2afed6baf568e36ea0afd8117ff8d3466d3b9fc
|
[] |
no_license
|
bmarzocc/Scripts
|
4a59527c3a8282d1dce2b76ee642d953743ed16c
|
6cfc8c3bf10132a1ee55bfcca823c2cf711d783a
|
refs/heads/master
| 2023-03-22T13:40:43.987502
| 2021-03-09T09:15:50
| 2021-03-09T09:15:50
| 255,868,174
| 0
| 0
| null | 2020-04-15T09:29:38
| 2020-04-15T09:29:37
| null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
from ROOT import *
def SetAxisTextSizes(obj, yoff=0, ysize=1, xoff=0, xsize=1):
    """Apply the standard title/label offsets and sizes to the axes of a
    ROOT drawable (histogram, graph, ...).

    yoff/xoff are added to the default title offset of 1.1; ysize/xsize
    scale the default title size (0.0425) and label size (0.04).
    """
    obj.GetYaxis().SetTitleOffset(1.1 + yoff)
    obj.GetYaxis().SetTitleSize(0.0425 * ysize)
    obj.GetYaxis().SetLabelSize(0.04 * ysize)
    obj.GetXaxis().SetTitleOffset(1.1 + xoff)
    obj.GetXaxis().SetTitleSize(0.0425 * xsize)
    obj.GetXaxis().SetLabelSize(0.04 * xsize)
    try:
        # Objects with a Z axis get the defaults; others raise AttributeError.
        obj.GetZaxis().SetTitleOffset(1.1)
        obj.GetZaxis().SetTitleSize(0.0425)
        obj.GetZaxis().SetLabelSize(0.04)
    except AttributeError:
        pass  # no Z axis on this object (was: `a=1`, a dummy statement)
def SetGeneralStyle():
    # Thicker frame line for all subsequently created pads (ROOT global style).
    gStyle.SetFrameLineWidth(2)
def SetPadStyle(obj):
    # Draw tick marks on the opposite (top/right) edges of the pad as well.
    obj.SetTicky()
    obj.SetTickx()
def DrawCMSLabels(obj, lumi=''):
    """Draw the standard CMS labels on pad/canvas `obj`.

    Left:  "CMS" (or "CMS Simulation" when no luminosity is given).
    Right: "<lumi> fb^-1 (13 TeV)" (or just "(13 TeV)" for simulation).
    """
    pad = obj.cd()
    # Pad margins locate the text just above the frame; `b` is currently unused.
    l = pad.GetLeftMargin()
    t = pad.GetTopMargin()
    r = pad.GetRightMargin()
    b = pad.GetBottomMargin()
    lat = TLatex()
    lat.SetTextSize(0.045)
    lat.SetTextAlign(11)  # left/bottom-adjusted
    lat.SetTextFont(42)

    cmsTag = "#bf{CMS}"
    lumiTag = lumi+' fb^{-1} (13 TeV)'
    if lumi == '':
        # No luminosity given -> this is a simulation plot.
        cmsTag = "#bf{CMS} #it{Simulation}"
        lumiTag = '(13 TeV)'

    lat.DrawLatexNDC(l+0.01, 1-t+0.02, cmsTag)
    lat.SetTextAlign(31)  # right/bottom-adjusted
    lat.DrawLatexNDC(1-r-0.001, 1-t+0.02, lumiTag)
|
[
"wamorkar.t@husky.neu.edu"
] |
wamorkar.t@husky.neu.edu
|
b32c05a30d39745a342625d64163bd4819dcf565
|
6a55fc908497a0d4ada6eae74d64a057b609c261
|
/model-optimizer/extensions/front/tf/InterpolateTransposes.py
|
91616c0508d912fc5ea71d727d2ee4c924e1408c
|
[
"Apache-2.0"
] |
permissive
|
anton-potapov/openvino
|
9f24be70026a27ea55dafa6e7e2b6b18c6c18e88
|
84119afe9a8c965e0a0cd920fff53aee67b05108
|
refs/heads/master
| 2023-04-27T16:34:50.724901
| 2020-06-10T11:13:08
| 2020-06-10T11:13:08
| 271,256,329
| 1
| 0
|
Apache-2.0
| 2021-04-23T08:22:48
| 2020-06-10T11:16:29
| null |
UTF-8
|
Python
| false
| false
| 3,105
|
py
|
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
from mo.graph.graph import Graph, Node
from mo.middle.pattern_match import find_pattern_matches, inverse_dict
class InterpolateTranspose(FrontReplacementFromConfigFileGeneral):
    """
    Delete useless transposes around ResizeNearestNeighbor op. In TF this op is working in NHWC layout,
    Resample in OpenVINO working in NCHW layout. If all graph has NCHW layout we should delete transposes around
    Resample: (NCHW->NHWC) -> Resample -> (NHWC -> NCHW) to run this op in NCHW without changes of layout.
    """
    enabled = True
    replacement_id = 'InterpolateTranspose'
    # Only meaningful when the whole graph is laid out as NCHW.
    graph_condition = [lambda graph: graph.graph['layout'] == 'NCHW']

    # Pattern: Transpose(perm [0,2,3,1], NCHW->NHWC) -> Interpolate ->
    #          Transpose(perm [0,3,1,2], NHWC->NCHW).
    pattern_nodes = [
        ('interpolate', {'kind': 'op', 'op': 'Interpolate'}),
        ('transpose_1', {'kind': 'op', 'op': 'Transpose'}),
        ('transpose_1_order', {'kind': 'op', 'op': 'Const',
                               'value': lambda x: x is not None and np.array_equal(x, int64_array([0, 2, 3, 1]))}),
        ('transpose_2', {'kind': 'op', 'op': 'Transpose'}),
        ('transpose_2_order', {'kind': 'op', 'op': 'Const',
                               'value': lambda x: x is not None and np.array_equal(x, int64_array([0, 3, 1, 2]))}),
    ]
    pattern_edges = [
        ('transpose_1', 'interpolate', {'in': 0, 'out': 0}),
        ('transpose_1_order', 'transpose_1', {'in': 1, 'out': 0}),
        ('interpolate', 'transpose_2', {'in': 0, 'out': 0}),
        ('transpose_2_order', 'transpose_2', {'in': 1, 'out': 0}),
    ]

    def transform_graph(self, graph: Graph, replacement_descriptions: dict):
        """Remove each matched Transpose pair and rewire Interpolate in place."""
        matches = find_pattern_matches(graph, self.pattern_nodes, self.pattern_edges)
        for match in list(matches):
            inverse_match = inverse_dict(match)
            interpolate = Node(graph, inverse_match['interpolate'])
            transpose_1 = Node(graph, inverse_match['transpose_1'])
            transpose_2 = Node(graph, inverse_match['transpose_2'])
            # because we remove Transpose layers the ResizeNearestNeighbor should be updated for NCHW layout
            interpolate.axes = int64_array([2, 3])
            # Bypass both transposes: feed Interpolate directly and expose its
            # output to the former consumer, then drop the transpose nodes.
            transpose_1.in_port(0).get_connection().set_destination(interpolate.in_port(0))
            transpose_2.out_port(0).get_connection().set_source(interpolate.out_port(0))
            graph.remove_nodes_from([transpose_1.id, transpose_2.id])
|
[
"alexey.suhov@intel.com"
] |
alexey.suhov@intel.com
|
541914f4535d123610ab11ffa73cc29570302b09
|
3add939686f188d4381ea4cc699fca285ff10a46
|
/utils/map_county_position.py
|
a5afbb71d01fb3f177dddf8f28d8cc81bdec910b
|
[] |
no_license
|
weixinl/2019MCM
|
a66fdf5ae85dc0dce1df655d2ccae85ed3b553ea
|
5caddf4b01044475d2a22097742861eb5cc698b4
|
refs/heads/master
| 2022-06-25T09:32:57.042550
| 2019-03-07T06:28:38
| 2019-03-07T06:28:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
# to get new file map a place to a position on map
import pandas as pd
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

# Load NFLIS drug-report data and a zip-code -> lat/lon lookup table.
nflis_path="../MCM_NFLIS_Data.xlsx"
nflis_df=pd.read_excel(nflis_path,sheet_name="Data")
zipcode_path='../zipcode.csv'
zipcode_df=pd.read_csv(zipcode_path)
# NOTE(review): the zip-code table's 'city' column is renamed to 'county' and
# later merged on county names — confirm city names line up with county names.
zipcode_df=zipcode_df.rename(columns={'city':'county','state':'state','latitude':'latitude',\
 'longitude':'longitude'})

#further process nflis
nflis_df=nflis_df.rename(columns={'COUNTY':'county','State':'state','FIPS_County':'fips_county',\
 'FIPS_State':'fips_state','TotalDrugReportsCounty':'county_report'})
# Keep a single year and average the report counts per (county, state).
tmp_year=2017
nflis_df=nflis_df.loc[nflis_df['YYYY']==tmp_year]
nflis_df=nflis_df[['county','state','county_report']]
nflis_df=nflis_df.groupby(['county','state']).mean().reset_index()

#convert to upper
zipcode_county=zipcode_df['county']
zipcode_county=zipcode_county.str.upper()
zipcode_df['county']=zipcode_county
# print(zipcode_df)

# Attach coordinates to each county; drop duplicates and unmatched rows.
map_df=pd.merge(nflis_df,zipcode_df,how='left',on=['county','state'])
map_df=map_df.drop_duplicates(subset=['county','state'],keep='first')
map_df=map_df.dropna(axis=0,how='any')
map_df=map_df[['county','state','latitude','longitude','county_report']]
# map_df.to_csv('county2position.csv')

# Linearly map longitudes onto image columns 0..99.
longitude_list=map_df['longitude'].tolist()
longitude_list=np.array(longitude_list)
# print(longitude_list)
max_longitude=longitude_list.max()
min_longitude=longitude_list.min()
horizon_start=0
horizon_end=99
k_longitude=(horizon_end-horizon_start)/(max_longitude-min_longitude)
b_longitude=horizon_start-k_longitude*min_longitude
# print(k_longitude)

# Same linear mapping for latitudes onto rows 0..99.
latitude_list=map_df['latitude'].tolist()
latitude_list=np.array(latitude_list)
# print(longitude_list)
max_latitude=latitude_list.max()
min_latitude=latitude_list.min()
vertical_start=0
vertical_end=99
k_latitude=(vertical_end-vertical_start)/(max_latitude-min_latitude)
b_latitude=vertical_start-k_latitude*min_latitude

# Rasterize: one 100x100 grayscale image, pixel value = county report count.
img_arr=np.zeros((100,100))
for row in map_df.iterrows():
    row=row[1]
    tmp_longitude=row['longitude']
    tmp_latitude=row['latitude']
    tmp_county_report=row['county_report']
    tmp_horizon=k_longitude*tmp_longitude+b_longitude
    tmp_vertical=k_latitude*tmp_latitude+b_latitude
    # +0.1 before int() nudges values like 3.999999 up before truncation.
    tmp_vertical=int(tmp_vertical+0.1)
    tmp_horizon=int(tmp_horizon+0.1)
    # NOTE(review): indexed [horizon][vertical], i.e. longitude selects the
    # image row — the output is effectively transposed; confirm intended.
    img_arr[tmp_horizon][tmp_vertical]=tmp_county_report
img = Image.fromarray(img_arr)
img=img.convert('L')
img.show()
img.save(str(tmp_year)+'_img.jpg')
# print(map_df)
|
[
"weixinluwx@foxmail.com"
] |
weixinluwx@foxmail.com
|
37909ffcf88ac36105ccbcdb2881775b527b6187
|
9b422078f4ae22fe16610f2ebc54b8c7d905ccad
|
/xlsxwriter/test/comparison/test_chart_size02.py
|
bb8624007d0a0803fa1ea7fec99c5b6f4a73cd6e
|
[
"BSD-2-Clause-Views"
] |
permissive
|
projectsmahendra/XlsxWriter
|
73d8c73ea648a911deea63cb46b9069fb4116b60
|
9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45
|
refs/heads/master
| 2023-07-21T19:40:41.103336
| 2023-07-08T16:54:37
| 2023-07-08T16:54:37
| 353,636,960
| 0
| 0
|
NOASSERTION
| 2021-04-01T08:57:21
| 2021-04-01T08:57:20
| null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # NOTE(review): this module is test_chart_size02 but compares against
        # chart_size01.xlsx — presumably intentional, since the scale factors
        # below reproduce chart_size01's absolute size (480*1.0667 ~= 512,
        # 288*1.1111 ~= 320); confirm against the reference workbook.
        self.set_filename('chart_size01.xlsx')

    def test_create_file(self):
        """Test XlsxWriter chartarea properties."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})

        # Fixed axis ids so the generated XML matches the reference file.
        chart.axis_ids = [61355904, 61365248]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
        chart.add_series({'values': '=Sheet1!$C$1:$C$5'})

        # Size the chart via scale factors rather than absolute pixels.
        chart.set_size({'x_scale': 1.066666666, 'y_scale': 1.11111111})

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
a5ed97929c86c1220ab0718c4de35447549ecd42
|
fdd9e3131ead660db9485304438993a2a249fb1f
|
/tests/test_npc_cli/test_describe/test_describe_systems_cmd.py
|
7d33b59443410bbcf0d0664b05ce152289b67f07
|
[
"MIT"
] |
permissive
|
aurule/npc
|
6807aa0723e765cb33fe5f5b49b0f579a6207153
|
2e1b2e92e2a4908d791846f184ee7e4de2f6682e
|
refs/heads/develop
| 2023-09-02T02:46:47.900892
| 2023-08-30T17:31:00
| 2023-08-30T17:31:00
| 47,045,977
| 14
| 2
|
MIT
| 2023-08-18T20:49:12
| 2015-11-29T01:40:18
|
Python
|
UTF-8
|
Python
| false
| false
| 590
|
py
|
from click.testing import CliRunner
from tests.fixtures import tmp_campaign
from npc_cli import cli
def test_shows_system_name(tmp_path):
    # Outside any campaign, `npc describe systems` should still list the
    # built-in game systems (e.g. New World of Darkness).
    runner = CliRunner()
    with runner.isolated_filesystem(temp_dir=tmp_path):
        result = runner.invoke(cli, ['describe', 'systems'])

        assert "New World of Darkness" in result.output
def test_shows_current_campaign_system(tmp_campaign):
    # Inside a campaign directory, the output should flag the campaign's
    # configured system as the one currently in use.
    runner = CliRunner()
    with runner.isolated_filesystem(temp_dir=tmp_campaign.root):
        result = runner.invoke(cli, ['describe', 'systems'])

        assert "Currently using" in result.output
|
[
"paige.andrews@modolabs.com"
] |
paige.andrews@modolabs.com
|
bbb794515424fcae3a62640604b057d36412e869
|
2e74cff6c9639f3903ccde662e79359d0724285e
|
/2019_late/20190920/swea_5188_최소합.py
|
73b522689d9b934a03bfafa00236176b182caa0d
|
[] |
no_license
|
dodonmountain/algorithm
|
e29988071f651e51ba65e3926302f94a3d4074a5
|
ce33e0d74220839aed4b17a47fa0069458a4324e
|
refs/heads/master
| 2022-11-05T05:14:01.527015
| 2022-11-01T04:29:37
| 2022-11-01T04:29:37
| 200,008,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
import sys
sys.stdin = open('5188.txt')
dx, dy = (1,0), (0,1)
def dfs(h,c):
    """Branch-and-bound DFS from cell `h` (row, col) with accumulated cost `c`.

    Tightens the global upper bound `tmp` whenever the bottom-right corner is
    reached; prunes any branch whose cost already exceeds `tmp`.
    Uses module-level globals: N, board, visit, dx, dy.
    """
    global tmp
    if h == (N-1,N-1):
        # Reached the target corner: keep the cheaper of the two costs.
        if c < tmp:
            tmp = c
        return
    for i in range(2):
        # i == 0 tries the cell below (dx), i == 1 the cell to the right (dy).
        nx, ny = h[0], h[1]
        if 0 <= h[0] + dx[i] < N:
            nx = h[0] + dx[i]
        if 0 <= h[1] + dy[i] < N:
            ny = h[1] + dy[i]
        # NOTE(review): when the step is out of bounds, (nx, ny) stays at the
        # current (already visited) cell and is skipped by the check below —
        # correct, but only by virtue of the visit[] guard.
        if not visit[nx][ny]:
            if c+board[nx][ny] <= tmp:
                visit[nx][ny] = 1
                dfs((nx,ny),c+board[nx][ny])
                visit[nx][ny] = 0
for t_case in range(int(input())):
    N = int(input())
    board, pp, ll = [], 0, 0
    for _ in range(N):
        board.append(list(map(int, input().split())))
    # Two concrete monotone paths give initial upper bounds:
    #   top row then right column   -> sum(board[0]) + pp
    #   left column then bottom row -> sum(board[N-1]) + ll
    for i in range(1, N):
        pp += board[i][N-1]
        ll += board[i][0]
    # BUG FIX: the original compared the two bounds but then unconditionally
    # overwrote tmp with sum(board[0]) + pp, making the comparison dead code
    # (the weaker bound slowed pruning).  Keep the smaller bound explicitly.
    tmp = min(sum(board[0]) + pp, sum(board[N-1]) + ll)
    visit = [[0] * N for _ in range(N)]
    dfs((0, 0), board[0][0])
    print('#{} {}'.format(t_case + 1, tmp))
|
[
"lkh151515@gmail.com"
] |
lkh151515@gmail.com
|
12d348348906199a44c75cc418f75704de8a63e2
|
0db97db08743783019efe022190f409d22ff95bd
|
/aliyun/api/rest/Ram20140214AddUserRequest.py
|
e2541abe6859593f3da162236c1ccb39f69cba4c
|
[
"Apache-2.0"
] |
permissive
|
snowyxx/aliyun-python-demo
|
8052e2a165f1b869affe632dda484d6ca203bd9b
|
ed40887ddff440b85b77f9b2a1fcda11cca55c8b
|
refs/heads/master
| 2021-01-10T03:37:31.657793
| 2016-01-21T02:03:14
| 2016-01-21T02:03:14
| 49,921,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
'''
Created by auto_sdk on 2015.01.27
'''
from aliyun.api.base import RestApi
class Ram20140214AddUserRequest(RestApi):
	"""Aliyun RAM ``AddUser`` request wrapper (API version 2014-02-14).

	Auto-generated: callers assign the request parameters below before
	sending the request through the RestApi base class.
	"""
	def __init__(self,domain='ram.aliyuncs.com',port=80):
		RestApi.__init__(self,domain, port)
		# Request parameters; None means "not set".
		self.AccountSpace = None
		self.Comments = None
		self.UserName = None

	def getapiname(self):
		# Fully-qualified API name used by the RestApi dispatch machinery.
		return 'ram.aliyuncs.com.AddUser.2014-02-14'
|
[
"snowyxx@126.com"
] |
snowyxx@126.com
|
4b27a3e0aa124b223b6e2bb7cf56736f2f5905d6
|
e6a8793b1b12d47e57f00485350d122946618245
|
/home/migrations/0005_remove_page_is_footer_1.py
|
02c0d3ec0eed67be17f85c803e78a91e63b41459
|
[] |
no_license
|
Fabricourt/school
|
70b2eba2c0b8ff9b9290eb0f68d730698a6d3a63
|
dad80c36be34b432dfadef195eb9e867f82cafff
|
refs/heads/main
| 2023-01-01T15:48:43.760288
| 2020-10-26T11:15:32
| 2020-10-26T11:15:32
| 305,829,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Generated by Django 3.1.2 on 2020-10-17 13:07
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop ``is_footer_1`` from the ``page`` model."""

    dependencies = [
        ('home', '0004_auto_20201017_1552'),
    ]

    operations = [
        # Removes home.Page.is_footer_1; the column is dropped on migrate.
        migrations.RemoveField(
            model_name='page',
            name='is_footer_1',
        ),
    ]
|
[
"mfalme2030@gmail.com"
] |
mfalme2030@gmail.com
|
097187bfe4e27f5d2ceaa882fa7a70cfd771c020
|
5ee5853eb335fcf575d4344366ef9b4bce03570d
|
/p847h/shorest_path_length.py
|
9777473ab2a7326a108ab1a574ad0307396ed666
|
[
"MIT"
] |
permissive
|
l33tdaima/l33tdaima
|
15463fb2f8d61286a4a3a7bacaaee2ab1f7c4f43
|
f35305c618b383a79d05074d891cf0f7acabd88f
|
refs/heads/main
| 2023-07-20T21:52:26.330301
| 2023-07-19T02:30:22
| 2023-07-19T02:30:22
| 99,509,451
| 1
| 0
|
MIT
| 2018-10-31T15:10:49
| 2017-08-06T19:44:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 939
|
py
|
from cmath import exp
from curses.ascii import SO
class Solution:
    def shortestPathLength(self, graph: list[list[int]]) -> int:
        """Return the length of the shortest walk that visits every node.

        graph is an adjacency list; nodes may be revisited.  Level-by-level
        BFS over (node, visited-bitmask) states, started simultaneously from
        every node, finds the minimum number of edges.

        BUG FIX: the original `while True` loop never terminated when the
        all-visited state was unreachable (empty or disconnected graph); the
        loop now ends when no new states remain, and the start states are
        seeded into the visited set so they are never re-expanded.
        """
        n = len(graph)
        if n == 0:
            return 0  # nothing to visit
        final = (1 << n) - 1
        seen = {(i, 1 << i) for i in range(n)}
        frontier = [(i, 1 << i) for i in range(n)]
        steps = 0
        while frontier:
            nxt = []
            for node, state in frontier:
                if state == final:
                    return steps
                for nb in graph[node]:
                    nstate = state | (1 << nb)
                    if (nb, nstate) not in seen:
                        seen.add((nb, nstate))
                        nxt.append((nb, nstate))
            frontier = nxt
            steps += 1
        return -1  # all-visited state unreachable (disconnected graph)
# TESTS
# Each case pairs an adjacency list with the known shortest-walk length.
for graph, expected in [
    ([[1, 2, 3], [0], [0], [0]], 4),
    ([[1], [0, 2, 4], [1, 3, 4], [2], [1, 2]], 4),
]:
    sol = Solution()
    actual = sol.shortestPathLength(graph)
    print("Shortest path to visit all nodes in", graph, "->", actual)
    assert actual == expected
|
[
"l33tdaima@github.com"
] |
l33tdaima@github.com
|
62f9354ab6fcf808aef2ae3afc2deb20f08226ab
|
89d5dbd7672d9e03e7ca5277a9fd942b2444db86
|
/Advance python/08_global_local_variable.py
|
c69f04343d0079f2abeadbbb65295b89401249d5
|
[] |
no_license
|
rameshparajuli-github/python-programming
|
6ff218d4d1cdde73a578e4f2f3ba6241310d6408
|
5d2638a24e8b50614f2c15f04eb1f00dba0f6175
|
refs/heads/master
| 2023-06-17T18:40:47.797595
| 2021-07-16T14:02:52
| 2021-07-16T14:02:52
| 386,656,247
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
a=54 # global variable
def func1():
    global a # lets this function rebind the module-level 'a'
    print(f"This statement:{a}")  # still 54 here
    # Original (translated from Nepali): "if the `global a` above were not
    # used, 54 would still have been printed at this line".
    # NOTE(review): without `global`, the first print would actually raise
    # UnboundLocalError, because the later assignment makes 'a' local.
    a=9 # rebinds the *global* 'a'
    print(f"This statement:{a}")  # now 9
func1()
print(f"This statement:{a}")  # 9 — func1 changed the global
|
[
"susmitscore619@gmail.com"
] |
susmitscore619@gmail.com
|
3a40a65282330a85aeb70fd636c76af7cc562d0b
|
a7a29bc1643e14ae74f95d0b6695de32b6d6cfb5
|
/0x0B-python-input_output/1-number_of_lines.py
|
148a4fadbe23197c0272795e3bbb6c927d9f17e7
|
[] |
no_license
|
ToqYang/holbertonschool-higher_level_programming
|
95752a663307534e16d57a73cc4a8b0170f86614
|
862f88652619711eb0d1c7f821467b15d3f9b7cf
|
refs/heads/master
| 2020-07-23T01:41:19.574505
| 2020-02-14T04:46:35
| 2020-02-14T04:46:35
| 207,403,438
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
#!/usr/bin/python3
""" Read a file """
def number_of_lines(filename=""):
    """ Open ``filename`` (UTF-8 text) and count its lines
    Return: the number of lines in the file"""
    with open(filename, mode="r", encoding="utf-8") as handle:
        return sum(1 for _ in handle)
|
[
"santitoya-2001@outlook.com"
] |
santitoya-2001@outlook.com
|
caf49e720e7a11db13c00411024b3f18209fde61
|
9c20b0f0ad729b77e970dedaf4a138c99b4364bc
|
/Lib/site-packages/phonenumbers/shortdata/region_PW.py
|
2b53f97da4c5bba6ee25876c99a74d7e9f7544e6
|
[] |
no_license
|
GlovesMaker/Sklepinternetowy
|
4459f8651d2280e4840cfb293de28f9413df68af
|
d05372e96f7238c9459caf4f7a890a5a6f2bb2c3
|
refs/heads/master
| 2022-12-22T02:43:33.628016
| 2018-09-11T18:20:37
| 2018-09-11T18:20:37
| 167,855,928
| 0
| 1
| null | 2022-12-08T05:55:04
| 2019-01-27T20:36:42
|
Python
|
UTF-8
|
Python
| false
| false
| 548
|
py
|
"""Auto-generated file, do not edit by hand. PW metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_PW = PhoneMetadata(id='PW', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='9\\d\\d', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='911', example_number='911', possible_length=(3,)),
short_data=True)
|
[
"buchar123@gmail.com"
] |
buchar123@gmail.com
|
9a4ba82eb6c3c0b5c1df859b72e504c966f1763a
|
74482894c61156c13902044b4d39917df8ed9551
|
/test/test_tokens_forwarding_fail_data_item.py
|
ce8cc93181cac7dc8210c609b3d54fa4af76f3a7
|
[
"MIT"
] |
permissive
|
xan187/Crypto_APIs_2.0_SDK_Python
|
bb8898556ba014cc7a4dd31b10e24bec23b74a19
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
refs/heads/main
| 2023-06-22T15:45:08.273635
| 2021-07-21T03:41:05
| 2021-07-21T03:41:05
| 387,982,780
| 1
| 0
|
NOASSERTION
| 2021-07-21T03:35:29
| 2021-07-21T03:35:29
| null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.tokens_forwarding_fail_data_item import TokensForwardingFailDataItem
class TestTokensForwardingFailDataItem(unittest.TestCase):
    """TokensForwardingFailDataItem unit test stubs (auto-generated)."""

    def setUp(self):
        # No shared fixtures needed for this generated stub.
        pass

    def tearDown(self):
        pass

    def testTokensForwardingFailDataItem(self):
        """Test TokensForwardingFailDataItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = TokensForwardingFailDataItem()  # noqa: E501
        pass
|
[
"kristiyan.ivanov@menasoftware.com"
] |
kristiyan.ivanov@menasoftware.com
|
ad96a6a7aee94d1afcd1bceb24d8af138afdff98
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/fuchsia-gn-sdk/src/gn_run_binary.py
|
b39f49b5c30357b323b41f6911d8d6f0b7c05077
|
[
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
#!/usr/bin/env python3.8
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script for GN to run an arbitrary binary.
Run with:
python3.8 gn_run_binary.py <binary_name> [args ...]
"""
import os
import subprocess
import sys
# This script is designed to run binaries produced by the current build. We
# may prefix it with "./" to avoid picking up system versions that might
# also be on the path.
path = sys.argv[1]  # first CLI argument: the binary to execute
if not os.path.isabs(path):
    # Anchor relative paths to the current directory so the OS does not
    # search $PATH for a same-named system binary instead.
    path = './' + path

# The rest of the arguments are passed directly to the executable.
args = [path] + sys.argv[2:]

# Propagate the child's exit status as this script's own exit code.
sys.exit(subprocess.call(args))
|
[
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] |
chromium-scoped@luci-project-accounts.iam.gserviceaccount.com
|
9bd8e8da9f5987759171e2b03d02241ddba91fc8
|
a96bbc3da8557e68cb01db671b930fec9f46c0c2
|
/blog/migrations/0005_comment.py
|
2c007c09ada8afc58aec82f33ab624bfc2cadb43
|
[] |
no_license
|
winterash2/django_first_blog
|
dd3de9f01f8e2b2df21fba9cd3d636c6fbd94fd1
|
06f059570ae5851db97b5c7f9b9d043da033c023
|
refs/heads/master
| 2022-11-26T18:44:47.316248
| 2020-08-10T09:10:16
| 2020-08-10T09:10:16
| 286,345,428
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
# Generated by Django 3.0.9 on 2020-08-07 02:01
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Add the Comment model, linked to blog.Post via a ForeignKey."""

    dependencies = [
        # Must be applied after the previous blog migration.
        ('blog', '0004_auto_20200806_1540'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # Defaults to the time the comment row is created.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                # New comments default to not approved.
                ('approved_comment', models.BooleanField(default=False)),
                # Deleting a Post cascades to its comments; reverse
                # accessor is post.comments.
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
        ),
    ]
|
[
"winterash2@naver.com"
] |
winterash2@naver.com
|
760764837e284ea13127fd1fbbc619a6dbbba28f
|
c071eb46184635818e8349ce9c2a78d6c6e460fc
|
/system/python_stubs/-745935208/PySide2/QtGui/QTextBlockUserData.py
|
a44da54e48e31d0038371f89e05a4a2201acfe19
|
[] |
no_license
|
sidbmw/PyCharm-Settings
|
a71bc594c83829a1522e215155686381b8ac5c6e
|
083f9fe945ee5358346e5d86b17130d521d1b954
|
refs/heads/master
| 2020-04-05T14:24:03.216082
| 2018-12-28T02:29:29
| 2018-12-28T02:29:29
| 156,927,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# encoding: utf-8
# module PySide2.QtGui
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\PySide2\QtGui.pyd
# by generator 1.146
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import Shiboken as __Shiboken
class QTextBlockUserData(__Shiboken.Object):
    """Generated stub for PySide2.QtGui.QTextBlockUserData.

    Produced by an introspection generator; real signatures are not
    available, so the methods below are placeholders only.
    """

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
|
[
"siddharthnatamai@gmail.com"
] |
siddharthnatamai@gmail.com
|
3772b803daea556dbaca21372e0a9c473332e531
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/sCH5gcyoRqq3Gfzyi_12.py
|
581973007ad8ee1c31dd8d0af12ef826a10ecb9d
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
"""
Create a function which validates whether a given number exists, and could
represent a real life quantity. Inputs will be given as a string.
### Examples
valid_str_number("3.2") ➞ True
valid_str_number("324") ➞ True
valid_str_number("54..4") ➞ False
valid_str_number("number") ➞ False
### Notes
Accept numbers such as `.5` and `0003`.
"""
import re
def valid_str_number(s):
    """Return True if *s* is a string representing a valid decimal number.

    Accepts plain integers ("324", "0003"), decimals ("3.2"), and
    numbers with no integer part (".5").  Rejects strings with stray
    characters, multiple dots, a lone ".", or the empty string.

    Bug fixed: the original pattern '\\d*\\.?\\d*' made every component
    optional, so "" and "." incorrectly matched; a valid number must
    contain at least one digit.
    """
    # Either digits with an optional fractional part, or ".digits".
    return bool(re.fullmatch(r'\d+\.?\d*|\.\d+', s))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
fb33dce1897d5dc122db0542d61b43b2317c257a
|
015106a1a964305ef8ceb478cc56fd7d4fbd86d5
|
/468.py
|
b55bc68b7bb8270e8e126714f8a586f504225d6b
|
[] |
no_license
|
zenmeder/leetcode
|
51a0fa4dc6a82aca4c67b5f4e0ee8916d26f976a
|
0fddcc61923d760faa5fc60311861cbe89a54ba9
|
refs/heads/master
| 2020-12-02T18:16:10.825121
| 2018-10-30T11:47:53
| 2018-10-30T11:47:53
| 96,505,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
#!/usr/local/bin/ python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
class Solution(object):
    """LeetCode 468: classify a string as "IPv4", "IPv6", or "Neither".

    Bugs fixed: the original compared numeric parts as *strings*
    ('0' <= num <= '255'), which is lexicographic — e.g. "3" > "255" as
    strings, so addresses like "3.0.0.0" were wrongly rejected.  The
    IPv6 check ('0' <= num.upper() <= 'FFFF') was broken the same way,
    and a debug print() was left in the loop.
    """

    def validIPAddress(self, IP):
        """
        :type IP: str
        :rtype: str
        """
        if self.isIPV4(IP):
            return "IPv4"
        elif self.isIPV6(IP):
            return "IPv6"
        else:
            return "Neither"

    def isIPV4(self, IP):
        """True iff IP is four dot-separated decimal octets 0-255 with
        no leading zeros (except the octet "0" itself)."""
        parts = IP.split('.')
        if len(parts) != 4:
            return False
        for num in parts:
            # 1-3 ASCII digits only.
            if not 1 <= len(num) <= 3 or any(c not in '0123456789' for c in num):
                return False
            # No leading zero unless the octet is exactly "0".
            if num[0] == '0' and num != '0':
                return False
            if int(num) > 255:
                return False
        return True

    def isIPV6(self, IP):
        """True iff IP is eight colon-separated groups of 1-4 hex
        digits (full, uncompressed form only)."""
        parts = IP.split(':')
        if len(parts) != 8:
            return False
        hex_digits = set('0123456789abcdefABCDEF')
        for group in parts:
            if not 1 <= len(group) <= 4:
                return False
            if any(c not in hex_digits for c in group):
                return False
        return True
print(Solution().validIPAddress("192.0.0.1"))
|
[
"zenmeder@gmail.com"
] |
zenmeder@gmail.com
|
987357f3daea5355b973c676404a76288914d82e
|
e9bf5fb440305c7b17935438fd515ca2541babc4
|
/kinl.py
|
7b30a2b77605cfb7bc749dfe93d4fb9c1031dc87
|
[] |
no_license
|
jyothiesai/guvij
|
748518673509d4849803fc22b03cd6b2b0b3392f
|
fdcd29f5548c12095f259ff2f74024317787b110
|
refs/heads/master
| 2020-04-16T01:42:44.848569
| 2019-08-07T13:24:44
| 2019-08-07T13:24:44
| 165,183,822
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
#jyothi
# Reads two integers "n k" on one line, then a space-separated list of
# integers, and prints 'yes' if k occurs in the list, 'no' otherwise.
# NOTE(review): raw_input() exists only on Python 2; under Python 3
# this script would need input().  n is read but never used to bound
# the list length.
n,k=raw_input().split(' ')
n,k=int(n),int(k)
l=[int(x) for x in raw_input().split(' ')]
if k in l:
    print('yes')
else:
    print('no')
|
[
"noreply@github.com"
] |
jyothiesai.noreply@github.com
|
6801ecc4ad1ea58b5cafd1fd372f067ab59d8863
|
6712885a4c2a056eee3b4488382b9afc2149f799
|
/New LeetCode Problems/remove_outermost_parens.py
|
669927e0a71b7d1f372ff3cddc03ce00f0133f34
|
[] |
no_license
|
matthewharrilal/CS-Questions-GRIND
|
cac1cb562e5dad79ee4e224895d034f9c71d9ed3
|
7196c5e8df495d43ee91f218d6253c8a88a7d59d
|
refs/heads/master
| 2020-04-16T01:30:53.289837
| 2019-07-24T04:34:21
| 2019-07-24T04:34:21
| 165,176,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
# https://leetcode.com/problems/remove-outermost-parentheses/
# def removeOuterParentheses(self, S):
# """
# :type S: str
# :rtype: str
# """
|
[
"matthewharrilal@gmail.com"
] |
matthewharrilal@gmail.com
|
d47fcca6b67ca58f9a12f1e5299051c3e3d948b8
|
c39e466c2b6fdffbc410f24669f214e13fb87781
|
/PYTHON/EJERCICIOS/TEMA 5/COMPIS/017_G8-Carla Guillén Pingarrón_638450_assignsubmission_file_/S5_G8.py
|
a05a77ba9258fb980599167fb5bccc7bb6b79d0f
|
[] |
no_license
|
enanibus/biopython
|
3a58efbcc92f1ce60285a115c620de9295b7d281
|
613d334a5c0502059930d9381a9464ef533cca1c
|
refs/heads/master
| 2021-01-12T17:27:39.516793
| 2017-01-02T18:30:09
| 2017-01-02T18:30:09
| 71,573,732
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,820
|
py
|
##Group 8: Álvaro Alfayate, Andrea de la Fuente, Carla Guillén y Jorge Nuevo.
def ReadFasta(FileName):
    """Read a single-record FASTA file and extract its key fields.

    Returns a tuple ``(accession, sequence)`` where *accession* is the
    NM_/XM_ RefSeq accession (e.g. ``NM_000546.5``) from the header
    line and *sequence* is the upper-cased A/C/G/T sequence with line
    breaks removed.

    Fixes over the original: the file is opened with ``with`` — the
    original ``MyFile.close()`` sat *after* ``return`` and never ran,
    leaking the handle — and the regex is a raw string.
    """
    ReadSeq = ''  # header line (newline kept) followed by the joined sequence
    with open(FileName, 'r') as MyFile:
        for Line in MyFile:
            if '>' in Line:
                # Keep the header's trailing newline so the regex below
                # can split header from sequence on '\n'.
                ReadSeq = ReadSeq + Line
            else:
                # Sequence lines are upper-cased and concatenated.
                ReadSeq = ReadSeq + Line.strip().upper()
    # Group 1: accession number; group 2: the DNA sequence.
    MySeq_RE = r'([NX]M_\d+\.\d).+\n([AGCT]+)'
    MySeq_Comp = re.compile(MySeq_RE)
    SeqInfo = MySeq_Comp.search(ReadSeq).groups()
    return SeqInfo
def CreateDictionary(DicFile):
    """Build the genetic-code lookup table from a codon file.

    Each record in *DicFile* is ``CODON<TAB>X<TAB>Xxx`` (codon,
    one-letter amino-acid code, three-letter code).  Returns a dict
    mapping each codon to the tuple ``(one_letter, three_letter)``.

    Fixes over the original: the file is opened with ``with`` (the
    original ``close()`` was unreachable after ``return``), the regex
    is a raw string, and the manual index-juggling loop over findall()
    results is replaced by direct tuple unpacking.
    """
    # codon \t one-letter code (B/J/U/X/Z are not amino acids) \t three-letter code
    MyDic_RE = r'([ATGC]{3})\t([^BJUXZ])\t([A-Z][a-z]{2})'
    MyDic_Comp = re.compile(MyDic_RE)
    Data2 = ''
    with open(DicFile, 'r') as MyFile:
        # Concatenate all (stripped) lines; the regex re-finds each record.
        for Line in MyFile:
            Data2 = Data2 + Line.strip()
    GENCODE = {}
    for Codon, OneLetter, ThreeLetter in MyDic_Comp.findall(Data2):
        GENCODE[Codon] = (OneLetter, ThreeLetter)
    return GENCODE
def ComplementaryGenerator(SeqName):
    """Return the reverse complement of *SeqName*, written 5'->3'."""
    # Complement table for the four DNA bases.
    pairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    # Walk the plus strand 3'->5' and complement base by base, which
    # yields the complementary strand in 5'->3' orientation.
    return ''.join(pairs[base] for base in reversed(SeqName))
def TranslateDNA(DNASeq, COMPSEQ, DicFile, ExportName):
    """Translate both DNA strands in all three reading frames.

    Prints each protein sequence and writes it to ``<ExportName>.txt``.

    DNASeq     -- plus strand, 5'->3'
    COMPSEQ    -- minus strand (reverse complement), 5'->3'
    DicFile    -- dict mapping codon -> (one-letter, three-letter) code
    ExportName -- output file name without the .txt extension
    """
    MyFile = open(ExportName + '.txt', 'w')
    Counter = '+'  # tracks which strand is currently being translated
    for Seq in (DNASeq, COMPSEQ):
        if Counter == '+':  # first pass: plus-strand header
            print('\t\t\t\t\t\t\t\t\t\tPLUS STRAND\n')
            MyFile.write('\t\t\t\t\t\t\t\t\t\tPLUS STRAND\n')
        if Counter == '-':  # second pass: minus-strand header
            MyFile.write('\n\t\t\t\t\t\t\t\t\t\tMINUS STRAND\n\n')
            print('\n\t\t\t\t\t\t\t\t\t\tMINUS STRAND\n\n')
        for CodingFrame in range(0, 3):  # the three reading frames (offsets 0, 1, 2)
            ProtSeq = ''
            # Frame header (1-based, since range() starts at 0).
            MyFile.write('\n\t\t\t\t\t\t\t\t\t\t Frame ' + str(CodingFrame + 1) + '\n\n')
            print('\n\t\t\t\t\t\t\t\t\t\t Frame ' + str(CodingFrame + 1) + '\n\n')
            while True:
                # Stop once the next full codon would run past the end
                # of the sequence.
                if CodingFrame > (((len(Seq) / 3) - 1) * 3):
                    break
                # Assemble the current codon ...
                SubSeq = Seq[CodingFrame] + Seq[CodingFrame + 1] + Seq[CodingFrame + 2]
                # ... translate it to its one-letter code and append.
                ProtSeq = ProtSeq + DicFile[SubSeq][0]
                CodingFrame += 3  # advance to the next codon
            print(ProtSeq)
            MyFile.write(ProtSeq + '\n')
        Counter = '-'  # after the first pass, switch to the minus strand
    MyFile.close()
def Body():
    """Main workflow: read the FASTA file named on the command line,
    build the genetic-code table, and translate both strands."""
    DNAList = ReadFasta(sys.argv[1])  # (accession number, DNA sequence)
    GenCode = CreateDictionary('GeneticCode_standard.csv')
    # Minus strand generated from the extracted plus strand.
    CompSeq = ComplementaryGenerator(DNAList[1])
    # DNAList[1] holds the DNA sequence; DNAList[0] the accession
    # number, which doubles as the output file name.
    Protein = TranslateDNA(DNAList[1], CompSeq, GenCode, DNAList[0])
if __name__ == '__main__':
    import sys
    import re
    # Validate the command line before touching sys.argv[1]: the
    # original checked len(sys.argv) and then *unconditionally* indexed
    # sys.argv[1], raising IndexError when no argument was given.  The
    # chain below checks in order and only runs Body() when the
    # argument is present and looks like a FASTA file.
    if len(sys.argv) < 2:
        print('Please, introduce as an argument the file you want to translate.')
    elif not('.fasta') in sys.argv[1]:
        print('You have to introduce a fasta sequence')
    else:
        Body()
|
[
"juanenriqueztraba@gmail.com"
] |
juanenriqueztraba@gmail.com
|
3e3fb85fab836eb760653086cf3ecdb7a8ab1a66
|
b75c3da63a9f6c6fbc37c6ccfa12578d93935624
|
/leetcode/241. Different Ways to Add Parentheses/Python3/diff_ways.py
|
e3937693534511eb81a14b1075bdf327c461071e
|
[] |
no_license
|
bryand1/solutions
|
ce5b09b14b73bd6da214eac159af7d4439cdd6dd
|
319741d720e1f0bb1b94629df410de392cbc755c
|
refs/heads/master
| 2021-07-09T08:39:45.301754
| 2019-03-01T16:31:36
| 2019-03-01T16:31:36
| 144,401,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
from operator import add, sub, mul

# Dispatch table: operator character -> binary function.
op = {'+': add, '-': sub, '*': mul}


class Solution:
    """LeetCode 241: all results from fully parenthesizing an expression."""

    def diffWaysToCompute(self, expr):
        """
        :type input: str
        :rtype: List[int]
        """
        tokens = self.parse(expr)
        return self.diffWaysToComputeRec(tokens, 0, len(tokens))

    def diffWaysToComputeRec(self, tokens, lo, hi):
        """Return every value obtainable from tokens[lo:hi]."""
        # Base case: a single token, which is a number.
        if hi - lo == 1:
            return [tokens[lo]]
        results = []
        # Operators sit at odd offsets; split the expression at each one.
        for mid in range(lo + 1, hi, 2):
            apply_op = tokens[mid]
            left_vals = self.diffWaysToComputeRec(tokens, lo, mid)
            right_vals = self.diffWaysToComputeRec(tokens, mid + 1, hi)
            results.extend(apply_op(a, b) for a in left_vals for b in right_vals)
        return results

    def parse(self, expr):
        """Tokenize *expr* into [int, fn, int, fn, ..., int]."""
        tokens = []
        num = 0
        for ch in expr:
            if ch.isdigit():
                # Accumulate multi-digit numbers.
                num = 10 * num + int(ch)
            else:
                tokens.append(num)
                num = 0
                tokens.append(op[ch])
        tokens.append(num)
        return tokens
|
[
"bryand1@gmail.com"
] |
bryand1@gmail.com
|
c52e35b3ac6d4beb8a90ab36aef6698e7deb2c12
|
4c3e992678341ccaa1d4d14e97dac2e0682026d1
|
/addons/purchase/models/stock_config_settings.py
|
f07bb2fa1319b3603ec86c3a653f03c531b9923b
|
[] |
no_license
|
gahan-corporation/wyatt
|
3a6add8f8f815bd26643e1e7c81aea024945130d
|
77e56da362bec56f13bf0abc9f8cf13e98461111
|
refs/heads/master
| 2021-09-03T18:56:15.726392
| 2018-01-08T02:54:47
| 2018-01-08T02:54:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from gerp import api, fields, models
class ResConfigSettings(models.TransientModel):
    """Purchase-related settings on the general settings screen.

    Persists ``use_po_lead`` as the ``purchase.use_po_lead`` system
    parameter and mirrors ``po_lead`` from the current company.
    """
    _inherit = 'res.config.settings'

    # Mirrors company_id.po_lead (vendor lead-time safety margin, in days).
    po_lead = fields.Float(related='company_id.po_lead')
    use_po_lead = fields.Boolean(
        string="Security Lead Time for Purchase",
        oldname='default_new_po_lead',
        help="Margin of error for vendor lead times. When the system generates Purchase Orders for reordering products,they will be scheduled that many days earlier to cope with unexpected vendor delays.")

    @api.onchange('use_po_lead')
    def _onchange_use_po_lead(self):
        # Reset the margin when the feature is switched off in the UI.
        if not self.use_po_lead:
            self.po_lead = 0.0

    def get_values(self):
        """Load use_po_lead from the ir.config_parameter store."""
        res = super(ResConfigSettings, self).get_values()
        res.update(
            use_po_lead=self.env['ir.config_parameter'].sudo().get_param('purchase.use_po_lead')
        )
        return res

    def set_values(self):
        """Persist use_po_lead to the ir.config_parameter store."""
        super(ResConfigSettings, self).set_values()
        self.env['ir.config_parameter'].sudo().set_param('purchase.use_po_lead', self.use_po_lead)
[
"duchess@gahan-corporation.com"
] |
duchess@gahan-corporation.com
|
a710d37abf1d105c4e455d43f64b6efdd6cb4977
|
3a9f2b3d79cf214704829427ee280f4b49dca70a
|
/saigon/rat/RuckusAutoTest/scripts/zd/ats_ZD_Combo_SNMP_V2_Continue_Walking.py
|
566d4e7bb980151a16a79e016fed8da87f2030ed
|
[] |
no_license
|
jichunwei/MyGitHub-1
|
ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791
|
f826fc89a030c6c4e08052d2d43af0b1b4b410e3
|
refs/heads/master
| 2021-01-21T10:19:22.900905
| 2016-08-20T03:34:52
| 2016-08-20T03:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,449
|
py
|
'''
Continue walking via SNMP V2
Continue walking nodes many times, can get information correctly.
1. Continue walking system objects 50 times via SNMP V2, compare the information from SNMP and CLI.
expect result: All steps should result properly.
How to:
1) Get system information via ZD CLI
2) Continue walking system object 50 times, parsing the system result.
3) Compare the result for each walking are same
4) Compare the result from SNMP and CLI are same
Created on 2011-4-14
@author: cherry.cheng@ruckuswireless.com
'''
import sys
import libZD_TestSuite as testsuite
from RuckusAutoTest.common import lib_KwList as kwlist
def define_test_cfg(tcfg):
    """Expand *tcfg* into the ordered list of combo-test steps.

    Each entry is ``(test_params, test_name, common_name, exc_level,
    is_cleanup)``, the shape expected by testsuite.addTestCase.
    """
    test_cfgs = []

    # Record the apmgr/stamgr process IDs before the test run.
    test_name = 'CB_Scaling_ZD_CLI_Process_Check'
    common_name = 'apmgr and stamgr daemon pid mark'
    param_cfg = dict()
    test_cfgs.append((param_cfg, test_name, common_name, 0, False))

    # Disable both SNMP agent versions first so the V2 enable below
    # starts from a clean state.
    test_name = 'CB_ZD_CLI_Set_SNMP_Agent'
    common_name = 'Disable SNMP Agent Version 2'
    test_cfgs.append(({'snmp_agent_cfg': {'version': 2, 'enabled': False}}, test_name, common_name, 0, False))

    test_name = 'CB_ZD_CLI_Set_SNMP_Agent'
    common_name = 'Disable SNMP Agent Version 3'
    test_cfgs.append(({'snmp_agent_cfg': {'version': 3, 'enabled': False}}, test_name, common_name, 0, False))

    test_name = 'CB_ZD_CLI_Set_SNMP_Agent'
    common_name = 'Enable SNMP Agent Version 2'
    test_cfgs.append(({'snmp_agent_cfg': tcfg['set_snmp_agent_cfg']}, test_name, common_name, 0, False))

    test_case_name = '[Continue Walking]'

    # Baseline: fetch the system info via CLI for later comparison.
    test_name = 'CB_ZD_CLI_Get_Sys_Basic_Info'
    common_name = '%sGet System Info via ZD CLI' % (test_case_name,)
    test_cfgs.append(({}, test_name, common_name, 1, False))

    # Walk the system objects repeatedly (tcfg['times']) via SNMP.
    test_name = 'CB_ZD_SNMP_Contine_Walking_Sys_Basic_Info'
    common_name = '%sVerify continue walking to get system basic info' % (test_case_name,)
    test_cfgs.append(({'snmp_agent_cfg': tcfg['set_snmp_agent_cfg'],
                       'snmp_cfg': tcfg['snmp_cfg'],
                       'times': tcfg['times']},
                      test_name, common_name, 2, False))

    # Cross-check the SNMP result against the CLI baseline.
    test_name = 'CB_ZD_SNMP_Verify_Sys_Basic_Info_SNMPGet_CLIGet'
    common_name = '%sVerify System Info between SNMP Get and CLI Get' % (test_case_name,)
    test_cfgs.append(({}, test_name, common_name, 2, False))

    # Verify the daemons did not restart during the run.
    test_name = 'CB_Scaling_ZD_CLI_Process_Check'
    common_name = 'apmgr and stamgr daemon pid checking.'
    param_cfg = dict()
    test_cfgs.append((param_cfg, test_name, common_name, 0, False))

    return test_cfgs
def define_test_parameters(tbcfg):
    """Build the static test-parameter dictionary: SNMP v2 agent
    settings, SNMP client settings, and the walk repeat count.

    *tbcfg* is accepted for interface compatibility; the returned
    values are constants.
    """
    agent_cfg = {
        'version': 2,
        'enabled': True,
        'ro_community': 'public',
        'rw_community': 'private',
        'contact': 'support@ruckuswireless.com',
        'location': 'shenzhen',
    }
    client_cfg = {
        #'ip_addr': tbcfg['ZD']['ip_addr'],
        'timeout': 20,
        'retries': 3,
    }
    return {
        'snmp_cfg': client_cfg,
        'set_snmp_agent_cfg': agent_cfg,
        'times': 50,
    }
def create_test_suite(**kwargs):
    """Create the 'Continue Walking' combo test suite for the testbed
    selected by **kwargs and add every step from define_test_cfg.

    NOTE: Python 2 code (print statement).
    """
    tb = testsuite.getTestbed2(**kwargs)
    tbcfg = testsuite.getTestbedConfig(tb)
    # IPv6 testbeds embed the ZD/AP IP versions in the suite name.
    if str(tb.tbtype) == "ZD_Stations_IPV6":
        zd_ip_version = tbcfg['ip_cfg']['zd_ip_cfg']['ip_version']
        ap_ip_version = tbcfg['ip_cfg']['ap_ip_cfg']['ip_version']
        ts_name = 'ZD SNMP V2 ZD %s AP %s - Continue Walking' % (zd_ip_version, ap_ip_version)
    else:
        ts_name = 'ZD SNMP V2 - Continue Walking'
    tcfg = define_test_parameters(tbcfg)
    ts = testsuite.get_testsuite(ts_name, 'Verify Continue Walking', combotest=True)
    test_cfgs = define_test_cfg(tcfg)

    test_order = 1
    test_added = 0
    for test_params, testname, common_name, exc_level, is_cleanup in test_cfgs:
        # addTestCase returns > 0 when the case was actually added.
        if testsuite.addTestCase(ts, testname, common_name, test_params, test_order, exc_level, is_cleanup) > 0:
            test_added += 1
            test_order += 1
            print "Add test case with test name: %s\n\t\common name: %s" % (testname, common_name)

    print "\n-- Summary: added %d test cases into test suite '%s'" % (test_added, ts.name)
if __name__ == "__main__":
_dict = kwlist.as_dict(sys.argv[1:])
create_test_suite(**_dict)
|
[
"tan@xx.com"
] |
tan@xx.com
|
6d377d17ec1cfb614d0530853ef4216679d840c8
|
c8781d3dc17202fcc1b5358475071c0a834c7f82
|
/ShowAndSearch/__init__.py
|
58b6fee75af3be485a88240f3e64ee1dfc0cc118
|
[
"Apache-2.0"
] |
permissive
|
guchengxi1994/show-and-search
|
7b73d4a7a0250a0f70cf07b0de7695d6c8051545
|
e955a6677f3cd23b1f7ed247e828a5852ec6ab20
|
refs/heads/master
| 2022-12-22T06:28:36.601500
| 2020-09-22T05:17:14
| 2020-09-22T05:17:14
| 295,630,132
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
'''
language: python
Description: package metadata for "show and search"
version: beta
Author: xiaoshuyui
Date: 2020-09-15 13:53:11
LastEditors: xiaoshuyui
LastEditTime: 2020-09-22 11:20:14
'''
# Package version and human-readable application name.
__version__ = '0.0.0'
__appname__ = 'show and search'
|
[
"528490652@qq.com"
] |
528490652@qq.com
|
a83539222531d53944838325e21dc6f020e934e1
|
4edc95b0e9f739c5faa29704d8d0fe31d6074114
|
/0x0F-python-object_relational_mapping/7-model_state_fetch_all.py
|
52c794399d96e997e84932ac6d2b888beb5f6f22
|
[] |
no_license
|
Caroll1889/holbertonschool-higher_level_programming
|
47a78074af5ec93f2e4bcf0cfb0579fb0f12c441
|
f3c222c101e05bf5876951fc7a2566f3ce0ff7e6
|
refs/heads/master
| 2020-07-22T22:51:35.948398
| 2020-02-14T16:35:29
| 2020-02-14T16:35:29
| 207,356,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/usr/bin/python3
""" """
from sys import argv
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import sqlalchemy
if __name__ == "__main__":
en = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(argv[1],
argv[2],
argv[3]))
Base.metadata.create_all(en)
Session = sessionmaker(bind=en)
session = Session()
for states in session.query(State).order_by(State.id):
print("{}: {}".format(states.id, states.name))
session.close()
|
[
"diahancaroll@hotmail.com"
] |
diahancaroll@hotmail.com
|
bbaaec7298b66a183a83e952cd94bf42b7c78062
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/jedi/api/project.py
|
eed8f3f9954ee61c107899d3452f6b026bed0a75
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 6,235
|
py
|
import os
import json
from jedi._compatibility import FileNotFoundError, NotADirectoryError
from jedi.api.environment import SameEnvironment, \
get_cached_default_environment
from jedi.api.exceptions import WrongVersion
from jedi._compatibility import force_unicode
from jedi.evaluate.sys_path import discover_buildout_paths
from jedi.evaluate.cache import evaluator_as_method_param_cache
from jedi.common.utils import traverse_parents
_CONFIG_FOLDER = '.jedi'
_CONTAINS_POTENTIAL_PROJECT = 'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in'
_SERIALIZER_VERSION = 1
def _remove_duplicates_from_path(path):
used = set()
for p in path:
if p in used:
continue
used.add(p)
yield p
def _force_unicode_list(lst):
    """Return *lst* with every element passed through force_unicode."""
    return [force_unicode(item) for item in lst]
class Project(object):
    """A Jedi project: a base directory plus sys.path configuration.

    Instances can be persisted to ``<path>/.jedi/project.json`` with
    :meth:`save` and restored with :meth:`load`.
    """
    # TODO serialize environment
    _serializer_ignore_attributes = ('_environment',)
    _environment = None

    @staticmethod
    def _get_json_path(base_path):
        # Location of the serialized project settings.
        return os.path.join(base_path, _CONFIG_FOLDER, 'project.json')

    @classmethod
    def load(cls, path):
        """
        :param path: The path of the directory you want to use as a project.
        """
        with open(cls._get_json_path(path)) as f:
            version, data = json.load(f)
        if version == 1:
            # Bug fix: object.__new__ requires the class as its first
            # argument; the original ``cls.__new__()`` raised TypeError
            # on every load.
            self = cls.__new__(cls)
            self.__dict__.update(data)
            return self
        else:
            raise WrongVersion(
                "The Jedi version of this project seems newer than what we can handle."
            )

    def __init__(self, path, **kwargs):
        """
        :param path: The base path for this project.
        :param sys_path: list of str. You can override the sys path if you
            want. By default the ``sys.path.`` is generated from the
            environment (virtualenvs, etc).
        :param smart_sys_path: If this is enabled (default), adds paths from
            local directories. Otherwise you will have to rely on your packages
            being properly configured on the ``sys.path``.
        """
        # Inner function so keyword-only-style argument validation also
        # works on Python 2 (hence the name).
        def py2_comp(path, environment=None, sys_path=None,
                     smart_sys_path=True, _django=False):
            self._path = path
            if isinstance(environment, SameEnvironment):
                self._environment = environment

            self._sys_path = sys_path
            self._smart_sys_path = smart_sys_path
            self._django = _django

        py2_comp(path, **kwargs)

    def _get_base_sys_path(self, environment=None):
        # An explicitly provided sys_path overrides everything else.
        if self._sys_path is not None:
            return self._sys_path

        # The sys path has not been set explicitly.
        if environment is None:
            environment = self.get_environment()

        sys_path = list(environment.get_sys_path())
        try:
            # Drop the '' (cwd) entry so it does not leak into projects.
            sys_path.remove('')
        except ValueError:
            pass
        return sys_path

    @evaluator_as_method_param_cache()
    def _get_sys_path(self, evaluator, environment=None):
        """
        Keep this method private for all users of jedi. However internally this
        one is used like a public method.
        """
        suffixed = []
        prefixed = []

        sys_path = list(self._get_base_sys_path(environment))
        if self._smart_sys_path:
            prefixed.append(self._path)

            if evaluator.script_path is not None:
                suffixed += discover_buildout_paths(evaluator, evaluator.script_path)

                traversed = list(traverse_parents(evaluator.script_path))

                # AFAIK some libraries have imports like `foo.foo.bar`, which
                # leads to the conclusion to by default prefer longer paths
                # rather than shorter ones by default.
                suffixed += reversed(traversed)

        if self._django:
            prefixed.append(self._path)

        path = prefixed + sys_path + suffixed
        return list(_force_unicode_list(_remove_duplicates_from_path(path)))

    def save(self):
        """Serialize this project (minus non-serializable attributes)
        to its project.json file."""
        data = dict(self.__dict__)
        for attribute in self._serializer_ignore_attributes:
            data.pop(attribute, None)

        # Bug fix: json.dump writes str, so the file must be opened in
        # text mode ('w'); the original 'wb' raised TypeError on Python 3.
        with open(self._get_json_path(self._path), 'w') as f:
            return json.dump((_SERIALIZER_VERSION, data), f)

    def get_environment(self):
        # Fall back to the cached default environment when none was set.
        if self._environment is None:
            return get_cached_default_environment()

        return self._environment

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._path)
def _is_potential_project(path):
    """True if *path* contains a marker file (setup.py, .git, ...)
    suggesting it is a project root."""
    return any(
        os.path.exists(os.path.join(path, name))
        for name in _CONTAINS_POTENTIAL_PROJECT
    )
def _is_django_path(directory):
""" Detects the path of the very well known Django library (if used) """
try:
with open(os.path.join(directory, 'manage.py'), 'rb') as f:
return b"DJANGO_SETTINGS_MODULE" in f.read()
except (FileNotFoundError, NotADirectoryError):
return False
return False
def get_default_project(path=None):
    """Walk upwards from *path* (default: cwd) and return the most
    plausible Project.

    Preference order: an explicitly saved project file, a Django root
    (manage.py containing DJANGO_SETTINGS_MODULE), any ancestor with
    project markers (setup.py, .git, ...), the first ancestor that is
    not a Python package, and finally the directory of *path* itself.
    """
    if path is None:
        path = os.getcwd()

    check = os.path.realpath(path)
    probable_path = None          # first ancestor with project markers
    first_no_init_file = None     # first ancestor without __init__.py
    for dir in traverse_parents(check, include_current=True):
        try:
            # A saved .jedi/project.json always wins.
            return Project.load(dir)
        except (FileNotFoundError, NotADirectoryError):
            pass

        if first_no_init_file is None:
            if os.path.exists(os.path.join(dir, '__init__.py')):
                # In the case that a __init__.py exists, it's in 99% just a
                # Python package and the project sits at least one level above.
                continue
            else:
                first_no_init_file = dir

        if _is_django_path(dir):
            return Project(dir, _django=True)

        if probable_path is None and _is_potential_project(dir):
            probable_path = dir

    if probable_path is not None:
        # TODO search for setup.py etc
        return Project(probable_path)

    if first_no_init_file is not None:
        return Project(first_no_init_file)

    # Fallback: use the directory containing *path*.
    curdir = path if os.path.isdir(path) else os.path.dirname(path)
    return Project(curdir)
|
[
"nicolas.holzschuch@inria.fr"
] |
nicolas.holzschuch@inria.fr
|
2f6935ac11ea708cd225be988e352fdc29e40119
|
b5313b8442b26e4a54172e55eb84d501ee4cae12
|
/run.py
|
783d9038c8e358e77f451405fc4d41584bbd6c56
|
[
"MIT"
] |
permissive
|
summeraz/ljmc
|
bca23b37c29847ebb9a763146e9f67fb32843912
|
207326f61d6be4063d06dfc2df6fb1f61dd57e27
|
refs/heads/master
| 2020-03-20T06:10:31.396309
| 2018-06-13T22:49:39
| 2018-06-13T22:49:39
| 137,241,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
from __future__ import division
import pyximport; pyximport.install()
from forcefield import *
from mc import *
from system import *
from utils import *
### Define Lennard-Jones Parameters ###
sigma = 1.0    # LJ particle diameter (reduced units)
epsilon = 1.0  # LJ well depth (reduced units)
cutoff = 2.5   # pair-interaction cutoff distance

### Define System Parameters ###
n_particles = 125      # number of LJ particles in the box
number_density = 0.5   # particles per unit volume

### Define Monte Carlo Parameters ###
temperature = 1.2 # Temperature of the simulation
dx = 0.1 # Initial maximum displacement
target = 0.5 # Target acceptance probability
n_relax = 2500 # Number of timesteps to relax from initial configuration
n_mc = 25000 # Total number of MC steps

#############################################################################

#######
# RUN #
#######

# Create the force field
forcefield = ForceField(sigma=sigma, epsilon=epsilon, cutoff=cutoff)

# Create the system
system = System(n_particles, number_density, forcefield)

# Initialize the neighborlist (skin distance beyond the cutoff)
system.build_nlist(skin=0.5)

# Create Monte Carlo instance
mc = MonteCarlo(system=system, dx=dx, temperature=temperature, target=target)

# Relax the system and optimize `dx`
mc.relax(n_relax, adjust_freq=50)

# Monte Carlo production run; writes the trajectory every `freq` steps
mc.run(traj_filename='traj.xyz', steps=n_mc, freq=100)
|
[
"andrew.z.summers@gmail.com"
] |
andrew.z.summers@gmail.com
|
95e1a0cfc755b266419200defd030c1fe6f9f3bb
|
0566cf76b456518875edecece15e763a36a4795f
|
/scrapers/megafilmeshd21_net.py
|
79e1c527816b062bae23ffbd86c1a90255b460d7
|
[] |
no_license
|
theclonedude/Scraping_BeautifulSoup_phantomjs
|
684b1f7a993e0d2555daa7a5455cf19bd29b0b1b
|
faf653feae46c21a72d13b2123cdebdb2f7c05d8
|
refs/heads/master
| 2023-03-16T19:36:14.867361
| 2018-06-14T14:21:02
| 2018-06-14T14:21:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,897
|
py
|
# coding=utf-8
from sandcrawler.scraper import ScraperBase, SimpleScraperBase
class Megafilmeshd21Net(SimpleScraperBase):
    """Search/listing scraper for megafilmeshd21.net (Portuguese
    film/TV streaming site); also covers the mirror maxfilmesonline.net."""
    BASE_URL = 'http://megafilmeshd21.net'
    OTHER_URLS = ['http://maxfilmesonline.net']

    SCRAPER_TYPES = [ ScraperBase.SCRAPER_TYPE_OSP, ]
    LANGUAGE = 'por'  # Portuguese content
    MEDIA_TYPES = [ ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV, ]
    URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING, ]

    def _fetch_search_url(self, search_term, media_type):
        # Site search endpoint: /?&s=<term>
        return '{base_url}?&s={search_term}'.format(base_url=self.BASE_URL, search_term=search_term)

    def _fetch_no_results_text(self):
        # No reliable "no results" marker text on this site.
        return None

    def _fetch_next_button(self, soup):
        # WP-PageNavi-style pagination link, if present.
        next_button = soup.select_one('a.nextpostslink')
        if next_button:
            return next_button.href
        return None

    def _parse_search_result_page(self, soup):
        """Submit one search result per 'a.thumb' link; report
        no-results when none are found."""
        found=0
        for result in soup.select('a.thumb'):
            self.submit_search_result(
                link_url=result.href,
                link_title=result.text,
                image=self.util.find_image_src_or_none(result, 'img'),
            )
            found=1
        if not found:
            return self.submit_search_no_results()

    def _parse_parse_page(self, soup):
        """Submit one parse result per player link, attaching season and
        episode numbers when the page <h1> title reveals them."""
        index_page_title = self.util.get_page_title(soup)
        series_season = series_episode = None
        title = soup.select_one('h1')
        if title and title.text:
            series_season, series_episode = self.util.extract_season_episode(title.text)
        for link in soup.select('nav.lista-players a.btn-player'):
            self.submit_parse_result(
                index_page_title=index_page_title,
                link_url=link['data-href'],
                link_title=link.text,
                series_season=series_season,
                series_episode=series_episode,
            )
|
[
"stryokka@gmail.com"
] |
stryokka@gmail.com
|
c24872cd28d1af12915e4db50558f458d2ddbe15
|
9af204535dfc39d5c9a2dc4e2daf538cb2454caf
|
/src/tasks/shapes/parameters.py
|
e9f86a6d13a821edc1beebb17b97b1a42a49e26f
|
[] |
no_license
|
kevinyu/reimagined-robot
|
c94f51e1b025dc3636a40b06cf8d914238596f9e
|
970e451a70b43d1cd7ac7f8f3700ea8e9eb88aa3
|
refs/heads/master
| 2021-01-20T15:57:41.095221
| 2017-05-24T01:40:25
| 2017-05-24T01:40:51
| 90,803,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
import os
import numpy as np
import theano
import theano.tensor as T
import config
from utils import float_x
from utils.complex import ComplexTuple
from shapes.properties import properties
from shapes.objects import shapes
if os.path.exists(os.path.join(config.SAVE_DIR, "S0.npy")):
S0_array = np.load(os.path.join(config.SAVE_DIR, "S0.npy"))
S0 = ComplexTuple(
theano.shared(float_x(S0_array[0])),
theano.shared(float_x(S0_array[1]))
)
else:
S0 = ComplexTuple(
theano.shared(float_x(np.zeros(config.DIM))),
theano.shared(float_x(np.zeros(config.DIM)))
)
# D_table is a dict mapping categories to dictionaries of hypervectors
D_table = {}
filename = os.path.join(config.SAVE_DIR, "D_Shapes.npy")
if os.path.exists(filename):
darray = np.load(filename)
D_table["Shapes"] = ComplexTuple(
theano.shared(float_x(darray[0])),
theano.shared(float_x(darray[1]))
)
else:
D_table["Shapes"] = ComplexTuple(
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, len(shapes))))),
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, len(shapes)))))
)
for prop in properties:
filename = os.path.join(config.SAVE_DIR, "D_{}.npy".format(prop.__name__))
if os.path.exists(filename):
darray = np.load(filename)
D_table[prop.__name__] = ComplexTuple(
theano.shared(float_x(darray[0])),
theano.shared(float_x(darray[1]))
)
else:
D_table[prop.__name__] = ComplexTuple(
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, len(prop.params))))),
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, len(prop.params)))))
)
# Generate all bound combinations of available objects with properties
_D_combined = D_table["Shapes"]
for i, prop in enumerate(properties):
# each iteration increases the dimensionality of D_combined by one
# the last dimension corresponds to the ith property
i += 1
_D_combined = (
_D_combined.dimshuffle([0] + range(1, i+1) + ["x"]) *
D_table[prop.__name__].dimshuffle(*[[0] + (["x"] * i) + [1]])
)
D = _D_combined.flatten(2)
# Concatenate a single vector representing background to D
bg_filename = os.path.join(config.SAVE_DIR, "D_bg.npy")
if os.path.exists(bg_filename):
darray = np.load(bg_filename)
bg_vector = ComplexTuple(
theano.shared(float_x(darray[0])),
theano.shared(float_x(darray[1]))
)
else:
bg_vector = ComplexTuple(
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, 1)))),
theano.shared(float_x(0.01 * np.random.uniform(-1, 1, size=(config.DIM, 1))))
)
D = ComplexTuple(
T.concatenate([D.real, bg_vector.real], axis=1),
T.concatenate([D.imag, bg_vector.imag], axis=1)
)
learn_params = [bg_vector.real, bg_vector.imag]
for D_prop in D_table.values():
learn_params += [D_prop.real, D_prop.imag]
learn_params = [S0.real, S0.imag] + learn_params
def save_params():
s0_filename = os.path.join(config.SAVE_DIR, "S0")
np.save(s0_filename, np.array(list(S0.get_value())))
D_filename = os.path.join(config.SAVE_DIR, "D_Shapes")
np.save(D_filename, np.array(list(D_table["Shapes"].get_value())))
for prop in properties:
D_filename = os.path.join(config.SAVE_DIR, "D_{}".format(prop.__name__))
np.save(D_filename, np.array(list(D_table[prop.__name__].get_value())))
np.save(os.path.join(config.SAVE_DIR, "D_bg"), np.array(list(bg_vector.get_value())))
|
[
"thekevinyu@gmail.com"
] |
thekevinyu@gmail.com
|
ca1fc2d69082f42093ba01898f4e9cbfde8dba16
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/identity_constraint/id_l051_xsd/__init__.py
|
5f14f91fccf3bc1c1eb8c2330c525ca13b8ee553
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 135
|
py
|
from output.models.ms_data.identity_constraint.id_l051_xsd.id_l051 import (
Root,
Uid,
)
__all__ = [
"Root",
"Uid",
]
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
3f96a324be5d36b890aa2e0eb8f0d22f9106d7b3
|
03195206540b44d74f86801c5e58b2b731c863bf
|
/pi/mission_control/debile/pousse_feu.py
|
0c57edb5ec2e019d69a3560ce3287b3d729de533
|
[] |
no_license
|
clement91190/eurobot
|
7f242f15b966216ef81d4851c338493ccf056c26
|
e61c9b3a32c1ee1417d807be6c4f97032a7e55a6
|
refs/heads/master
| 2021-01-25T09:53:38.517607
| 2014-05-30T09:16:40
| 2014-05-30T09:16:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
from mission_control.mission import Mission, SuccessOut, FailOut
from utils.coord import Coord
from mae_generator.mae import MAE, InitState, debugger
def get_mission(com_state_factory):
#raise NotImplementedError(" mission non codee")
return Mission(" pousse_feu ", Coord(-800, 600, 180), MAEPousseFeu(com_state_factory))
class MAEPousseFeu(MAE):
def __init__(self, com_state_factory):
MAE.__init__(self)
self.sf = com_state_factory
#states
init = InitState()
#tape = self.sf.get_pmi_tape()
avance_triangle1 = self.sf.get_bf_fw(Coord(200))
out = SuccessOut()
out2 = FailOut()
#transitions
init.add_instant_transition(avance_triangle1)
avance_triangle1.add_afini_transition(out)
avance_triangle1.add_bloc_transition(out)
avance_triangle1.add_advd_transition(out)
self.state_list = [
init, avance_triangle1, out, out2
]
self.reinit_state()
if __name__ == "__main__":
from com_state_factory import ComStateFactory
from communication import PipoCommunication
com = PipoCommunication()
mae = MAEPousseFeu(ComStateFactory(com))
com.set_global_mae(mae)
#mae = MAEGlobal()
debugger(mae)
|
[
"clem.jambou@gmail.com"
] |
clem.jambou@gmail.com
|
c08a65cd4eaa895be6579ac952edbbb1cfd00cc9
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/kindergarten-garden/d338583c2ba84654b20ddcf61ff29827.py
|
c739d565e7f3e97d21da436f3a80d769200473d7
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
from collections import defaultdict, deque
from typing import Dict, Iterable, List
PLANTS = {
'R': 'Radishes',
'C': 'Clover',
'G': 'Grass',
'V': 'Violets',
}
STUDENTS = (
'Alice', 'Bob', 'Charlie', 'David',
'Eve', 'Fred', 'Ginny', 'Harriet',
'Ileana', 'Joseph', 'Kincaid', 'Larry'
)
class Garden:
def __init__(self, garden: str, students: Iterable[str]=STUDENTS, grouping: int=2) -> None:
self.students = defaultdict(List[str]) # type: Dict[str, List[str]]
if isinstance(students, list):
students.sort()
for group in garden.split():
group_queue = deque(group)
for student in students:
for x in range(grouping):
self.students[student].append(PLANTS[group_queue.popleft()])
if not group_queue:
break
def plants(self, student: str) -> List[str]:
return self.students.get(student)
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
c31badd4543416eb333ff1a8f62aac8c166188c5
|
f94f9ddd8f7ec105161366461275f16b325d9c3e
|
/l2hmc-qcd/train.py
|
d9568bd70244bddda72a9c96ba1f88273e2b46d6
|
[
"Apache-2.0"
] |
permissive
|
FigTop/l2hmc-qcd
|
da086545b94f5ff2da835b0f2c440e077a28e15a
|
0003da4f6c76172a27dbdec223393ce04cf73805
|
refs/heads/master
| 2023-04-21T21:20:43.724346
| 2021-05-22T19:28:21
| 2021-05-22T19:28:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,794
|
py
|
"""
train.py
Train 2D U(1) model using eager execution in tensorflow.
"""
# noqa: E402, F401
# pylint:disable=wrong-import-position,invalid-name, unused-import,
# pylint: disable=ungrouped-imports
from __future__ import absolute_import, division, print_function
import os
import json
import contextlib
import logging
import tensorflow as tf
from config import BIN_DIR
import utils
try:
import horovod
import horovod.tensorflow as hvd
try:
RANK = hvd.rank()
except ValueError:
hvd.init()
RANK = hvd.rank()
HAS_HOROVOD = True
logging.info(f'using horovod version: {horovod.__version__}')
logging.info(f'using horovod from: {horovod.__file__}')
GPUS = tf.config.experimental.list_physical_devices('GPU')
for gpu in GPUS:
tf.config.experimental.set_memory_growth(gpu, True)
if GPUS:
gpu = GPUS[hvd.local_rank()]
tf.config.experimental.set_visible_devices(gpu, 'GPU')
except (ImportError, ModuleNotFoundError):
HAS_HOROVOD = False
from utils.file_io import console
import utils.file_io as io
from utils.attr_dict import AttrDict
from utils.parse_configs import parse_configs
from dynamics.gauge_dynamics import build_dynamics
from utils.training_utils import train, train_hmc
from utils.inference_utils import run, run_hmc, run_inference_from_log_dir
# os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger = logging.getLogger(__name__)
logging_datefmt = '%Y-%m-%d %H:%M:%S'
logging_level = logging.WARNING
logging_format = (
'%(asctime)s %(levelname)s:%(process)s:%(thread)s:%(name)s:%(message)s'
)
logging.info(f'using tensorflow version: {tf.__version__}')
logging.info(f'using tensorflow from: {tf.__file__}')
@contextlib.contextmanager
def experimental_options(options):
"""Run inside contextmanager with special options."""
old_opts = tf.config.optimizer.get_experimental_options()
tf.config.optimizer.set_experimental_options(options)
try:
yield
finally:
tf.config.optimizer.set_experimental_options(old_opts)
def restore_flags(flags, train_dir):
"""Update `FLAGS` using restored flags from `log_dir`."""
rf_file = os.path.join(train_dir, 'FLAGS.z')
if os.path.isfile(rf_file):
try:
restored = io.loadz(rf_file)
restored = AttrDict(restored)
io.log(f'Restoring FLAGS from: {rf_file}...')
flags.update(restored)
except (FileNotFoundError, EOFError):
pass
return flags
def main(configs, num_chains=None, run_steps=None):
"""Main method for training."""
hmc_steps = configs.get('hmc_steps', 0)
# tf.keras.backend.set_floatx('float32')
log_file = os.path.join(os.getcwd(), 'log_dirs.txt')
x = None
log_dir = configs.get('log_dir', None)
beta_init = configs.get('beta_init', None)
beta_final = configs.get('beta_final', None)
if log_dir is not None: # we want to restore from latest checkpoint
configs.restore = True
run_steps = configs.get('run_steps', None)
train_steps = configs.get('train_steps', None)
restored = restore_flags(configs,
os.path.join(configs.log_dir, 'training'))
for key, val in configs.items():
if key in restored:
if val != restored[key]:
io.log(f'Restored {key}: {restored[key]}')
io.log(f'Using {key}: {val}')
configs.update({
'train_steps': train_steps,
'run_steps': run_steps,
})
if beta_init != configs.get('beta_init', None):
configs.beta_init = beta_init
if beta_final != configs.get('beta_final', None):
configs.beta_final = beta_final
else: # New training session
train_steps = configs.get('train_steps', None)
run_steps = configs.get('run_steps', None)
timestamps = AttrDict({
'month': io.get_timestamp('%Y_%m'),
'time': io.get_timestamp('%Y-%M-%d-%H%M%S'),
'hour': io.get_timestamp('%Y-%m-%d-%H'),
'minute': io.get_timestamp('%Y-%m-%d-%H%M'),
'second': io.get_timestamp('%Y-%m-%d-%H%M%S'),
})
configs.log_dir = io.make_log_dir(configs, 'GaugeModel', log_file,
timestamps=timestamps)
io.write(f'{configs.log_dir}', log_file, 'a')
configs.restore = False
if hmc_steps > 0:
# x, _, eps = train_hmc(args)
x, dynamics_hmc, _, hflags = train_hmc(configs,
num_chains=num_chains)
# dirs_hmc = hflags.get('dirs', None)
# args.dynamics_config['eps'] = dynamics_hmc.eps.numpy()
_ = run(dynamics_hmc, hflags, save_x=False)
if num_chains is None:
num_chains = configs.get('num_chains', 15)
x, dynamics, train_data, configs = train(configs, x=x, make_plots=True,
num_chains=num_chains)
if run_steps is None:
run_steps = configs.get('run_steps', 50000)
# ====
# Run inference on trained model
if run_steps > 0:
# run_steps = args.get('run_steps', 125000)
log_dir = configs.log_dir
beta = configs.get('beta_final')
if configs.get('small_batch', False):
batch_size = 256
old_shape = configs['dynamics_config']['x_shape']
new_shape = (batch_size, *old_shape[1:])
configs['dynamics_config']['x_shape'] = new_shape
dynamics = build_dynamics(configs, log_dir=log_dir)
x = x[:batch_size]
results = run(dynamics, configs, x, beta=beta, make_plots=True,
therm_frac=0.1, num_chains=num_chains, save_x=False)
try:
run_data = results.run_data
run_dir = run_data.dirs['run_dir']
dataset = run_data.save_dataset(run_dir, therm_frac=0.)
except:
# TODO: Properly catch exception (if thrown)
pass
# _ = run_inference_from_log_dir(log_dir=log_dir,
# run_steps=run_steps,
# beta=beta,
# num_chains=num_chains,
# batch_size=batch_size,
# therm_frac=0.2,
# make_plots=True,
# train_steps=0,
# x=xbatch)
# Run with random start
# _ = run(dynamics, args)
# # Run HMC
# args.hmc = True
# args.dynamics_config['eps'] = 0.15
# hmc_dir = os.path.join(args.log_dir, 'inference_hmc')
# _ = run_hmc(args=args, hmc_dir=hmc_dir)
if __name__ == '__main__':
timestamp = io.get_timestamp('%Y-%m-%d-%H%M')
# debug_events_writer = tf.debugging.experimental.enable_dump_debug_info(
# debug_dir, circular_buffer_size=-1,
# tensor_debug_mode="FULL_HEALTH",
# )
CONFIGS = parse_configs()
CONFIGS = AttrDict(CONFIGS.__dict__)
if CONFIGS.get('debug', False):
logging_level = logging.DEBUG
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
else:
logging_level = logging.WARNING
console.log(f'CONFIGS: {dict(**CONFIGS)}')
# io.print_dict(CONFIGS)
main(CONFIGS)
# if RANK == 0:
# console.save_text(os.path.join(os.getcwd(), 'train.log'), styles=False)
#
# debug_events_writer.FlushExecutionFiles()
# debug_events_writer.FlushNonExecutionFiles()
|
[
"saforem2@gmail.com"
] |
saforem2@gmail.com
|
325cc25af1d6dc78baf0a0a9ddce7df6d98af533
|
a2cf2e8e5bf2c5604071c22356fb94bb5e6bcc13
|
/190820/working_order.py
|
641f5dafc8ca37db1be143ba119f97f4bfa232db
|
[] |
no_license
|
baambox5/algorithm
|
a9a3b05d1e87c6bf713aca1770ea1a2e0c728120
|
ce28170db4277faaabbc4a06602aafab1a1129a3
|
refs/heads/master
| 2020-09-01T11:19:17.484337
| 2019-12-19T12:45:29
| 2019-12-19T12:45:29
| 218,948,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
import sys
sys.stdin = open('working_order.txt', 'r')
# for test_case in range(1, 11):
# V, E = tuple(map(int, input().split()))
# G = [[] for _ in range(V + 1)]
# condition = [0] * (V + 1)
# visit = [0] * (V + 1)
# arr = list(map(int, input().split()))
# stack_list = []
# for i in range(len(arr)):
# if i % 2:
# condition[arr[i]] += 1
# else:
# G[arr[i]].append(arr[i + 1])
# print('#{}'.format(test_case), end=' ')
# for i in range(1, V + 1):
# if not condition[i] and not visit[i]:
# stack_list.append(i)
# visit[i] = 1
# print('{}'.format(i), end=' ')
# for j in G[i]:
# condition[j] -= 1
# while stack_list:
# for w in G[i]:
# if not condition[w] and not visit[w]:
# visit[w] = 1
# stack_list.append(i)
# print('{}'.format(w), end=' ')
# i = w
# for j in G[w]:
# condition[j] -= 1
# break
# else:
# i = stack_list.pop()
# print()
def dfs(v):
visit[v] = 1
print('{}'.format(v), end=' ')
for w in G[v]:
condition[w] -= 1
if not condition[w] and not visit[w]:
dfs(w)
for test_case in range(1, 11):
V, E = tuple(map(int, input().split()))
G = [[] for _ in range(V + 1)]
condition = [0] * (V + 1)
visit = [0] * (V + 1)
arr = list(map(int, input().split()))
for i in range(len(arr)):
if i % 2:
condition[arr[i]] += 1
else:
G[arr[i]].append(arr[i + 1])
print('#{}'.format(test_case), end=' ')
for i in range(1, V + 1):
if not condition[i] and not visit[i]:
dfs(i)
print()
|
[
"baamboxo@gmail.com"
] |
baamboxo@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.