blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fde9051a268cde6daf399cea7a982312c8d7b75c | d109f8d0597c20ad69fe2d07809bdf74cf942339 | /antinex_core/send_results_to_broker.py | 522f209c69f55faf51674f6a18575a0ebbefb4a1 | [
"Apache-2.0"
] | permissive | eos21/antinex-core | f565b472ad09a24e267b2ce94521045ca51b7ccf | 6d179f84300a642867997b55b1f7c5a1b4f8cfa0 | refs/heads/master | 2020-04-17T18:13:02.218873 | 2018-12-07T20:06:17 | 2018-12-07T20:06:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,860 | py | import json
import pandas as pd
from celery import Celery
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.consts import SUCCESS
from antinex_utils.consts import ERROR
from antinex_utils.consts import FAILED
log = build_colorized_logger(
name='send_results')
def send_results_to_broker(
        loc,
        final_results):
    """send_results_to_broker

    Publish the worker's prediction results back to the REST API by sending
    a Celery task to the broker described in ``loc``.

    :param loc: api-generated dictionary for where to send the results
                (broker url, ssl options, queue, task name, manifest, ...)
    :param final_results: prediction results from the worker; its "data"
                          member is mutated in place (non-serializable
                          entries are popped, then restored before return)
    :returns: SUCCESS when a publish was attempted, FAILED when there was
              nothing to send, ERROR only if neither branch ran
    """
    log.info((
        'sending back={}').format(
            loc))

    status = ERROR

    # originals of the non-serializable members; restored before returning
    org_model = None
    org_rounded = None
    org_train_scaler = None
    org_test_scaler = None
    org_train_scaler_dataset = None
    org_test_scaler_dataset = None

    # NOTE(review): both operands of this condition are identical; one side
    # was presumably meant to check `loc` instead -- confirm before changing
    if len(final_results) > 0 and len(final_results) > 0:
        # convert pandas-backed members into plain json-serializable records
        if final_results["data"]["sample_predictions"]:
            final_results["data"]["sample_predictions"] = json.loads(
                pd.Series(
                    final_results["data"]["sample_predictions"]).to_json(
                        orient="records"))
        if final_results["data"]["rounded"]:
            final_results["data"]["rounded"] = json.loads(
                pd.Series(
                    final_results["data"]["rounded"]).to_json(
                        orient="records"))

        final_results["data"].pop("predictions", None)
        # keep a json representation of the model in the payload
        final_results["data"]["model_json"] = \
            final_results["data"]["model"].to_json()

        # remove values that cannot be serialized to json (for now)
        org_model = final_results["data"].pop("model", None)
        org_rounded = final_results["data"].pop("rounded_predictions", None)
        org_train_scaler = final_results["data"].pop("scaler_train", None)
        org_test_scaler = final_results["data"].pop("scaler_test", None)
        org_train_scaler_dataset = final_results["data"].pop(
            "scaled_train_dataset", None)
        org_test_scaler_dataset = final_results["data"].pop(
            "scaled_test_dataset", None)

        # publish destination, all provided by the API in `loc`
        source = loc["source"]
        auth_url = loc["auth_url"]
        ssl_options = loc["ssl_options"]
        queue_name = loc["queue"]
        task_name = loc["task_name"]
        delivery_mode = loc["delivery_mode"]
        manifest = loc["manifest"]
        retries = loc.get(
            "retries",
            100000)

        log.debug(("CORERES - sending response back to source={} "
                   "ssl={} queue={} task={}")
                  .format(
                      source,
                      ssl_options,
                      queue_name,
                      task_name))

        send_data_to_rest_api = {
            "results": final_results,
            "manifest": manifest
        }

        broker_settings = {
            "broker_url": auth_url,
            "broker_use_ssl": ssl_options
        }

        try:
            # a throwaway Celery app used purely as a publisher
            app = Celery("core-publish-results")
            log.info(("creating celery app auth={} ssl={}")
                     .format(
                         auth_url,
                         ssl_options))
            app.conf.update(
                **broker_settings)

            # Celery task routing and queue
            log.info(("sending response queue={} task={} retries={}")
                     .format(
                         queue_name,
                         task_name,
                         retries))
            task_id = app.send_task(
                task_name,
                args=[send_data_to_rest_api],
                queue=queue_name,
                delivery_mode=delivery_mode,
                retries=retries)
            log.info(("task.id={}")
                     .format(
                         task_id))
        except Exception as e:
            # best-effort publish: failures are logged, never raised, and
            # status is still set to SUCCESS below
            log.info(("Failed to publish to core req={} with ex={}")
                     .format(
                         str(final_results)[0:32],
                         e))
        # try/ex

        status = SUCCESS
        log.info(("send_results_to_broker - done"))
    else:
        log.info(("CORERES - nothing to send back final_results={} ")
                 .format(
                     final_results))
        status = FAILED
    # publish to the core if enabled

    # put this back into the results
    if org_model:
        final_results["data"]["model"] = org_model
    if org_rounded:
        final_results["data"]["rounded_predictions"] = org_rounded

    # could be improved by checking assignment with a list
    final_results["data"]["scaler_train"] = org_train_scaler
    final_results["data"]["scaler_test"] = org_test_scaler
    final_results["data"]["scaled_train_dataset"] = org_train_scaler_dataset
    final_results["data"]["scaled_test_dataset"] = org_test_scaler_dataset

    return status
# end of send_results_to_broker
| [
"jay.p.h.johnson@gmail.com"
] | jay.p.h.johnson@gmail.com |
66d3256171b55d2e6dedd66068e4d467e0c34f3c | 2937d60b7f5259b4899ba5af08146bd874529a67 | /67 Instnce variable.py | 87ab62b129147867f1ad9f8bd4412eaf9e5157d1 | [] | no_license | gourav47/Let-us-learn-python | 9a2302265cb6c47e74863359c79eef5a3078358a | b324f2487de65b2f073b54c8379c1b9e9aa36298 | refs/heads/master | 2021-06-27T03:33:27.483992 | 2021-01-07T12:26:16 | 2021-01-07T12:26:16 | 204,323,390 | 1 | 1 | null | 2020-07-19T14:25:12 | 2019-08-25T16:53:56 | Python | UTF-8 | Python | false | false | 241 | py | class Account:
## def __init__(self,a,b):
## self.accno=a
## self.balance=b
def f1(self,a,b):
self.accno=a
self.balance=b
acc1=Account()
acc1.accno=102
acc1.balance=6000
print(acc1.__dict__)
| [
"noreply@github.com"
] | gourav47.noreply@github.com |
3865fb102c37c18c4c7802879d66bab9f72e9e9a | c60d956f0def89f2afdc014fce2218deef6096f0 | /tools/aws/nuke_clusters.py | 5d87866522ffe08ae7dff467cb0015f36b531904 | [
"Apache-2.0"
] | permissive | criteo-forks/marathon | 9691af7936ac624c0f3d8b3e51e140b6ce9276ff | 02137d3b4fd9a56fc56966312a300982fb690300 | refs/heads/criteo/1.9.109 | 2023-03-15T23:12:32.960763 | 2023-03-10T15:39:00 | 2023-03-10T15:39:00 | 50,347,255 | 1 | 10 | Apache-2.0 | 2023-09-12T22:55:56 | 2016-01-25T11:48:51 | Scala | UTF-8 | Python | false | false | 1,282 | py | #!/usr/bin/env python3
import boto3
import logging
from botocore.exceptions import ClientError
from logging import config
from logging import config
logging.config.fileConfig('logging.conf')
logger = logging.getLogger(__name__)
def delete_stacks():
    """Delete every CloudFormation stack in the current region."""
    logger.info('Deleting stacks..')
    cf = boto3.resource('cloudformation')
    for stack in cf.stacks.all():
        stack.delete()
    logger.info('Done.')
def delete_volumes():
    """Delete all EBS volumes, logging (not raising) any that fail."""
    logger.info('Delete volumes.')
    for volume in boto3.resource('ec2').volumes.all():
        try:
            volume.delete()
        except ClientError:
            # e.g. still attached or already gone; keep going
            logger.exception('Could not delete volume %s', volume.id)
    logger.info('Done.')
def delete_key_pairs():
    """Delete every EC2 key pair in the current region."""
    logger.info('Delete key pairs.')
    for key_pair in boto3.resource('ec2').key_pairs.all():
        key_pair.delete()
    logger.info('Done.')
def nuke_clusters():
    """Tear down all test-cluster resources: stacks, then volumes, then key pairs."""
    for step in (delete_stacks, delete_volumes, delete_key_pairs):
        step()
if __name__ == "__main__":
    # interactive safety gate: this deletes every stack, volume and key pair
    # in us-west-2, so require the exact confirmation phrase before running
    confirmation = input('You are about to nuke all test clusters. Enter "I know what I\'m doing" to continue:')
    if confirmation == 'I know what I\'m doing':
        boto3.setup_default_session(region_name='us-west-2')
        nuke_clusters()
| [
"noreply@github.com"
] | criteo-forks.noreply@github.com |
4101f33c930b8f6e102d452e033ff5be1edb8a6f | a57fc2270ecd7738a0e569f2ec24069333dbb797 | /createTable.py | a8a0062e8920e42c8622fc8189b6bf46cfdba540 | [] | no_license | 1751660300/flaskProject_BYSJ | 495a3a35d3a712f29b7cbf5c0e6ae79c7c3cd07f | 20eb182020f345c8759a5932d0bbac72a69f38b5 | refs/heads/master | 2023-03-30T23:07:58.098939 | 2021-04-11T12:03:48 | 2021-04-11T12:03:48 | 347,541,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | # -*- coding:utf-8 -*-
from views import db
from views import init_app
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
# create the Flask app and materialize all SQLAlchemy models as tables
app = init_app()
db.create_all(app=app)

# expose the Flask-Migrate commands (e.g. `python createTable.py db migrate`)
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
manager.run()
| [
"1751660300@qq.com"
] | 1751660300@qq.com |
bef150c80b2f9c2c777b6f7c223061c7d747b293 | b18f92a6a41a3d83e77848460d4a3f17e4fe677a | /algorithms/sorting/2_sort_vectors/solution/test_solution.py | 88123a6d0bb419b9aa7bc5f05cf6421ae424e231 | [] | no_license | ByteAcademyCo/Exercises | de71b885a498ead8296e6107836f9a06ac399d4f | 8332d0473ab35ee1d2975b384afda45c77ef943d | refs/heads/master | 2022-05-25T23:01:59.466480 | 2022-03-14T13:12:10 | 2022-03-14T13:12:10 | 252,842,407 | 1 | 109 | null | 2022-03-14T13:12:11 | 2020-04-03T21:09:47 | Python | UTF-8 | Python | false | false | 480 | py |
def test_solution():
    """Exercise solution.sort_vectors on mixed, empty, sorted and single inputs."""
    import solution

    unsorted_vects = [((1, 3), (2, 6)), ((1, 5), (3, 4)), ((2, 6), (2, 9))]
    expected = [((1, 5), (3, 4)), ((2, 6), (2, 9)), ((1, 3), (2, 6))]
    already_sorted = [((2, 6), (2, 9)), ((1, 3), (2, 6))]  # sorted order
    single = [((1, 8), (2, 4))]

    assert solution.sort_vectors(unsorted_vects) == expected
    assert solution.sort_vectors([]) == []
    assert solution.sort_vectors(already_sorted) == already_sorted
    assert solution.sort_vectors(single) == single
| [
"avelikevitch@gmail.com"
] | avelikevitch@gmail.com |
2609cc5c2f2c7cc888d4a7c1af8ddfda9309ec6a | dd97a08267b2197a73c7b19f630ab2742ada837c | /python/ThirteenTeV/HSCPgluino_M_400_TuneCUETP8M1_13TeV_pythia8_cff.py | b26dbd233525388cb46d453f05695b303eb269d9 | [] | no_license | Mohammed2/genproductions | 48dc93d15c070a02c9ce7c70060909587115e8f8 | 9e18bbd3be45e57b0ecaf3cbea94c8f50df939fa | refs/heads/master | 2020-06-16T16:59:50.959353 | 2017-06-06T16:43:27 | 2017-06-06T16:43:27 | 92,869,604 | 1 | 0 | null | 2017-05-30T19:40:46 | 2017-05-30T19:40:46 | null | UTF-8 | Python | false | false | 2,892 | py | FLAVOR = 'gluino'
# CMSSW/Pythia8 fragment: pair production of long-lived gluino R-hadrons
# (Heavy Stable Charged Particles) at 13 TeV for the given mass point.
COM_ENERGY = 13000. # GeV
MASS_POINT = 400 # GeV
PROCESS_FILE = 'SimG4Core/CustomPhysics/data/RhadronProcessList.txt'
PARTICLE_FILE = 'Configuration/Generator/data/particles_%s_%d_GeV.txt' % (FLAVOR, MASS_POINT)
SLHA_FILE ='Configuration/Generator/data/HSCP_%s_%d_SLHA.spc' % (FLAVOR, MASS_POINT)
PDT_FILE = 'Configuration/Generator/data/hscppythiapdt%s%d.tbl' % (FLAVOR, MASS_POINT)
USE_REGGE = False

import FWCore.ParameterSet.Config as cms

from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *

# event generator: gluino pair production with R-hadron formation enabled
# and R-hadron decays switched off (they must live through the detector)
generator = cms.EDFilter("Pythia8GeneratorFilter",
    pythiaPylistVerbosity = cms.untracked.int32(0),
    filterEfficiency = cms.untracked.double(-1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    comEnergy = cms.double(COM_ENERGY),
    crossSection = cms.untracked.double(-1),
    maxEventsToPrint = cms.untracked.int32(0),
    SLHAFileForPythia8 = cms.string('%s' % SLHA_FILE),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'SUSY:all = off',
            'SUSY:gg2gluinogluino = on',
            'SUSY:qqbar2gluinogluino = on',
            'RHadrons:allow = on',
            'RHadrons:allowDecay = off',
            'RHadrons:setMasses = on',
            'RHadrons:probGluinoball = 0.1',
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CUEP8M1Settings',
            'processParameters'
        )
    )
)

# extra attributes consumed by the custom HSCP simulation/physics code
generator.hscpFlavor = cms.untracked.string(FLAVOR)
generator.massPoint = cms.untracked.int32(MASS_POINT)
generator.particleFile = cms.untracked.string(PARTICLE_FILE)
generator.slhaFile = cms.untracked.string(SLHA_FILE)
generator.processFile = cms.untracked.string(PROCESS_FILE)
generator.pdtFile = cms.FileInPath(PDT_FILE)
generator.useregge = cms.bool(USE_REGGE)

# require a pair of R-hadrons (any of the listed PDG ids) in the final state
dirhadrongenfilter = cms.EDFilter("MCParticlePairFilter",
    Status = cms.untracked.vint32(1, 1),
    MinPt = cms.untracked.vdouble(0., 0.),
    MinP = cms.untracked.vdouble(0., 0.),
    MaxEta = cms.untracked.vdouble(100., 100.),
    MinEta = cms.untracked.vdouble(-100, -100),
    ParticleCharge = cms.untracked.int32(0),
    ParticleID1 = cms.untracked.vint32(1000993,1009213,1009313,1009323,1009113,1009223,1009333,1091114,1092114,1092214,1092224,1093114,1093214,1093224,1093314,1093324,1093334),
    ParticleID2 = cms.untracked.vint32(1000993,1009213,1009313,1009323,1009113,1009223,1009333,1091114,1092114,1092214,1092224,1093114,1093214,1093224,1093314,1093324,1093334)
)

ProductionFilterSequence = cms.Sequence(generator*dirhadrongenfilter)
| [
"sheffield@physics.rutgers.edu"
] | sheffield@physics.rutgers.edu |
3da0c80029968573c5c25265c28b5924d3c20582 | edd0dc36452366de13a04ec5ced3c502af336b06 | /zhe/brainvision.py | 5dea8ce8fa90e105fe77d3264dae7647075d6dcb | [] | no_license | expertanalytics/Elektrosjokk | 2de0a4d504d8cac40d07ce72c081a3fc3a69aaf6 | 325e731c04950d0c19f47bab1f65d8a3eb3a79ec | refs/heads/master | 2021-09-19T02:05:17.726598 | 2021-08-05T09:15:31 | 2021-08-05T09:15:31 | 79,484,610 | 0 | 0 | null | 2018-10-25T12:19:09 | 2017-01-19T18:58:28 | Python | UTF-8 | Python | false | false | 5,465 | py | # License: BSD 3-clause
# Author: Boris Reuderink
#very little modification from the original script
#the regular expression stim_code = int(re.match(r'S\s*(\d+)', mdesc).group(1))
#now it matches correctly also markers without spaces ex: "S102"
# Adapted for python3 by Jakob Schreiner
import logging
import re
import itertools
import numpy as np
from pathlib import Path
from collections import namedtuple
from configparser import SafeConfigParser
from typing import (
Iterable,
List,
Tuple,
Optional
)
# TODO:
# - add encoding of commas (\1)
# - verify units for resolution in UTF8
log = logging.getLogger('__main__')
HeaderData = namedtuple(
"DataSpec", (
"sample_rate",
"channel_label_list",
"channel_resolution_list",
"eeg_file_name",
"marker_file_name"
))
MarkerData = namedtuple(
"MarkerData", (
"name",
"type",
"description",
"duration",
"channels"
))
def read_header(file_name: Path) -> HeaderData:
    """Read the data header (.vhdr).

    The sample_rate, channel labels, channel resolution, eeg and marker
    file_names are returned as a HeaderData namedtuple.

    Arguments:
        file_name: Path to header file.

    Raises:
        AssertionError: if the magic first line is missing or the file is
            not multiplexed binary INT_16 data.
    """
    with open(file_name) as file_handle:
        # setup config reader; first line must be the BrainVision magic string
        header = "Brain Vision Data Exchange Header File Version 1.0"
        assert file_handle.readline().strip() == header

        # Break when reaching [Comment]: everything after that section is
        # free-form text that would confuse the INI parser
        lines = itertools.takewhile(
            lambda x: '[Comment]' not in x, file_handle.readlines())
        cfg = SafeConfigParser()
        cfg.readfp(lines)

        # Sampling interval is given in micro seconds. Convert to Hz -- hence 1e6
        sample_rate = 1e6/cfg.getfloat('Common Infos', 'SamplingInterval')
        num_channels = cfg.getint('Common Infos', 'NumberOfChannels')
        log.info(f"Found sample rate of {sample_rate:.2f} Hz, {num_channels:d} channels.")

        # check binary format: only multiplexed int16 data is supported
        assert cfg.get('Common Infos', 'DataOrientation') == 'MULTIPLEXED'
        assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
        assert cfg.get('Binary Infos', 'BinaryFormat') == 'INT_16'

        # load channel labels; entries look like "ChN = name,ref,resolution,unit"
        channel_label_list = ["UNKNOWN"]*num_channels
        channel_resolution_list = [np.nan]*num_channels
        for chan, props in cfg.items('Channel Infos'):
            n = int(re.match(r'ch(\d+)', chan).group(1))
            name, refname, resolution, unit = props.split(',')[:4]
            del refname  # reference channel name is not used
            channel_label_list[n - 1] = name
            channel_resolution_list[n - 1] = float(resolution)

        # locate EEG and marker files (file names as recorded in the header;
        # they are resolved relative to the header's directory by callers)
        eeg_file_name = cfg.get('Common Infos', 'DataFile')
        marker_file_name = cfg.get('Common Infos', 'MarkerFile')

        return HeaderData(
            sample_rate=sample_rate,
            channel_label_list=channel_label_list,
            channel_resolution_list=channel_resolution_list,
            eeg_file_name=eeg_file_name,
            marker_file_name=marker_file_name
        )
def read_eeg(file_name: Path, channel_resolution: Iterable[float]) -> np.ndarray:
    """Read the binary EEG data file.

    The eeg file must follow the specifications from the header (.vhdr):
    multiplexed little-endian int16 samples (2 bytes per sample).

    Arguments:
        file_name: Name of binary data file
        channel_resolution: The resolution of each channel

    Returns:
        Array of shape (num_channels, num_samples): the eeg channels
        scaled by their respective resolution.
    """
    resolutions = np.asarray(channel_resolution, dtype="f8")
    num_channels = resolutions.size

    with open(file_name, 'rb') as file_handle:
        raw = file_handle.read()

    # 2 bytes per int16 sample; column-major ('F') layout de-multiplexes
    # the interleaved channels into rows
    num_samples = (len(raw) // 2) // num_channels
    samples = np.ndarray(
        shape=(num_channels, num_samples),
        dtype='<i2',
        order='F',
        buffer=raw
    )
    return samples * resolutions[:, None]
def read_markers(file_name: Path) -> List[MarkerData]:
    """Parse the marker file (*.vmrk) and return one MarkerData per entry.

    Arguments:
        file_name: Path to marker header (*.vmrk).
    """
    with open(file_name) as file_handle:
        expected = "Brain Vision Data Exchange Marker File, Version 1.0"
        assert file_handle.readline().strip() == expected

        cfg = SafeConfigParser()
        cfg.readfp(file_handle)

    # each entry's value is comma-separated; the first five fields populate
    # the MarkerData tuple
    return [MarkerData(*info.split(",")[:5])
            for _marker, info in cfg.items("Marker Infos")]
def read_brainvis_triplet(
        header_file_name: str,
        marker_file_name: Optional[str] = None,
        eeg_file_name: Optional[str] = None
) -> Tuple[HeaderData, List[MarkerData], np.ndarray]:
    """ Read BrainVision Recorder header file, locate and read the marker and EEG file.

    Returns a header namedtuple, the list of markers and the raw EEG matrix.
    This is a convenience wrapper around `read_header`, `read_eeg` and
    `read_markers`.

    Arguments:
        header_file_name: path to the .vhdr header file (must exist).
        marker_file_name: optional explicit marker file; when None, the file
            named in the header is used, resolved relative to the header's
            directory.
        eeg_file_name: optional explicit EEG file; defaults analogously.
    """
    header_path = Path(header_file_name)
    assert header_path.exists(), header_path
    header_spec = read_header(header_path)

    # Bug fix: previously an explicitly supplied marker_file_name was ignored
    # and an undefined local was referenced, raising NameError. Only fall
    # back to the header's recorded file when no override is given.
    if marker_file_name is None:
        marker_file_name = header_path.parent / header_spec.marker_file_name
    marks = read_markers(marker_file_name)

    if eeg_file_name is None:
        eeg_file_name = header_path.parent / header_spec.eeg_file_name
    X = read_eeg(eeg_file_name, header_spec.channel_resolution_list)

    return header_spec, marks, X
| [
"jakob2602@gmail.com"
] | jakob2602@gmail.com |
a99bebca4185df88160a3362c3cb41f270204030 | fb118c335e893e92b7e88ab9af3e9b355c9ed2e0 | /brain/session/data/dataset_to_dict.py | c052f8f35e28a330c5c76f6b678a2866edc3bc4c | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | mtamillow/machine-learning | 65b4a170ef7d171c01c05d8fb964f5c81f74ad5d | f21a7ecabdb0f168b40ba49730b59f908e8bdf72 | refs/heads/master | 2021-01-24T01:35:47.652869 | 2016-06-10T03:09:45 | 2016-06-10T03:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,950 | py | #!/usr/bin/python
'''@save_entity
Note: the term 'dataset' used throughout various comments in this file,
synonymously implies the user supplied 'file upload(s)', and XML url
references.
'''
from brain.converter.convert_dataset import Convert_Dataset
def dataset_dictionary(
    id_entity,
    upload
):
    '''@dataset_dictionary

    Convert the supplied csv, json, or xml file upload(s) (web-interface),
    or a json string (programmatic-interface), to a uniform dict structure.

    @id_entity, session entity id attached to every converted dataset.
    @upload, dict; upload['dataset'] carries either 'file_upload' (a list of
        {'file': <file-like>, 'type': <str>} entries) or 'json_string'.

    Returns False when any web-interface upload fails to convert; otherwise
    a dict with keys 'dataset', 'observation_labels' and 'error' (the list
    of collected exceptions, or None when everything succeeded).
    '''
    # converter method to invoke for each supported upload type; the three
    # branches were previously triplicated verbatim
    supported = {
        'csv': 'csv_to_dict',
        'json': 'json_to_dict',
        'xml': 'xml_to_dict',
    }

    flag_append = True
    dataset = []
    observation_labels = []
    list_error = []

    try:
        # web-interface: convert each uploaded file
        if upload['dataset']['file_upload']:
            for val in upload['dataset']['file_upload']:
                # reset file-pointer
                val['file'].seek(0)

                # unsupported types are silently skipped (as before)
                method = supported.get(val['type'])
                if not method:
                    continue

                try:
                    # conversion
                    converter = Convert_Dataset(val['file'])
                    converted = getattr(converter, method)()
                    count_features = converter.get_feature_count()
                    labels = converter.get_observation_labels()

                    # assign observation labels
                    observation_labels.append(labels)

                    # build new (relevant) dataset
                    dataset.append({
                        'id_entity': id_entity,
                        'premodel_dataset': converted,
                        'count_features': count_features
                    })
                except Exception as error:
                    list_error.append(error)
                    flag_append = False

            # any failed conversion aborts the session
            if not flag_append:
                return False

        # programmatic-interface
        elif upload['dataset']['json_string']:
            # conversion
            dataset_json = upload['dataset']['json_string']
            converter = Convert_Dataset(dataset_json, True)
            converted = converter.json_to_dict()
            count_features = converter.get_feature_count()
            observation_labels.append(dataset_json.keys())

            # build dataset
            dataset.append({
                'id_entity': id_entity,
                'premodel_dataset': converted,
                'count_features': count_features
            })

    except Exception as error:
        list_error.append(error)
        print(error)

    # return results
    return {
        'dataset': dataset,
        'observation_labels': observation_labels,
        'error': list_error if list_error else None
    }
| [
"jeff1evesque@yahoo.com"
] | jeff1evesque@yahoo.com |
6232db56563263b899bb280c8b3ac86e38fa9ecc | 2ee8c8ccb844bea31c21d7493a1d3504925f2b4f | /uwnet/whitenoise.py | ea6d6b9eb154c036143798f994a678927a45772e | [
"MIT"
] | permissive | mhdella/uwnet | 11b3273677d2b32d63e17d9df116b2065fc626ae | 24427547c485247e5019b9f8afa6843a6be603e4 | refs/heads/master | 2020-07-05T01:12:10.970962 | 2019-06-18T07:14:10 | 2019-06-18T07:14:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,238 | py | import argparse
import logging
import dask.array as da
import numpy as np
import xarray as xr
import torch
from uwnet.thermo import compute_apparent_source
logger = logging.getLogger('whitenoise')
def get_error(model):
    """Return the residuals (Q1/Q2 minus the model's predicted sources)
    over the first 100 training time steps, with NaN times dropped."""
    from src.data import open_data

    training = open_data("training").isel(time=slice(0, 100)).compute()
    predicted = model.predict(training)

    q2 = compute_apparent_source(training.QT, training.FQT * 86400)
    q1 = compute_apparent_source(training.SLI, training.FSLI * 86400)

    residuals = xr.Dataset({
        'QT': q2 - predicted.QT,
        'SLI': q1 - predicted.SLI,
    })
    return residuals.dropna('time')
def cholesky_factor(C):
    """Return the Cholesky factor of each matrix in the stack *C*.

    C has shape (n, m, m); the result stacks the lower-triangular factors
    along axis 0.

    Fix: the previous implementation passed a generator expression to
    ``np.stack``, which raises TypeError on modern NumPy.
    ``np.linalg.cholesky`` broadcasts over leading axes, so one batched
    call is equivalent.
    """
    return np.linalg.cholesky(np.asarray(C))
class WhiteNoiseModel(object):
    """Generate random noise with correct covariance structure.

    ``fit`` estimates a per-y-row covariance of the stacked (QT, SLI)
    errors and stores its Cholesky factor; ``__call__`` draws a noise
    increment with that covariance and applies it to a model state.
    """

    def fit(self, error):
        # time step inferred from the first two time coordinates
        # NOTE(review): assumed to be in days, based on usage in __call__
        self.time_step_ = float(error.time[1] - error.time[0])
        X = da.concatenate([error.QT.data, error.SLI.data], axis=1)

        # compute covariance
        # NOTE(review): despite the names, X.shape[0] is the leading (time)
        # axis of the concatenated array -- confirm against the data layout
        nz = X.shape[0]
        nx = X.shape[-1]
        n = nx * nz
        C = da.einsum('tzyx,tfyx->yzf', X, X) / n
        C = C.compute()

        # shape is
        # (y, feat, feat)
        self.Q_ = cholesky_factor(C) * np.sqrt(self.time_step_)
        return self

    def __call__(self, state):
        """Apply one white-noise increment to *state*.

        Parameters
        ----------
        state : dict
            Must contain 'dt' (seconds) plus the QT/SLI fields below.

        Returns
        -------
        tend : dict
            physical tendencies (the perturbed QT and SLI fields)
        """
        sli_key = "liquid_ice_static_energy"
        qt_key = "total_water_mixing_ratio"
        nx = state[qt_key].shape[-1]
        # convert the step from seconds to days
        dt = state['dt'] / 86400
        logger.info(f"Computing white noise tendency with {dt} days")

        y = self.Q_.shape[0]
        z = self.Q_.shape[1]

        # dividing by sqrt(dt) ensures that
        # output * dt = Q sqrt{dt} N
        N = np.random.randn(y, nx, z) * np.sqrt(dt)
        W = np.einsum('yzf,yxf->zyx', self.Q_, N)
        # first half of the stacked features is QT, second half SLI
        dqt, dsli = np.split(W, 2, 0)

        # perform time step
        qt = state[qt_key] + dqt
        sli = state[sli_key] + dsli
        return {qt_key: qt, sli_key: sli}
def fit(model):
    """Load the serialized model at path *model* and fit a WhiteNoiseModel
    to its prediction residuals."""
    residuals = get_error(torch.load(model))
    return WhiteNoiseModel().fit(residuals)
| [
"nbren12@gmail.com"
] | nbren12@gmail.com |
a706ffafd91a2edbbc8f35fd4a95d5ca568abbae | 74983098c5de53007bde6052a631845c781b5ba8 | /forrester/forrester10/forrester.py | 109a0941d6cab03cc0fed480ab326aa1bcf164aa | [] | no_license | numairmansur/Experiments | 94ccdd60f4c2cf538fab41556ac72405656c9d77 | 592f39916461c7a9f7d400fa26f849043d1377ed | refs/heads/master | 2021-04-29T12:39:16.845074 | 2017-02-15T07:36:47 | 2017-02-15T07:36:47 | 78,043,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | import numpy as np
import sys
import math
import time
import csv
from hpolib.benchmarks.synthetic_functions import Forrester
from time import gmtime, strftime
def main(job_id, params):
    '''Spearmint entry point: evaluate the Forrester benchmark at *params*.

    @job_id: spearmint job number (only used for logging)
    @params: dict with key 'x', the point to evaluate
    Appends the objective value to the run csv and returns it.
    '''
    print '!!! Entered Main !!!'
    print 'Anything printed here will end up in the output directory for job #:', str(job_id)
    print params
    f = Forrester()
    res = f.objective_function([params['x']])
    print res
    # NOTE(review): absolute, machine-specific output path
    #with open('/home/numair/Downloads/testEnvFortheCluster/Experiments/forrester/run1.csv','a') as csvfile:
    with open('/home/mansurm/Experiments/forrester/run10.csv','a') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow([res['main'][0]])
    return res['main'][0]
"numair.mansur@gmail.com"
] | numair.mansur@gmail.com |
21aa7fda2068d5bc4eb2aff0f958e5958d8d9296 | 6f57761c60582c546423a2a08c769f18236fd153 | /benchmarks/test_correlation_MG3_SD.py | 595a1199b439267520a573446a1e282cbfec6b4b | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"BSD-2-Clause"
] | permissive | LSSTDESC/CCL | 30644922fead0b017c1056e628bec23cf6bc4dfb | 29d46978445678d86a4bee485cb29d30246ff64a | refs/heads/master | 2023-09-03T17:03:17.012019 | 2023-08-08T11:01:33 | 2023-08-08T11:01:33 | 57,389,367 | 118 | 68 | BSD-3-Clause | 2023-08-30T13:25:25 | 2016-04-29T14:08:38 | C | UTF-8 | Python | false | false | 7,232 | py | import os
import numpy as np
import pyccl as ccl
from pyccl.modified_gravity import MuSigmaMG
from scipy.interpolate import interp1d
import pytest
@pytest.fixture(scope='module', params=['fftlog', 'bessel'])
def corr_method(request):
    """Yield (method name, benchmark error-tolerance factor) per method."""
    tolerance = {'fftlog': 0.21, 'bessel': 0.05}[request.param]
    return request.param, tolerance
@pytest.fixture(scope='module')
def set_up(request):
    """Build the scale-dependent mu/Sigma modified-gravity cosmology, the
    number-counts/lensing tracers, the benchmark correlation functions and
    the error bars shared by all test cases in this module."""
    dirdat = os.path.dirname(__file__) + '/data'

    h0 = 0.67702026367187500
    logA = 3.05  # log(10^10 A_s)

    # scale dependent MG cosmology
    ccl.gsl_params.LENSING_KERNEL_SPLINE_INTEGRATION = False
    ccl.gsl_params.INTEGRATION_LIMBER_EPSREL = 2.5E-5
    ccl.gsl_params.INTEGRATION_EPSREL = 2.5E-5
    cosmo = ccl.Cosmology(Omega_c=0.12/h0**2, Omega_b=0.0221/h0**2, Omega_k=0,
                          h=h0, A_s=np.exp(logA)/10**10, n_s=0.96, Neff=3.046,
                          m_nu=0.0, w0=-1, wa=0, T_CMB=2.7255,
                          mg_parametrization=MuSigmaMG(
                              mu_0=0.1, sigma_0=0.1,
                              c1_mg=1.1, c2_mg=1.1, lambda_mg=1),
                          transfer_function='boltzmann_isitgr',
                          matter_power_spectrum='linear')

    # Ell-dependent correction factors
    # Set up array of ells: dense up to l=100, then progressively coarser
    fl = {}
    lmax = 10000
    nls = (lmax - 400)//20+141
    ells = np.zeros(nls)
    ells[:101] = np.arange(101)
    ells[101:121] = ells[100] + (np.arange(20) + 1) * 5
    ells[121:141] = ells[120] + (np.arange(20) + 1) * 10
    ells[141:] = ells[140] + (np.arange(nls - 141) + 1) * 20
    fl['lmax'] = lmax
    fl['ells'] = ells

    # Load dNdz's
    z1, pz1 = np.loadtxt(dirdat + "/bin1_histo.txt", unpack=True)
    z2, pz2 = np.loadtxt(dirdat + "/bin2_histo.txt", unpack=True)

    # Set up the linear galaxy bias as used in generating benchmarks
    bz1 = 1.45*np.ones_like(pz1)
    bz2 = 1.55*np.ones_like(pz2)

    # Initialize tracers
    trc = {}
    trc['g1'] = ccl.NumberCountsTracer(cosmo, has_rsd=False,
                                       dndz=(z1, pz1), bias=(z1, bz1))
    trc['g2'] = ccl.NumberCountsTracer(cosmo, has_rsd=False,
                                       dndz=(z2, pz2), bias=(z2, bz2))
    trc['l1'] = ccl.WeakLensingTracer(cosmo, dndz=(z1, pz1))
    trc['l2'] = ccl.WeakLensingTracer(cosmo, dndz=(z2, pz2))

    # Read benchmarks (15 angular bins per tracer-pair combination)
    bms = {}
    bms['dd_11'] = np.loadtxt(dirdat+'/wtheta_linear_predictionSD.dat')[0:15]
    bms['dd_22'] = np.loadtxt(dirdat+'/wtheta_linear_predictionSD.dat')[15:30]
    bms['dl_11'] = np.loadtxt(dirdat+'/gammat_linear_predictionSD.dat')[0:15]
    bms['dl_12'] = np.loadtxt(dirdat+'/gammat_linear_predictionSD.dat')[15:30]
    bms['dl_21'] = np.loadtxt(dirdat+'/gammat_linear_predictionSD.dat')[30:45]
    bms['dl_22'] = np.loadtxt(dirdat+'/gammat_linear_predictionSD.dat')[45:60]
    bms['ll_11_p'] = np.loadtxt(dirdat+'/Xip_linear_predictionSD.dat')[0:15]
    bms['ll_12_p'] = np.loadtxt(dirdat+'/Xip_linear_predictionSD.dat')[15:30]
    bms['ll_22_p'] = np.loadtxt(dirdat+'/Xip_linear_predictionSD.dat')[30:45]
    bms['ll_11_m'] = np.loadtxt(dirdat+'/Xim_linear_predictionSD.dat')[0:15]
    bms['ll_12_m'] = np.loadtxt(dirdat+'/Xim_linear_predictionSD.dat')[15:30]
    bms['ll_22_m'] = np.loadtxt(dirdat+'/Xim_linear_predictionSD.dat')[30:45]
    theta = np.loadtxt(dirdat+'/theta_corr_MG.dat')
    bms['theta'] = theta

    # Read error bars, interpolated onto the benchmark theta grid
    ers = {}
    d = np.loadtxt("benchmarks/data/sigma_clustering_Nbin5",
                   unpack=True)
    ers['dd_11'] = interp1d(d[0], d[1],
                            fill_value=d[1][0],
                            bounds_error=False)(theta)
    ers['dd_22'] = interp1d(d[0], d[2],
                            fill_value=d[2][0],
                            bounds_error=False)(theta)
    d = np.loadtxt("benchmarks/data/sigma_ggl_Nbin5",
                   unpack=True)
    ers['dl_12'] = interp1d(d[0], d[1],
                            fill_value=d[1][0],
                            bounds_error=False)(theta)
    ers['dl_11'] = interp1d(d[0], d[2],
                            fill_value=d[2][0],
                            bounds_error=False)(theta)
    ers['dl_22'] = interp1d(d[0], d[3],
                            fill_value=d[3][0],
                            bounds_error=False)(theta)
    ers['dl_21'] = interp1d(d[0], d[4],
                            fill_value=d[4][0],
                            bounds_error=False)(theta)
    d = np.loadtxt("benchmarks/data/sigma_xi+_Nbin5",
                   unpack=True)
    # We cut the largest theta angle from xip because of issues
    # with the benchmark.
    ers['ll_11_p'] = interp1d(d[0], d[1],
                              fill_value=d[1][0],
                              bounds_error=False)(theta)
    ers['ll_22_p'] = interp1d(d[0], d[2],
                              fill_value=d[2][0],
                              bounds_error=False)(theta)
    ers['ll_12_p'] = interp1d(d[0], d[3],
                              fill_value=d[3][0],
                              bounds_error=False)(theta)
    d = np.loadtxt("benchmarks/data/sigma_xi-_Nbin5",
                   unpack=True)
    ers['ll_11_m'] = interp1d(d[0], d[1],
                              fill_value=d[1][0],
                              bounds_error=False)(theta)
    ers['ll_22_m'] = interp1d(d[0], d[2],
                              fill_value=d[2][0],
                              bounds_error=False)(theta)
    ers['ll_12_m'] = interp1d(d[0], d[3],
                              fill_value=d[3][0],
                              bounds_error=False)(theta)

    # restore the global GSL integration parameters changed above
    ccl.gsl_params.reload()
    return cosmo, trc, bms, ers, fl
@pytest.mark.parametrize("t1,t2,bm,er,kind,pref",
                         [('g1', 'g1', 'dd_11', 'dd_11', 'NN', 1),
                          ('g2', 'g2', 'dd_22', 'dd_22', 'NN', 1),
                          ('g1', 'l1', 'dl_11', 'dl_11', 'NG', 1),
                          ('g1', 'l2', 'dl_12', 'dl_12', 'NG', 1),
                          ('g2', 'l1', 'dl_21', 'dl_21', 'NG', 1),
                          ('g2', 'l2', 'dl_22', 'dl_22', 'NG', 1),
                          ('l1', 'l1', 'll_11_p', 'll_11_p', 'GG+', 1),
                          ('l1', 'l2', 'll_12_p', 'll_12_p', 'GG+', 1),
                          ('l2', 'l2', 'll_22_p', 'll_22_p', 'GG+', 1),
                          ('l1', 'l1', 'll_11_m', 'll_11_m', 'GG-', 1),
                          ('l1', 'l2', 'll_12_m', 'll_12_m', 'GG-', 1),
                          ('l2', 'l2', 'll_22_m', 'll_22_m', 'GG-', 1)])
def test_xi(set_up, corr_method, t1, t2, bm, er, kind, pref):
    """Compare CCL real-space correlations of tracer pair (t1, t2) against
    the benchmark *bm*, within error bars *er* scaled by the method's
    tolerance factor. *pref* is a sign/prefactor (always 1 here)."""
    cosmo, trcs, bms, ers, fls = set_up
    method, errfac = corr_method

    # Debugging - define the same cosmology but in GR
    # compute C_ell on the sparse ell grid, then interpolate to every ell
    cl = ccl.angular_cl(cosmo, trcs[t1], trcs[t2], fls['ells'])
    ell = np.arange(fls['lmax'])
    cli = interp1d(fls['ells'], cl, kind='cubic')(ell)

    # Our benchmarks have theta in arcmin
    # but CCL requires it in degrees:
    theta_deg = bms['theta'] / 60.
    xi = ccl.correlation(cosmo, ell=ell, C_ell=cli, theta=theta_deg, type=kind,
                         method=method)
    xi *= pref
    print(xi)
    assert np.all(np.fabs(xi - bms[bm]) < ers[er] * errfac)
| [
"noreply@github.com"
] | LSSTDESC.noreply@github.com |
a689274e77fed8cb988eeed9338bc8ba75695894 | ba596595866771383f0d178421031fcf43fab16f | /plugin.video.pakindia/default.py | a1289475eb471d5e70aa8e45cd31cac529dd74f5 | [] | no_license | kevintone/tdbaddon | ab7e5aea48a369dffd8046c068d95ef26ddc7d9a | 10cbfceb375e1f5af4ade29a62c433cfa36cd883 | refs/heads/master | 2021-01-16T22:51:36.801741 | 2016-06-21T16:27:51 | 2016-06-21T16:27:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,136 | py | import urllib,urllib2,sys,re,xbmcplugin,xbmcgui,xbmcaddon,xbmc,os
import json
ADDON = xbmcaddon.Addon(id='plugin.video.pakindia')
datapath = xbmc . translatePath ( ADDON . getAddonInfo ( 'profile' ) )
pak = os. path . join ( datapath , "pak" )
def CATEGORIES():
aa=open(pak).read()
match=re.compile('<programCategory>(.+?)</programCategory.+?<categoryImage>(.+?)</categoryImage>',re.DOTALL).findall(aa)
uniques =[]
print match
for name , iconimage in match:
if name not in uniques:
uniques.append(name)
addDir(name,name,1,iconimage)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
setView('movies', 'default')
def GetContent(url):
    """Add a playable entry for every programme in the given category."""
    feed = open(pak).read()
    for chunk in feed.split('<items>'):
        try:
            title = re.compile('<programTitle>(.+?)</programTitle>').findall(chunk)[0]
            stream_url = re.compile('<programURL>(.+?)</programURL>').findall(chunk)[0]
            thumb = re.compile('<programImage>(.+?)</programImage>').findall(chunk)[0]
            if '<programCategory>' + url in chunk:
                addDir(title, stream_url, 200, thumb)
        except:
            # Chunks without all three tags (e.g. the feed header) are
            # skipped silently, as in the original best-effort parser.
            pass
def OPEN_URL(url):
    """Fetch *url* with the addon's User-Agent and return the raw body."""
    request = urllib2.Request(url)
    request.add_header('User-Agent', 'Pak%20TV/1.0 CFNetwork/758.2.8 Darwin/15.662')
    handle = urllib2.urlopen(request)
    body = handle.read()
    handle.close()
    return body
def auth():
    """Request a stream authorisation token suffix from the key server."""
    import base64
    import time
    # Token is the current epoch second plus a fixed offset, joined with
    # the raw epoch second and base64-encoded.
    epoch_secs = str(time.time()).split('.')[0]
    shifted = int(epoch_secs) + 69296929
    token = base64.b64encode('%s@2nd2@%s' % (str(shifted), epoch_secs))
    key_url = 'https://app.dynns.com/keys/Pak.php?token=' + token
    request = urllib2.Request(key_url)
    credentials = 'ZGlsZGlsZGlsOlBAa2lzdEBu'  # hard-coded HTTP Basic credentials
    request.add_header("User-Agent", ADDON.getSetting('pakuser'))
    request.add_header("Authorization", "Basic %s" % credentials)
    return urllib2.urlopen(request).read()
def getletter():
    """Return one random ASCII letter (either case).

    Fixed: the original built an explicit ``letters`` literal but never
    used it, choosing from ``string.letters`` instead -- which is
    locale-dependent and was removed in Python 3. Choosing from the
    literal gives the same result under the default locale.
    """
    import random
    letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return random.choice(letters)
def getlanguage():
    """Return a random locale code drawn from the supported list."""
    import random
    supported = ['en', 'en_gb', 'en_us', 'en_ca', 'en_in', 'fr', 'fr_CA', 'es', 'es_MX', 'pt', 'pt_BR', 'it', 'de', 'zh_Hans', 'zh_Hant', 'zh_HK', 'nl', 'ja', 'ko', 'vi', 'ru', 'sv', 'da', 'fi', 'nb', 'tr', 'el', 'id', 'ms', 'th', 'hi', 'hu', 'pl', 'cs', 'sk', 'uk', 'hr', 'ca', 'ro', 'he', 'ar']
    return random.choice(supported)
def getuser():
    """Return the fixed User-Agent string used for stream requests.

    Fixed: the original also computed a randomised version string
    (``number`` via randint/getletter) that was never used -- that dead
    code has been removed. The returned agent string is unchanged.
    """
    return 'AppleCoreMedia/1.0.0.13E238 (iPhone; U; CPU OS 9_3_1 like Mac OS X; en_gb)'
def PLAY_STREAM(name, url, iconimage):
    """Resolve *url* (plus auth token) as a playable Kodi list item."""
    stream_url = url + auth()
    item = xbmcgui.ListItem(name, iconImage='DefaultVideo.png', thumbnailImage=iconimage)
    item.setInfo(type='Video', infoLabels={'Title': name})
    item.setProperty("IsPlayable", "true")
    # Kodi passes header hints after the '|' separator in the path.
    item.setPath(stream_url + '|User-Agent=' + getuser())
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
def get_params():
    """Parse the plugin query string (sys.argv[2]) into a dict.

    Returns an empty LIST (not dict) when there is no query string --
    quirk kept from the original; the caller wraps every access in a
    bare try/except, so the empty list just makes those lookups fail.
    """
    param=[]
    paramstring=sys.argv[2]
    if len(paramstring)>=2:
        params=sys.argv[2]
        # Drop any '?' characters before splitting on '&'.
        cleanedparams=params.replace('?','')
        if (params[len(params)-1]=='/'):
            # NOTE(review): this strips TWO trailing characters, not one,
            # and the result is never used again (cleanedparams was taken
            # above) -- looks like dead code; confirm before removing.
            params=params[0:len(params)-2]
        pairsofparams=cleanedparams.split('&')
        param={}
        for i in range(len(pairsofparams)):
            splitparams={}
            splitparams=pairsofparams[i].split('=')
            if (len(splitparams))==2:
                # Only well-formed key=value pairs are kept.
                param[splitparams[0]]=splitparams[1]
    return param
def addDir(name, url, mode, iconimage):
    """Add a folder (or playable, mode 200) entry to the Kodi listing."""
    plugin_url = (sys.argv[0] + "?url=" + urllib.quote_plus(url) +
                  "&mode=" + str(mode) + "&name=" + urllib.quote_plus(name) +
                  "&iconimage=" + urllib.quote_plus(iconimage))
    added = True
    item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    item.setInfo(type="Video", infoLabels={"Title": name})
    if mode == 200:
        item.setProperty("IsPlayable", "true")
        added = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=plugin_url, listitem=item, isFolder=False)
    else:
        # Folder entries get a "play everything" context-menu action.
        context = [('Play All Videos', 'XBMC.RunPlugin(%s?name=%s&mode=2001&iconimage=None&url=%s)' % (sys.argv[0], name, url))]
        item.addContextMenuItems(items=context, replaceItems=False)
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=plugin_url, listitem=item, isFolder=True)
    return added
def setView(content, viewType):
    """Set the listing content type and, if enabled, the saved view mode."""
    if content:
        xbmcplugin.setContent(int(sys.argv[1]), content)
    auto_view = ADDON.getSetting('auto-view')
    if auto_view == 'true':
        # Apply the view number stored under the given settings key.
        xbmc.executebuiltin("Container.SetViewMode(%s)" % ADDON.getSetting(viewType))
# Entry point: parse the invocation parameters and dispatch on `mode`.
params=get_params()
url=None
name=None
mode=None
iconimage=None
# Each parameter is optional; a bare except keeps missing keys as None.
try:
    url=urllib.unquote_plus(params["url"])
except:
    pass
try:
    name=urllib.unquote_plus(params["name"])
except:
    pass
try:
    iconimage=urllib.unquote_plus(params["iconimage"])
except:
    pass
try:
    mode=int(params["mode"])
except:
    pass
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
print "IconImage: "+str(iconimage)
#these are the modes which tells the plugin where to go
if mode==None or url==None or len(url)<1:
    print ""
    CATEGORIES()
elif mode==1:
    print ""+url
    GetContent(url)
elif mode==200:
    PLAY_STREAM(name,url,iconimage)
elif mode==2001:
    # NOTE(review): playall() is not defined anywhere in this file --
    # selecting "Play All Videos" will raise NameError.
    playall(name,url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| [
"tdbaddon@hotmail.com"
] | tdbaddon@hotmail.com |
38737caf77f0aa21cdc1b89494ab8bf166d318f0 | 277f976227c7590f6de5e7991d8fbed23b6646fe | /euler/cleaned_solutions/p88.py | 4d72ae9ef50c34c20cca638763abee936759bad8 | [] | no_license | domspad/euler | ca19aae72165eb4d08104ef7a2757115cfdb9a18 | a4901403e442b376c2edd987a1571ab962dadab2 | refs/heads/master | 2021-01-17T14:04:39.198658 | 2016-07-25T23:40:10 | 2016-07-25T23:40:10 | 54,561,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py |
#~90 mins before running + 60 mins running? need to run again...
from math import sqrt
from collections import defaultdict
def primes_lt3(N):
"""
Return all primes less than N > 0 int
"""
# test every number less than N/2
primes = [ i for i in xrange(2,N)
if not any( ( i % p == 0 for p in xrange(2,int(sqrt(i))+1) ) )]
return primes
PRIMES = primes_lt3(10000)
def prime_fact_upto(n):
"""
n > 1, return prime facts of all integers up to n
12 --> {2:2, 3:1}
"""
PRIME_FACTS = [defaultdict(int),defaultdict(int)]
for i in xrange(2,n):
if i in PRIMES:
new_pf = defaultdict(int)
new_pf[i] = 1
PRIME_FACTS.append(new_pf)
else:
for p in PRIMES:
if p > sqrt(i) + 1: # not one found!
new_pf = defaultdict(int)
new_pf[i] = 1
PRIME_FACTS.append(new_pf)
break
elif i % p == 0:
new_pf = PRIME_FACTS[i/p].copy()
new_pf[p] += 1
PRIME_FACTS.append(new_pf)
break
return PRIME_FACTS
# PRIME_FACTS = prime_fact_upto(30)
from itertools import combinations
from collections_extended import bag,frozenbag
from collections import defaultdict
N = 12500
#from http://stackoverflow.com/questions/19368375/set-partitions-in-python
def partition(collection):
if len(collection) == 1:
yield [ collection ]
return
first = collection[0]
for smaller in partition(collection[1:]):
# insert `first` in each of the subpartition's subsets
for n, subset in enumerate(smaller):
yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]
# put `first` in its own subset
yield [ [ first ] ] + smaller
def trans_pf_dict_to_bag(pf_dict):
return sum(([k]*pf_dict[k] for k in pf_dict),[])
def calc_k(bag):
prod = reduce(lambda x,y: x*y, bag, 1)
return prod - sum(bag) + len(bag)
found_ks = defaultdict(int)
FOUND_KS = set()
for e,pf in enumerate(prime_fact_upto(N)):
if e < 2:
continue
if e % 100 == 0:
print e
partitions = partition(trans_pf_dict_to_bag(pf))
bags = set(frozenbag(map(lambda x: reduce(lambda y,z: y*z, x, 1), p)) for p in partitions)
for k in map(calc_k, bags):
FOUND_KS.add(k)
if found_ks[k] == 0:
found_ks[k] = e
print sum(set((found_ks[i] for i in xrange(2,12001))))
########################################################################
#dp method?
N = 100
K = N/2
# prod_partitions = [[(,)]*K for i in xrange(N)]
| [
"domspad@umich.edu"
] | domspad@umich.edu |
06bf555808d01c943cc92f5dce4c87da0ba3687e | 43e900f11e2b230cdc0b2e48007d40294fefd87a | /Amazon/OnlineAssessment/min_priority.py | 7a2fa3bd70e6d543510c6edb459380fb66dfbf84 | [] | no_license | DarkAlexWang/leetcode | 02f2ed993688c34d3ce8f95d81b3e36a53ca002f | 89142297559af20cf990a8e40975811b4be36955 | refs/heads/master | 2023-01-07T13:01:19.598427 | 2022-12-28T19:00:19 | 2022-12-28T19:00:19 | 232,729,581 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | import collections
class Solution:
    """Relabel array values with their 1-based rank among distinct values."""

    def minPriority(self, arr):
        """Replace each element of *arr* (in place) with its priority rank.

        The smallest distinct value becomes 1, the next smallest 2, and
        so on; equal values share a rank. Returns the mutated list.

        Fixes vs. the original: drops the pointless ``defaultdict()``
        (created with no default factory and used via ``dict.get``),
        drops the value counting (only distinct values matter), and
        removes the leftover debug ``print``.
        """
        rank = {val: i for i, val in enumerate(sorted(set(arr)), start=1)}
        for i, val in enumerate(arr):
            arr[i] = rank[val]
        return arr
# Quick manual check of the ranking behaviour.
if __name__ == "__main__":
    solution = Solution()
    ans1 = solution.minPriority([1, 4, 8, 4])      # -> [1, 2, 3, 2]
    ans2 = solution.minPriority([2, 9, 3, 2, 3])   # -> [1, 3, 2, 1, 2]
    print(ans1)
    print(ans2)
| [
"wangzhihuan0815@gmail.com"
] | wangzhihuan0815@gmail.com |
05933d6e8971beb73e7caacabd14fe5c9e0081de | 18fa0ad57cd9c26bc2622ead61b88c81e017e2e8 | /CNN_DNN_tensorflow/tensor_model_boost/input_data.py | 801fad1042f51ce2267f9539d59d5cf67b1cf64a | [] | no_license | weihhh/ECG_pro | 45da18fad4709009cd4766a870fac7c5d5514a92 | 1e013cbb7352ad896661412f036fd9c6242a6001 | refs/heads/master | 2021-05-04T13:52:17.259815 | 2018-07-20T02:39:16 | 2018-07-20T02:39:16 | 120,323,445 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | from sklearn.model_selection import train_test_split#训练数据、测试数据切分
from collections import Counter
import pickle
from sklearn import preprocessing
from sklearn.metrics import classification_report,accuracy_score #模型准确率,查准率,查全率,f1_score
from tensor_ecg_model import tensor_dataset
def read_data_sets():
    """Load the pickled ECG data, standardise it, and split it 50/50.

    Returns a ``(train, validation)`` pair of ``tensor_dataset`` objects.
    """
    # Unpickle the raw samples and labels.
    with open(r'D:\aa_work\ECG\svm\data.pickle','rb') as f:
        ECG_data,ECG_annotation=pickle.load(f)# arrays of shape (*, 300) and (*, 1); * = number of samples
    print('原始数据规模: ',ECG_data.shape,'原始标签规模: ',ECG_annotation.shape)
    annotation_counts=Counter(ECG_annotation.flatten())
    print('类别概览: ',annotation_counts)
    # Standardise each feature to zero mean / unit variance.
    ECG_data=preprocessing.scale(ECG_data)
    x_train,x_validation,y_train,y_validation=train_test_split(ECG_data,ECG_annotation.flatten(),test_size=0.5)
    print('训练集规模: {},测试集规模: {}'.format(x_train.shape,x_validation.shape))
    train_dataset=tensor_dataset(x_train,y_train)
    validation_dataset=tensor_dataset(x_validation,y_validation)
    return train_dataset,validation_dataset
def main():
    """Load the ECG datasets and report their shapes."""
    train_ds, val_ds = read_data_sets()
    print('训练集:{},{}'.format(train_ds._images.shape, train_ds._labels.shape))
    print('验证集:{}, {}'.format(val_ds._images.shape, val_ds._labels.shape))
main()
| [
"wz591757596@163.com"
] | wz591757596@163.com |
5b8e0722c567a62b5772d325d53476f5dc7e763f | bc441bb06b8948288f110af63feda4e798f30225 | /ucpro_sdk/model/metadata_center/stream_metrics_schema_pb2.py | c00bdc5b8bb6d992da17621de3f2b1535aefb23a | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 5,147 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stream_metrics_schema.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ucpro_sdk.model.metadata_center import stream_metrics_schema_field_pb2 as ucpro__sdk_dot_model_dot_metadata__center_dot_stream__metrics__schema__field__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='stream_metrics_schema.proto',
package='metadata_center',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_center'),
serialized_pb=_b('\n\x1bstream_metrics_schema.proto\x12\x0fmetadata_center\x1a\x41ucpro_sdk/model/metadata_center/stream_metrics_schema_field.proto\"\xc8\x01\n\x13StreamMetricsSchema\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0b\n\x03org\x18\x02 \x01(\x05\x12\x0f\n\x07version\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12=\n\ndimensions\x18\x05 \x03(\x0b\x32).metadata_center.StreamMetricsSchemaField\x12:\n\x07metrics\x18\x06 \x03(\x0b\x32).metadata_center.StreamMetricsSchemaFieldBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_centerb\x06proto3')
,
dependencies=[ucpro__sdk_dot_model_dot_metadata__center_dot_stream__metrics__schema__field__pb2.DESCRIPTOR,])
_STREAMMETRICSSCHEMA = _descriptor.Descriptor(
name='StreamMetricsSchema',
full_name='metadata_center.StreamMetricsSchema',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='metadata_center.StreamMetricsSchema.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='metadata_center.StreamMetricsSchema.org', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='metadata_center.StreamMetricsSchema.version', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='metadata_center.StreamMetricsSchema.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimensions', full_name='metadata_center.StreamMetricsSchema.dimensions', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metrics', full_name='metadata_center.StreamMetricsSchema.metrics', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=316,
)
_STREAMMETRICSSCHEMA.fields_by_name['dimensions'].message_type = ucpro__sdk_dot_model_dot_metadata__center_dot_stream__metrics__schema__field__pb2._STREAMMETRICSSCHEMAFIELD
_STREAMMETRICSSCHEMA.fields_by_name['metrics'].message_type = ucpro__sdk_dot_model_dot_metadata__center_dot_stream__metrics__schema__field__pb2._STREAMMETRICSSCHEMAFIELD
DESCRIPTOR.message_types_by_name['StreamMetricsSchema'] = _STREAMMETRICSSCHEMA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# protoc-generated message class bound to its descriptor. This module is
# marked DO NOT EDIT; regenerate from stream_metrics_schema.proto instead.
StreamMetricsSchema = _reflection.GeneratedProtocolMessageType('StreamMetricsSchema', (_message.Message,), {
  'DESCRIPTOR' : _STREAMMETRICSSCHEMA,
  '__module__' : 'stream_metrics_schema_pb2'
  # @@protoc_insertion_point(class_scope:metadata_center.StreamMetricsSchema)
  })
_sym_db.RegisterMessage(StreamMetricsSchema)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
f02c685f1d84a5172d78157a3535cdc42dd04fb0 | 21e7542abf4dbb3fdfbe7b021860c70cdf8b5ee8 | /dnacauldron/Filter.py | 722d767b55440ec6d38933bd8af2117736905069 | [
"MIT"
] | permissive | Edinburgh-Genome-Foundry/DnaCauldron | 8a4b4b71481a4b8f6c944b7a542f735a27040fc1 | 21d9f96926dd3f6920e8d9783127e7115948eae3 | refs/heads/master | 2023-08-31T14:25:16.897465 | 2023-07-28T17:01:39 | 2023-07-28T17:01:39 | 91,811,105 | 44 | 9 | MIT | 2023-07-18T22:12:47 | 2017-05-19T13:53:31 | Python | UTF-8 | Python | false | false | 3,236 | py | """
Filters applied in methods such as ``mix.compute_circular_assemblies`` in order
to filter out circular assemblies which would have the wrong marker, or
restriction sites of the digestion enzyme (these are unstable)
"""
from Bio import Restriction
from Bio.Seq import Seq
from .biotools import record_is_linear
class NoRestrictionSiteFilter:
    """Filters to ignore fragments and final assemblies containing a given
    restriction site
    """
    def __init__(self, enzyme_name):
        # Look the enzyme up by name in Biopython's Restriction registry.
        self.enzyme_name = enzyme_name
        self.enzyme = Restriction.__dict__[enzyme_name]
    def __call__(self, seqrecord):
        """Return True (keep the record) when the enzyme finds no site."""
        is_linear = record_is_linear(seqrecord, default=True)
        if is_linear:
            # Shameful hack so that enzyme sites of enzymes cutting outside
            # of the sequence (but have their site inside) will be detected
            seq = "AAAAAA" + Seq(str(seqrecord.seq)) + "AAAAAA"
        else:
            seq = seqrecord.seq
        return (self.enzyme.search(seq, linear=is_linear) == [])
    def __repr__(self):
        return ("NoRestriction(%s)" % self.enzyme_name)
    def __str__(self):
        return ("NoRestriction(%s)" % self.enzyme_name)
class NoPatternFilter:
    """Filter out fragments/assemblies whose DNA sequence contains a pattern.

    The pattern must be an exact sequence of DNA. Calling the filter on a
    record returns True (keep it) when the pattern is absent.
    """
    # TODO: regular expressions
    def __init__(self, pattern):
        self.pattern = pattern
    def __call__(self, seqrecord):
        # Bug fix: the original wrote find(self.pattern == -1), putting the
        # comparison INSIDE the call, so the pattern was never searched for.
        return seqrecord.seq.find(self.pattern) == -1
class TextSearchFilter:
    """Keep assemblies whose text does (or does not) contain a pattern.

    The text is searched across every feature of the construct (its type
    plus qualifier keys and values) and the record annotations. By
    default a construct passes when the text IS found somewhere; with
    ``is_forbidden=True`` it passes only when the text is absent.
    """
    def __init__(self, text, is_forbidden=False):
        self.text = text
        self.is_forbidden = is_forbidden
    @staticmethod
    def gather_all_feature_text(feature):
        """Return a single string of all text in the feature (+qualifiers)."""
        parts = [feature.type]
        parts.extend(str(key) for key in feature.qualifiers.keys())
        parts.extend(str(value) for value in feature.qualifiers.values())
        return " ".join(parts)
    def gather_all_texts(self, seqrecord):
        """Return a single string of all texts in all record features."""
        parts = [self.gather_all_feature_text(feature)
                 for feature in seqrecord.features]
        parts.extend(str(annotation) for annotation in seqrecord.annotations)
        return " ".join(parts)
    def __call__(self, seqrecord):
        found = self.text in self.gather_all_texts(seqrecord)
        return not found if self.is_forbidden else found
class FragmentSetContainsPartsFilter:
    """Accept a fragment set only if it contains every named part."""
    def __init__(self, part_names):
        self.mandatory_part_names = set(part_names)
    def __call__(self, fragments):
        present = {fragment.original_part.id for fragment in fragments}
        # True when all mandatory part names appear among the fragments.
        return present.issuperset(self.mandatory_part_names)
| [
"valentin.zulkower@gmail.com"
] | valentin.zulkower@gmail.com |
7189c697d9a266365e1e83739f9a2ba79c9ee6bd | 272ae95716e530d538937ded59ec5b6e0b6d4db8 | /섹션 2/2. K번째 수/AA.py | 7e7fb5d0ad9b617148e4fe6e520d0a3c4d09457b | [] | no_license | gogoheejun/algorithm | 83a1cb30bff5c349f53be16764e517a46e99cf1c | 39e999abf7170f434a7ac6e1f698f066e55aca03 | refs/heads/main | 2023-06-22T13:06:32.135917 | 2021-07-25T15:46:19 | 2021-07-25T15:46:19 | 383,379,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | import sys
# sys.stdin = open("input.txt", "rt")
T = int(input())
for t in range(T):
n, s, e, k = map(int, input().split())
a = list(map(int, input().split()))
a = a[s-1:e]
a.sort()
print("#%d %d" % (t+1, a[k-1]))
| [
"heejjuunn@gmail.com"
] | heejjuunn@gmail.com |
853bbbaec74dc86334d689deb94adad51a9e766f | 4917066aaf02a14a5a00308eacb3cfaafa185f51 | /production/arrplastic.py | 8cc75a743984a555012d0d928aa014aafab6ad6f | [
"MIT"
] | permissive | markusritschel/thesis | d7bf28a7807aa1e3d2ecd1d3d67f1866a8640e1b | effc196ce062223e47fcfdfef205cc5e1678e19e | refs/heads/main | 2023-05-09T23:55:59.807218 | 2021-06-07T01:05:09 | 2021-06-07T01:05:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | import sys
import os
# Pick one of 21 tauRef values (5.0 .. 6.0 in steps of 0.05) by CLI index.
arg = int(sys.argv[1])
tauRef = [(i / 20) for i in range(100, 121)][arg] # 21 cases
import math
import planetengine
# Anchor planetengine's output under this script's name in the cwd.
planetengine.set_global_anchor(os.path.basename(__file__)[:-3], '.')
from planetengine.systems import Arrhenius, Viscoplastic
from planetengine.initials import Sinusoidal
# Stop condition: averages within 1e-3 tolerance over at least 50 samples.
final = (planetengine.finals.Averages, {'tolerance': 1e-3, 'minlength': 50})
initial = Sinusoidal(freq = 1.)
# Ramp alpha upward in half-decade steps (1e4 .. 1e7), using each
# converged Arrhenius run as the initial condition of the next.
# Resolution grows with alpha, capped at 64.
for alpha in [10 ** (i / 2.) for i in range(8, 15)]:
    system = Arrhenius(
        f = 1.,
        aspect = 1.,
        res = min(64, 2 ** math.floor(math.log10(alpha))),
        alpha = alpha,
        observers = True,
        temperatureField = initial,
        innerMethod = 'lu',
        courant = 1.,
        )
    system[:final]()
    initial = system
# Final Viscoplastic run seeded from the last Arrhenius state.
# NOTE(review): the [:final:100] slice presumably means "run to the stop
# condition with stride 100" -- confirm against planetengine's API.
system = Viscoplastic(
    f = 1.,
    aspect = 1.,
    res = 64,
    alpha = 10.**7.,
    tauRef = tauRef,
    observers = True,
    temperatureField = initial,
    innerMethod = 'lu',
    courant = 1,
    )
system[:final:100]()
| [
"rohan.byrne@gmail.com"
] | rohan.byrne@gmail.com |
809b05581a91d7f83e0e9b65055f4f625b740d4f | e84672db206696b8f48113a754d85e93f9d57859 | /routers/about_us_router/schemas.py | 2119773164f185aca9806a241343eebc3dd6d7e1 | [] | no_license | eliblurr/eav1 | 80a24a1f27bc7ee4d9444f87815b4a28bd4f8c51 | 31a4d44d80ce55ac0df1bffcb3ce4d2a8a906f93 | refs/heads/main | 2023-06-08T23:12:11.116518 | 2021-06-23T08:58:58 | 2021-06-23T08:58:58 | 311,332,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | from pydantic import BaseModel
from typing import Optional
import datetime
class AboutUsBase(BaseModel):
    """Shared About-Us fields used by the create/update/read schemas."""
    title: Optional[str]       # optional display title
    metatitle: Optional[str]   # optional meta/SEO title -- presumably; confirm with templates
    description: str           # required body text
    index: int                 # presumably the ordering position -- confirm with callers
    status: Optional[bool]     # visibility/enabled flag (None = unspecified)
class CreateAboutUs(AboutUsBase):
    """Creation payload; inherits all fields from AboutUsBase unchanged."""
    pass
class UpdateAboutUs(AboutUsBase):
    """Partial-update payload: every inherited field is made optional."""
    title: Optional[str]
    metatitle: Optional[str]
    description: Optional[str]
    index: Optional[int]
    status: Optional[bool]
class AboutUs(AboutUsBase):
    """Read schema returned from the database (ORM mode enabled)."""
    id: int
    date_created: datetime.datetime
    date_modified: datetime.datetime
    class Config():
        # Allow pydantic to read attributes off ORM model instances.
        orm_mode = True
"segbawuel@aiti-kace.com"
] | segbawuel@aiti-kace.com |
e5d339075d6c49b20d922195e8001b0778f3b4c1 | c80ec1805a7e6cb1bd3f4b3e383ef4f4cf164765 | /gen/filters/rules/media/_hasnotematchingsubstringof.py | ac8d27c0922b99dc89a90d0f24532a4324f44713 | [] | no_license | balrok/gramps_addon | 57c8e976c47ea3c1d1298d3fd4406c13909ac933 | 0c79561bed7ff42c88714edbc85197fa9235e188 | refs/heads/master | 2020-04-16T03:58:27.818732 | 2015-02-01T14:17:44 | 2015-02-01T14:17:44 | 30,111,898 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasnotesubstrbase import HasNoteSubstrBase
#-------------------------------------------------------------------------
# "Media having notes that contain a substring"
#-------------------------------------------------------------------------
class HasNoteMatchingSubstringOf(HasNoteSubstrBase):
    """Media having notes containing <substring>"""
    # Only the rule metadata is defined here; the substring matching
    # itself is implemented by HasNoteSubstrBase.
    name = _('Media objects having notes containing <substring>')
    description = _("Matches media objects whose notes contain text "
                    "matching a substring")
| [
"carl.schoenbach@gmail.com"
] | carl.schoenbach@gmail.com |
a99300fc155bea9ad18198e891be1b2266256ab3 | 0a0536142014f759aa08974118133767c1eac6a8 | /build/pal_msgs/pal_tablet_msgs/catkin_generated/pkg.develspace.context.pc.py | 15f1eff9bef9f885ff57e8eb85ea040e8ac92198 | [] | no_license | EricSun787/SSJ_AGV | bb1851bf5102b6917888717fabf4f627f142e47c | 01d387efa1b53bd1a909f30dc89b14c9364ffd72 | refs/heads/master | 2023-07-08T18:30:43.704457 | 2017-09-19T16:02:44 | 2017-09-19T16:02:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ssj/ros_ws/devel/include".split(';') if "/home/ssj/ros_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_tablet_msgs"
PROJECT_SPACE_DIR = "/home/ssj/ros_ws/devel"
PROJECT_VERSION = "0.11.3"
| [
"1012116832@qq.com"
] | 1012116832@qq.com |
2fcb25aed79d349b7d2fd32d4022304eb3510056 | 26edc9df2bc9df95cd94e6cea78fa2ffb6c2ac44 | /Pmw-2.0.0/Pmw/Pmw_2_0_0/demos/ConfigClass.py | 2602ed7980043f9a429f626772cb4fc6a8601319 | [] | no_license | khw7096/opener | 55c3c43ddfa62c9f2bdf23cccde1b9abbcb7ece9 | 4981e1c53e7fe55f9c4ba7c5f57ee9f8634f389d | refs/heads/master | 2021-06-09T10:57:53.101948 | 2017-06-06T09:18:26 | 2017-06-06T09:18:26 | 35,756,994 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | title = 'Component python class configuration demonstration'
# Import Pmw from this directory tree.
import sys
sys.path[:0] = ['../../..']
import tkinter
import Pmw
class MyButton(tkinter.Button):
    """An ordinary Button that forces green/red (normal/active) colors."""
    def __init__(self, master=None, cnf={}, **kw):
        self.__toggle = 0  # retained from the original; unused in this demo
        # Override any caller-supplied colors with the demo's scheme.
        kw['background'] = 'green'
        kw['activebackground'] = 'red'
        # Call the base initializer directly (was an odd *(self, master,
        # cnf) tuple-unpacking with identical effect).
        tkinter.Button.__init__(self, master, cnf, **kw)
class Demo:
    """Builds the demo widgets: three EntryFields whose labels use
    different widget classes, plus a ButtonBox with a custom button."""
    def __init__(self, parent):
        # Create a title label:
        label = tkinter.Label(parent,
            text = 'EntryFields with label components of specified type:')
        label.pack(fill='x', expand=1, padx=10, pady=5)
        # Create and pack some EntryFields.
        entries = []
        # Default label component (a plain Label).
        entry = Pmw.EntryField(parent,
            labelpos = 'w',
            label_text = 'Label'
        )
        entry.pack(fill='x', expand=1, padx=10, pady=5)
        entries.append(entry)
        # Label component overridden to a standard Button.
        entry = Pmw.EntryField(parent,
            labelpos = 'w',
            label_pyclass = tkinter.Button,
            label_text = 'Button'
        )
        entry.pack(fill='x', expand=1, padx=10, pady=5)
        entries.append(entry)
        # Label component overridden to the custom-colored MyButton.
        entry = Pmw.EntryField(parent,
            labelpos = 'w',
            label_pyclass = MyButton,
            label_text = 'Special button'
        )
        entry.pack(fill='x', expand=1, padx=10, pady=5)
        entries.append(entry)
        Pmw.alignlabels(entries)
        # Create and pack a ButtonBox.
        buttonBox = Pmw.ButtonBox(parent,
            labelpos = 'nw',
            label_text = 'ButtonBox:')
        buttonBox.pack(fill = 'both', expand = 1, padx=10, pady=5)
        # Add some buttons to the ButtonBox.
        buttonBox.add('with a')
        buttonBox.add('special', pyclass = MyButton)
        buttonBox.add('button')
######################################################################
# Create demo in root window for testing.
# Build the demo in a root window when run directly.
if __name__ == '__main__':
    root = tkinter.Tk()
    Pmw.initialise(root)
    root.title(title)
    exitButton = tkinter.Button(root, text = 'Exit', command = root.destroy)
    exitButton.pack(side = 'bottom')
    widget = Demo(root)
    root.mainloop()
"khw7096@gmail.com"
] | khw7096@gmail.com |
0fad5ee29a5268ce97c8cf84b91a05bda392f678 | 63afbceb127443806e7ee989be74e54e04152e88 | /app01/migrations/0009_auto_20171205_1005.py | 73ded2547d58a26c5a03a167482d838ba69312d4 | [] | no_license | Jamin2018/Django_zhihu | 002be8c2ff457f44798b881aaebb98f9611c165e | 1f5adc08cfd66eb2cf1049cf9e09ad0bb66d5509 | refs/heads/master | 2021-08-23T13:52:58.887369 | 2017-12-05T04:26:42 | 2017-12-05T04:26:42 | 113,132,441 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-05 02:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: gives Comment.body a default and makes
    the Comment.p / Comment.u foreign keys CASCADE on delete with a
    default of '1'. Prefer regenerating over hand-editing.
    """
    dependencies = [
        ('app01', '0008_auto_20171205_1003'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='body',
            field=models.TextField(default='1'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='comment',
            name='p',
            field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='app01.Post'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='comment',
            name='u',
            field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='app01.User'),
            preserve_default=False,
        ),
    ]
| [
"389098898@qq.com"
] | 389098898@qq.com |
a9f0f51216566c7f222ea78b7291aa59324f8376 | 6b9865a42a86c538cff987b3079da5b644137b37 | /0x1B-island_perimeter/0-island_perimeter.py | 340b08d724b063ab8518cf088b66084e17c4dce4 | [] | no_license | zahraaassaad/holbertonschool-python | 099072b0350f594adf30b47eb18fcdce0375546d | 83d7d185f14f44ea4481538ab1e04463a9b62739 | refs/heads/master | 2023-01-09T03:34:48.082352 | 2020-11-20T17:54:05 | 2020-11-20T17:54:05 | 291,666,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | #!/usr/bin/python3
"""
Module for to_graph.
"""
def island_perimeter(grid):
    """Return the perimeter of the island described in *grid*.

    *grid* is a list of rows of 0 (water) / 1 (land) cells. Each land
    cell contributes four edges; every adjacent pair of land cells hides
    two of them (one from each cell).
    """
    cells = 0
    shared = 0
    for i, row in enumerate(grid):
        for j in range(len(grid[0])):
            if row[j] == 1:
                cells += 1
                # Count only the up/left neighbours so each adjacency
                # is seen exactly once.
                if i > 0 and grid[i - 1][j] == 1:
                    shared += 1
                if j > 0 and row[j - 1] == 1:
                    shared += 1
    return cells * 4 - shared * 2
| [
"zahraa.asaad8@gmail.com"
] | zahraa.asaad8@gmail.com |
0616483d02c5aefc6595b766907293a1a338733f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_231/ch131_2020_04_01_18_01_14_493339.py | eb145819744ce0d279dd15de473d87ced7a5bfef | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import random
# Guessing game: the player buys guesses and tries to hit a random sum.
s=random.randint(2,20)
n1=int(input('Digite um numero:'))
n2=int(input('Digite um numero maior ou igual:'))
# NOTE(review): the two ifs below are independent, so the else pairs only
# with the second if -- 'Soma no meio' prints whenever s <= n2, even when
# 'Soma menor' was already printed. Confirm whether elif was intended.
if s<n1:
    print('Soma menor')
if s>n2:
    print('Soma maior')
else:
    print('Soma no meio')
d=10
print('Voce tem {} dinheiros'.format(d))
q=int(input('Quantos chutes voce quer comprar?'))
d=d-q  # each purchased guess costs 1 dinheiro
c=int(input('Qual é o seu chute?'))
# Re-prompt while the guess is wrong and purchased guesses remain.
while c!=s and q>0:
    q= q-1
    c= int(input('Errou tente novamente:'))
if s==c:
    d= d+ 5*d  # winning multiplies the remaining balance by 6
print('Você terminou o jogo com {} dinheiros'.format(d))
"you@example.com"
] | you@example.com |
1eceb6d7ed6e20c50f6cd3cfa5ed3cb1af5c6b5f | 5508368df9df1036755aeaa8574bdadcc25359f8 | /AmadoWHApp/migrations/0020_auto_20180811_1629.py | 1744319d9177284bc372d66e2ea8518549de2da6 | [] | no_license | HanSol1994/Amado | 4627a6165009733059e8e87f545244f69d492b91 | 9fbc37250b9974bbf3a3c3a2571a748a300b2d29 | refs/heads/master | 2022-01-23T23:59:30.666584 | 2019-07-23T08:12:19 | 2019-07-23T08:12:19 | 198,373,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,745 | py | # Generated by Django 2.0.5 on 2018-08-11 11:59
from django.db import migrations, models
import django_jalali.db.models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2018-08-11).

    Drops the ``ShowHelpTicket`` model and refreshes field definitions:
    jalali date/time defaults were frozen at generation time (the literal
    '1397-05-20' / '16:29:16' values below), plus Persian ``verbose_name``
    labels on price/date fields.
    """
    dependencies = [
        ('AmadoWHApp', '0019_auto_20180806_1249'),
    ]
    operations = [
        migrations.DeleteModel(
            name='ShowHelpTicket',
        ),
        migrations.AlterField(
            model_name='amadofood',
            name='price_change_date',
            field=django_jalali.db.models.jDateField(default='1397-05-20', verbose_name='تاریخ تغییر قیمت'),
        ),
        migrations.AlterField(
            model_name='branchwarehouse',
            name='date',
            field=django_jalali.db.models.jDateField(default='1397-05-20', verbose_name='تاریخ موجودی'),
        ),
        migrations.AlterField(
            model_name='foodsale',
            name='date',
            field=django_jalali.db.models.jDateField(default='1397-05-20', verbose_name='تاریخ فروش'),
        ),
        migrations.AlterField(
            model_name='message',
            name='message_date',
            field=models.CharField(blank=True, default='1397-05-20 ساعت 16:29', max_length=32, null=True, verbose_name='تاریخ ارسال پیام'),
        ),
        migrations.AlterField(
            model_name='price',
            name='cost',
            field=models.IntegerField(verbose_name='قیمت(ریال)'),
        ),
        migrations.AlterField(
            model_name='price',
            name='date',
            field=django_jalali.db.models.jDateField(default='1397-05-20', verbose_name='تاریخ قیمت'),
        ),
        migrations.AlterField(
            model_name='product',
            name='price_change_date',
            field=django_jalali.db.models.jDateField(default='1397-05-20', verbose_name='تاریخ تغییر قیمت'),
        ),
        migrations.AlterField(
            model_name='product',
            name='product_actual_price_1',
            field=models.IntegerField(blank=True, null=True, verbose_name='قیمت تمام شده نوع ۱(ریال)'),
        ),
        migrations.AlterField(
            model_name='product',
            name='product_actual_price_2',
            field=models.IntegerField(blank=True, null=True, verbose_name='قیمت تمام شده نوع ۲(ریال)'),
        ),
        migrations.AlterField(
            model_name='request',
            name='request_date',
            field=django_jalali.db.models.jDateField(blank=True, default='1397-05-20', null=True, verbose_name='تاریخ درخواست'),
        ),
        migrations.AlterField(
            model_name='request',
            name='request_time',
            field=models.TimeField(blank=True, default='16:29:16', null=True, verbose_name='ساعت درخواست'),
        ),
        migrations.AlterField(
            model_name='requestproduct',
            name='request_date',
            field=django_jalali.db.models.jDateField(blank=True, default='1397-05-20', null=True, verbose_name='تاریخ ثبت'),
        ),
        migrations.AlterField(
            model_name='requestproduct',
            name='request_time',
            field=models.TimeField(blank=True, default='16:29:16', null=True, verbose_name='ساعت ثبت'),
        ),
        migrations.AlterField(
            model_name='requestproductvariance',
            name='request_date',
            field=django_jalali.db.models.jDateField(blank=True, default='1397-05-20', null=True, verbose_name='تاریخ ثبت مغایرت'),
        ),
        migrations.AlterField(
            model_name='requestproductvariance',
            name='request_time',
            field=models.TimeField(blank=True, default='16:29:16', null=True, verbose_name='ساعت ثبت مغایرت'),
        ),
        migrations.AlterField(
            model_name='shopdetail',
            name='last_price',
            field=models.IntegerField(verbose_name='آخرین قیمت(ریال)'),
        ),
        migrations.AlterField(
            model_name='shopdetail',
            name='last_price_date',
            field=django_jalali.db.models.jDateField(default='1397-05-20', verbose_name='تاریخ آخرین قیمت'),
        ),
        migrations.AlterField(
            model_name='shopdetail',
            name='rc_date',
            field=django_jalali.db.models.jDateField(default='1397-05-20', verbose_name='تاریخ دریافت'),
        ),
        migrations.AlterField(
            model_name='shoprequest',
            name='from_date',
            field=django_jalali.db.models.jDateField(default='1397-05-20', verbose_name='تاریخ خرید'),
        ),
    ]
| [
"haansol@gmail.com"
] | haansol@gmail.com |
23e930e42475382cf34d1a2f27f9c1fea496abce | 7ab41799fd38489c93282f1beb3b20e7ef8ff165 | /python/141.py | 513b3798a89e31e94eb8c4208e90df739cbc0f2d | [] | no_license | scturtle/leetcode-sol | 86c4095df6b31a9fcad683f2d63669ce1691633c | e1a9ce5d9b8fe4bd11e50bd1d5ba1933de845db7 | refs/heads/master | 2020-04-23T00:01:37.016267 | 2015-11-21T04:15:27 | 2015-11-21T04:15:27 | 32,385,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
slow = fast = head
while slow and fast:
fast = fast.next
if fast is slow:
return True
if fast:
fast = fast.next
if fast is slow:
return True
slow = slow.next
return False
| [
"scturtle@gmail.com"
] | scturtle@gmail.com |
2d666e7337eebaf1bf48c46e9692e00a29ebce20 | 7c7c22e1b9c2406cb417b3ca394878765d2cb9de | /app/models.py | c4514e30300feec142c3b1d73c508db24f84b49c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Carolwanzuu/m-blog | 215312a91702c771cf56041623314607319f474f | baa4df0a5d43f216f82829bb5eeb8669b6317c4b | refs/heads/master | 2023-06-05T09:58:53.344224 | 2021-06-30T08:40:08 | 2021-06-30T08:40:08 | 378,681,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,990 | py | from . import db
from datetime import datetime
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
# flask-login user loader: given the user id stored in the session cookie,
# return the matching User row (or None when it no longer exists).
@login_manager.user_loader
def load_user(user_id):
    return User.query.get(int(user_id))
class Quote:
    """Plain value object holding one quote fetched from an external API."""

    def __init__(self, id, quote, author):
        # Expose the constructor arguments directly as public attributes.
        self.id, self.quote, self.author = id, quote, author
# class User:
class User(UserMixin,db.Model):
    """Application account; owns blog posts and comments.

    ``UserMixin`` supplies the flask-login session API
    (``is_authenticated``, ``get_id`` and friends).
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String(255))
    email = db.Column(db.String(255))
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String(255))
    # Hashed password; written only through the ``password`` setter below.
    # CLEANUP: this column was declared twice with an identical spec — the
    # second declaration silently replaced the first, so one was removed.
    pass_secure = db.Column(db.String(255))
    blog = db.relationship('Blog', backref = 'users', lazy = 'dynamic')
    comment = db.relationship('Comment', backref = 'users', lazy = 'dynamic')

    def save_user(self):
        """Persist this user."""
        db.session.add(self)
        db.session.commit()

    def delete_user(self):
        """Remove this user from the database."""
        db.session.delete(self)
        db.session.commit()

    @property
    def password(self):
        # The plain-text password is never stored, so it can never be read.
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Store only a salted hash of the supplied password.
        self.pass_secure = generate_password_hash(password)

    def verify_password(self,password):
        """Return True when ``password`` matches the stored hash."""
        return check_password_hash(self.pass_secure,password)

    def __repr__(self):
        return f'User {self.username}'
class Blog(db.Model):
    """A blog post authored by a ``User``; owns a set of ``Comment`` rows."""
    # BUG FIX: was ``__tablename`` (missing trailing underscores), which
    # SQLAlchemy ignores. The declarative default name for class ``Blog``
    # is also 'blog', so the actual table name is unchanged.
    __tablename__ = 'blog'
    id = db.Column(db.Integer,primary_key = True)
    title = db.Column(db.String(255))
    blog_content = db.Column(db.String(255))
    author = db.Column(db.String(255))
    posted = db.Column(db.DateTime, default = datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment',backref = 'blog',lazy = 'dynamic')

    def save_blog(self):
        """Persist this post."""
        db.session.add(self)
        db.session.commit()

    def delete_blog(self):
        """Delete this post."""
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def get_blog(cls,user_id):
        """Return the list of posts written by ``user_id``.

        BUG FIX: the original ran the query and then returned the ``Blog``
        class itself, discarding the result.
        """
        blogs = cls.query.filter_by(user_id=user_id).all()
        return blogs

    def __repr__(self):
        # BUG FIX: was ``self.name`` — Blog has no ``name`` attribute, so
        # repr() raised AttributeError.
        return f'Blog {self.title}'
class Comment(db.Model):
    """A comment left by a ``User`` on a ``Blog`` post."""
    __tablename__='comments'
    id=db.Column(db.Integer,primary_key=True)
    comment=db.Column(db.String)
    posted=db.Column(db.DateTime,default=datetime.utcnow)
    user_id=db.Column(db.Integer,db.ForeignKey("users.id"))
    blog_id=db.Column(db.Integer,db.ForeignKey('blog.id'))
    def save_comment(self):
        # Persist this comment.
        db.session.add(self)
        db.session.commit()
    def delete_comment(self):
        # Remove this comment from the database.
        db.session.delete(self)
        db.session.commit()
    def __repr__(self):
        return f'Comment{self.comment}'
    @classmethod
    def get_comments(cls,id):
        # All comments attached to the blog post whose primary key is ``id``.
        # (The parameter shadows the builtin ``id``; kept for compatibility.)
        comment=Comment.query.filter_by(blog_id=id).all()
        return comment
| [
"carolkyalo01@gmail.com"
] | carolkyalo01@gmail.com |
846264f0a27ea09c3be94e1151a676b918da4b47 | d561fab22864cec1301393d38d627726671db0b2 | /python/helpers/typeshed/stdlib/3/linecache.pyi | a77de887c7b3029234ce84f5159ce0aee0a01eb9 | [
"Apache-2.0",
"MIT"
] | permissive | Vedenin/intellij-community | 724dcd8b3e7c026936eed895cf964bb80574689a | 74a89fa7083dedc6455a16e10cf779d191d79633 | refs/heads/master | 2021-01-25T00:47:43.514138 | 2017-03-27T15:48:36 | 2017-03-27T15:54:02 | 86,361,176 | 1 | 1 | null | 2017-03-27T16:54:23 | 2017-03-27T16:54:23 | null | UTF-8 | Python | false | false | 200 | pyi | from typing import Any
# Type stubs for the stdlib ``linecache`` module.
def getline(filename: str, lineno: int, module_globals: Any=...) -> str: pass
def clearcache() -> None: pass
# NOTE(review): at runtime ``linecache.getlines`` returns a list of lines;
# the ``None`` return annotation below looks wrong — confirm against the
# current upstream typeshed stub before relying on it.
def getlines(filename: str, module_globals: Any=...) -> None: pass
| [
"andrey.vlasovskikh@gmail.com"
] | andrey.vlasovskikh@gmail.com |
dd67cc4be7876eb5b448ca6cb7d1e9b1263d0eb0 | b4e326e8cf257f3920767a258a0740b9c121d0cd | /general-practice/28_08_2019/p1.py | 7280f4b0c5ff41b04c05fe9b85d3dfd595dfac55 | [] | no_license | luthraG/ds-algo-war | 6646e7539dc20d1acd631edb2ed0a83384e4ea7e | 05a0b1c6abe47eb5a403b0f61e315e6609bfa466 | refs/heads/master | 2020-07-03T23:39:21.849008 | 2019-09-18T09:43:57 | 2019-09-18T09:43:57 | 202,087,293 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | from timeit import default_timer as timer
if __name__ == '__main__':
    # Project Euler #1 in O(1) per query: the sum of the multiples of k
    # below N is k * m * (m + 1) / 2 with m = (N - 1) // k; multiples of
    # 15 are counted by both the 3- and 5-series, so they are subtracted
    # once (inclusion-exclusion).
    test_cases = int(input('Enter test cases :: '))
    for t in range(test_cases):
        number = int(input('Enter number :: '))
        start = timer()
        # since we need multiples below
        number -= 1
        mux3 = number // 3
        mux5 = number // 5
        mux15 = number // 15
        mux3 = 3 * ((mux3 * (mux3 + 1)) // 2)
        mux5 = 5 * ((mux5 * (mux5 + 1)) // 2)
        mux15 = 15 * ((mux15 * (mux15 + 1)) // 2)
        # IDIOM FIX: renamed from ``sum`` so the builtin is not shadowed.
        total = mux3 + mux5 - mux15
        print('Sum of multiples of 3 and 5 below {} is {}'.format(number + 1, total))
        end = timer()
        print('Time taken is {}'.format(end - start))
| [
"luthra.zenith@gmail.com"
] | luthra.zenith@gmail.com |
a33b4f14f7a159a87a0c7bae1d44ca4f5cfbdda2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_babble.py | 5ff3bdd5eb3760e6b698e33c67e8e28434a76e73 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py |
#calss header
class _BABBLE():
    """Generated dictionary entry for the verb "babble"."""

    def __init__(self):
        self.name = "BABBLE"
        self.definitions = [u'to talk or say something in a quick, confused, excited, or silly way: ', u'(of a stream) to make the low, continuous noise of water flowing over stones: ']
        self.parents = []
        # NOTE(review): keeps the original 'childen' (sic) attribute name for
        # compatibility with any generated code that reads it.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'verbs'

    def run(self, obj1=None, obj2=None):
        """Return this entry's JSON payload.

        BUG FIX: the defaults were mutable lists (``obj1=[]``), which Python
        shares across calls; ``None`` sentinels avoid that trap. Both
        arguments are accepted for interface compatibility and are unused.
        """
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1645d1441b6fb91bfd157539cff28ac587c5682f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_386/ch140_2020_04_01_19_54_27_737379.py | 9c722141f7b9ff19386a9a12b1ec1d2082a859de | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | notas=[1, 2, 4.2, 5.5, 8, 9]
def faixa_notas(notas):
    """Count grades per band and return ``[below 5, 5 to 7, above 7]``.

    BUG FIX: the original initialized the counters as *lists* and then did
    ``list += 1`` (a TypeError on the first grade), bumped ``nota_a`` for
    grades below 5, never counted a grade of exactly 5, and returned
    nothing. NOTE(review): band boundaries (<5 / 5–7 / >7) follow the
    usual statement of this exercise — confirm against the spec.
    """
    nota_b = 0
    nota_m = 0
    nota_a = 0
    for nota in notas:
        if nota < 5:
            nota_b += 1
        elif nota <= 7:
            nota_m += 1
        else:
            nota_a += 1
    return [nota_b, nota_m, nota_a]
| [
"you@example.com"
] | you@example.com |
64c83edfd993d2f54b775476fe706d0ed2d40465 | 57f5cdf321d4fc39217b2c3dcd5ce3750330d618 | /pratica/listas/max_min_for.py | ee94f56573628e24ff949b26fdd1383cff11b48b | [
"MIT"
] | permissive | chicolucio-python-learning/entrevistas-tecnicas | ba9575f680cb663d340840e71654432904f6b5af | 1efec34e18fe0f726a1d3c390e2c0c8b07405360 | refs/heads/main | 2022-12-26T22:53:10.364365 | 2020-10-07T17:42:56 | 2020-10-07T17:42:56 | 302,114,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | import random
import pytest
def max_min(lst):
    """Return the largest and smallest element of *lst*.

    Parameters
    ----------
    lst : list

    Returns
    -------
    tuple
        (max, min)

    Raises
    ------
    ValueError
        If *lst* is empty.
    """
    if not lst:
        raise ValueError('Empty list')
    # Delegate the scans to the built-ins; they perform the same strict
    # comparisons as the original hand-written loop.
    return max(lst), min(lst)
def test_error():
    # An empty list must raise ValueError.
    lst = []
    with pytest.raises(ValueError):
        max_min(lst)
def test_single_element_list():
    # A single element is both the maximum and the minimum.
    lst = [1]
    assert max_min(lst) == (1, 1)
def test_ordered_list():
    # Already-sorted input 0..10: extremes sit at the ends.
    lst = list(range(0, 11))
    assert max_min(lst) == (10, 0)
def test_random_list():
lst = list(range(100))
random.seed(42)
random.shuffle(lst)
assert lst[0] == 42
assert lst[-1] == 81
assert max_min(lst) == (99, 0) | [
"23560423+chicolucio@users.noreply.github.com"
] | 23560423+chicolucio@users.noreply.github.com |
cb13d4be78e90d678affb45cd65d51c3dff6b4e0 | 2ef5b78a1a750ee33d86f36bba176796163e3933 | /demo23/test9.py | 23576f92e307bfc134a62e86f24d243d61c4720d | [] | no_license | LIZEJU/flask-demo | 08f8419757dc4902239b89b3df9ea71ce918ad26 | 6ae201e3cc078b7a3fd18fb6d114b0b83f1b4b41 | refs/heads/master | 2020-09-27T05:58:09.742198 | 2020-01-30T11:04:41 | 2020-01-30T11:04:41 | 226,445,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | import matplotlib.pyplot as plt
import numpy as np
# Ten random integer vertices in [0, 20) for a filled polygon demo.
x = [np.random.randint(0,20) for i in range(10)]
print(x)
y = [np.random.randint(0,20) for i in range(10)]
# x, y give the ordered vertex pairs of the closed region; ``color`` sets
# the fill colour of that region.
plt.fill(x,y,color='cornflowerblue')
# Adjust the polygon's position relative to the axes.
plt.xlim(-1,16)
plt.ylim(-1,16)
# Configure the axis tick marks.
plt.xticks(np.arange(0,16,5))
plt.yticks(np.arange(0,16,5))
# Save the generated figure.
plt.savefig('8.png')
# 展示绘制效果
plt.show() | [
"m18611694189@163.com"
] | m18611694189@163.com |
5d8086c8c19f7b022f0b49451639e6638afc9c7c | 050fc5ca698dfd7612dee42aa980fc7b5eee40a2 | /tests/plugin/data/sw_loguru/services/consumer.py | 45e145e7877e9f8012feeac26a6b6b3138fdd3f0 | [
"Apache-2.0"
] | permissive | apache/skywalking-python | 8ac6ce06630c519f9984a45e74c1fcc88cf5b9d6 | 1a360228c63cd246dd4c5dd8e1f09bdd5556ad7d | refs/heads/master | 2023-09-05T02:45:56.225937 | 2023-08-28T22:19:24 | 2023-08-28T22:19:24 | 261,456,329 | 178 | 122 | Apache-2.0 | 2023-08-28T22:19:26 | 2020-05-05T12:13:49 | Python | UTF-8 | Python | false | false | 1,109 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
if __name__ == '__main__':
    # Minimal FastAPI "consumer" service for the plugin test: proxies
    # GET /users to the companion "provider" service and wraps its JSON
    # reply under the 'http' key.
    from fastapi import FastAPI
    import uvicorn
    app = FastAPI()
    @app.get('/users')
    async def application():
        res = requests.get('http://provider:9091/users', timeout=5)
        return {'http': res.json()}
    uvicorn.run(app, host='0.0.0.0', port=9090)
| [
"noreply@github.com"
] | apache.noreply@github.com |
ab1acbf9736b4d8dcf92e279bca3053be513b34f | f795a505b4c92e7e12b2b905dcfe0a889c9c99a6 | /BIOMD0000000497/model.py | 43f603dce6f353e607e36970b7b15756e45ccb32 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/BIOMD0000000497 | a953ad543534bc2cc6f5197caeb96d17c5fb6931 | ceb94fac6887cfbdc2ba5e33cd31909b102a64cc | refs/heads/master | 2016-09-06T11:59:06.453896 | 2014-10-16T05:19:41 | 2014-10-16T05:19:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import os
# Locate the SBML model shipped next to this file and read it at import time.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000497.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
        return True
    except ImportError:
        return False
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | [
"stanleygu@gmail.com"
] | stanleygu@gmail.com |
c86a379eb2e6c400bfa33f54e0069383be891482 | 854245317cb8031ea9b65347a7f3b78b4ea251b4 | /amazon_scraper/scraper_app/__init__.py | cc9ac8b8bb2b4bd4b4dcc027af07e35d6411c589 | [] | no_license | tahirs95/Web-Scraping-Portfolio | d9bfe8fcc5effec96f5fe123aac6a6f8941c500f | 973b9ba7f0d930f83cbce5ae64dea1a86b882fda | refs/heads/master | 2023-02-27T15:00:40.164995 | 2021-09-22T15:53:48 | 2021-09-22T15:53:48 | 213,682,348 | 3 | 1 | null | 2023-02-16T04:17:55 | 2019-10-08T15:29:31 | SCSS | UTF-8 | Python | false | false | 548 | py | from flask import Flask, render_template, request
from .functions import process_data, scrape
# Flask application object for the scraper front-end.
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def index():
    # Landing page: on POST, scrape the Amazon product page for the ASIN
    # typed into the search form and render the processed result.
    data = {'None': 'None'}
    data_available = False
    if request.form:
        asin = request.form['search']
        if asin:
            url = "https://amazon.com/dp/{}".format(asin)
            data = scrape(url)
            data = process_data(data)
            data_available = True
    return render_template('index.html', data_available=data_available, data=data)
| [
"tahirs95@hotmail.com"
] | tahirs95@hotmail.com |
51836f374510131b73000b64d570b39a3bc1aeeb | d233b312cd3f0c83a06c1cce830252a3664023ff | /catkin_ws/build/universal_robot/ur3_moveit_config/catkin_generated/pkg.installspace.context.pc.py | 9ebc475624671b0e651c4ea0c32ffe85e6e6776c | [] | no_license | leopauly/Observation-learning-Real-world-UR5 | 32691f989ed60d92eca82bea0f47b960a6d48afa | 72e61d07582d05d40a1bde31c99ab5d9cf97f70d | refs/heads/main | 2023-07-05T07:05:00.554268 | 2021-08-12T11:51:58 | 2021-08-12T11:51:58 | 395,289,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin pkg-config context for ``ur3_moveit_config``; the
# literal values were substituted by CMake at configure time (empty strings
# mean nothing is exported for that field).
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur3_moveit_config"
PROJECT_SPACE_DIR = "/home/wisdom/catkin_ws/install"
PROJECT_VERSION = "1.2.5"
| [
"meetleopauly@yahoo.com"
] | meetleopauly@yahoo.com |
4adbab1657f6fe87b55dcd77902ca289972684a1 | a893537a71aa285071a68035c968ba6f5c0ca57d | /ch02/18/18.py | 054f16fcb33111a2f4f27ccd17242175023354dd | [] | no_license | sinchir0/2020_NLP100 | 0a1810b0c299c29fa1a811f68fa87be74f9b3cf9 | 772123da5b5ac4094c26fdce2e192637dc55190a | refs/heads/main | 2023-07-18T04:03:04.123302 | 2021-09-08T22:54:44 | 2021-09-08T22:54:44 | 257,416,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | # 18. 各行を3コラム目の数値の降順にソート
# 各行を3コラム目の数値の逆順で整列せよ(注意: 各行の内容は変更せずに並び替えよ).確認にはsortコマンドを用いよ(この問題はコマンドで実行した時の結果と合わなくてもよい).
def file_line_reader_generator(file_path):
    """Generator over the lines of *file_path* (UTF-8; newlines preserved)."""
    with open(file_path, encoding="utf-8") as in_file:
        yield from in_file
if __name__ == "__main__":
    # Read the file lazily through the generator.
    popular_names = file_line_reader_generator("../10/popular-names.txt")
    # Sort by the 3rd tab-separated column, descending.
    # NOTE(review): the key is the column as a *string*, so ordering is
    # lexicographic rather than numeric; the exercise statement explicitly
    # allows the result to differ from ``sort -n``.
    popular_names = sorted(popular_names, key=lambda x: x.split("\t")[2], reverse=True)
    for name in popular_names:
        print(name)
    # Equivalent UNIX command:
    # sort -n -k 3 -r ../10/popular-names.txt
    # -n: compare according to numerical value
    # -k: select the sort field
    # -r: reverse (descending) order
| [
"s-saito@chic.ocn.ne.jp"
] | s-saito@chic.ocn.ne.jp |
b3bf86f703dabc845c1b7f0e94b45a8abf13cf86 | 61e32bb5ebf868965d3af68b5717672f7b4c51c2 | /denominations.py | 53ed3f12c39a6488ee1fa1c2e7fadf3e2ffa6ff8 | [] | no_license | billaanil3/PRACTICE | e4f885473cc10a17ab422da2f6d29aea576c7b94 | 9e0d6512bd8cbfc0fa4e0d1472ac23d1482bacf9 | refs/heads/master | 2020-11-26T18:25:30.318395 | 2019-12-20T02:22:45 | 2019-12-24T11:00:14 | 229,172,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from collections import OrderedDict
# Note denominations, largest first, so one greedy pass uses the fewest notes.
notes = [2000, 500, 200, 100, 50, 20, 10, 5, 2, 1]


def count_notes(amount, denominations=notes):
    """Greedily split *amount* into bank notes.

    Args:
        amount (int): non-negative amount to break into notes.
        denominations (list[int]): denominations in descending order.

    Returns:
        OrderedDict: denomination -> count, omitting unused denominations.
    """
    res_notes = OrderedDict()
    for note in denominations:
        # BUG FIX: the original guarded this with ``if amount % note:``, so a
        # denomination was recorded only when it did NOT divide the amount
        # exactly — e.g. input 2000 produced an empty result and the final
        # 1-notes were always dropped.
        count, amount = divmod(amount, note)
        if count:
            res_notes[note] = count
    return res_notes


if __name__ == "__main__":
    amount = int(input("Enter a number:"))
    # ``print(x)`` is valid on both Python 2 and 3 for a single argument.
    print(count_notes(amount))
| [
"anil.b@onedelta.in"
] | anil.b@onedelta.in |
9d56ea4a1e627c0491afb179e8aa9e1ff000be66 | c1dd2020da8c3e117aec41619aa56b22cc62b5be | /django1/src/blog/views.py | 2718fc0cb6201b2e0106fd0e77d82a4aa6d846f8 | [] | no_license | rlaqhdwns/django1 | 039dadc5212704222f9bcab389c4eb9f27ede412 | 74180aa0ce2d9e5217ac8df7855482bd86d07ff1 | refs/heads/master | 2020-04-21T23:11:17.801454 | 2019-02-10T03:20:47 | 2019-02-10T03:20:47 | 169,938,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,153 | py | from django.shortcuts import render
#제네릭뷰 : 장고에서 제공하는 여러가지 뷰 기능을 구현한 클래스
#ListView : 특정 모델클래스의 객체 목록을 다루는 기능이 구현된 뷰
#DetailView : 특정 모델클래스의 객체 1개를 다루는 기능이 구현
#FormView : 특정 폼클래스를 다루는 기능이 구현
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView
from django.views.generic.list import ListView
from pyexpat import model
from blog.models import Post, PostFile, PostImage
from blog.forms import PostForm
from django.http.response import HttpResponseRedirect
from django.urls.base import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
#index : 글목록이 뜨는 메인페이지
class Index(ListView):
    """Main page: paginated list of blog posts."""
    # Template rendered by this view.
    template_name='blog/index.html'
    # Model whose objects are listed.
    model = Post
    # Context key under which the object list is handed to the template.
    context_object_name='list'
    # Maximum number of objects shown per page.
    paginate_by= 5
#detail : 글 상세보기 페이지
class Detail(DetailView):
    """Detail page for a single blog post."""
    template_name = 'blog/detail.html'
    model = Post
    # The single Post object is exposed to the template as ``obj``.
    context_object_name = 'obj'
#posting : 글쓰기 페이지
class Posting(LoginRequiredMixin, FormView):
template_name = 'blog/posting.html'
#연동할 폼클래스 저장
form_class = PostForm
context_object_name = 'f'
#is_valid()함수가 True를 반환한 뒤의 처리를 form_valid()함수를 오버라이딩해서 작성
def form_valid(self, form):
#매개변수 form : is_valid()함수를 통과한 PostForm객체
#PostForm객체를 바탕으로 Post객체 저장
#글쓴이(author) 변수가 비어있으므로, 데이터베이스에 저장하지않음
p = form.save(commit=False)#p : Post 객체
#request.user : 요청한 클라이언트의 로그인정보(User 모델클래스 객체)
p.author = self.request.user
p.save() #Post 객체가 데이터베이스에 저장됨
#클라이언트가 보낸 첨부파일, 이미지파일을 바탕으로 PostFile,
#PostImage객체 생성 및 저장
#requeset.FILES : 클라이언트가 서버로 보낸 파일정보를 관리하는 변수
#PostFile객체를 생성
for f in self.request.FILES.getlist('files'):
# f : 파일 정보
pf = PostFile() #새로운 PostFile 모델클래스의 객체 생성
pf.file = f
pf.post = p
pf.save()#데이터베이스에 새로운 PostFile 객체가 저장됨
#PostImage 객체를 생성
for i in self.request.FILES.getlist('images'):
#i : 이미지 정보
pi = PostImage()
pi.post = p
pi.image = i
pi.save()
#완성된 글페이지로 URL이동
return HttpResponseRedirect (reverse('blog:detail', args=(p.id,))) | [
"user@DESKTOP-37GULAI"
] | user@DESKTOP-37GULAI |
3dc9e30e5c222c2883a7eac6fdbd007fc805284a | a31de016611f3b4efc7a576e7113cad1a738419b | /9.8_exception_classes.py | f3fb70d78cdd6362c88d732831f0ffe476b30943 | [] | no_license | Ing-Josef-Klotzner/python | 9d4044d632672fff966b28ab80e1ef77763c78f5 | 3913729d7d6e1b7ac72b46db7b06ca0c58c8a608 | refs/heads/master | 2022-12-09T01:40:52.275592 | 2022-12-01T22:46:43 | 2022-12-01T22:46:43 | 189,040,355 | 0 | 0 | null | 2022-12-01T19:52:37 | 2019-05-28T14:05:16 | Python | UTF-8 | Python | false | false | 207 | py | # exception classes
#import sys
# Three-level hierarchy used to demonstrate which ``except`` clause matches.
class B:
    """Root of the demo exception hierarchy."""
    pass
class C(B):
    """Subclass of B."""
    pass
class D(C):
    """Subclass of C (and therefore also of B)."""
    pass
for c in [B, C, D]:
try:
raise c()
except D:
print "D"
except C:
print "C"
except B:
print "B" | [
"noreply@github.com"
] | Ing-Josef-Klotzner.noreply@github.com |
0ad259d0f323921f6882bfce2be18fa92b27acdc | e845f7f61ff76b3c0b8f4d8fd98f6192e48d542a | /djangocg/utils/safestring.py | f2b147175bdb1cb129a4deff7e5ed7f49f002395 | [
"BSD-3-Clause"
] | permissive | timothyclemans/djangocg | fd150c028013cb5f53f5a3b4fdc960a07fdaaa78 | 52cf28e046523bceb5d436f8e6bf61e7d4ba6312 | refs/heads/master | 2021-01-18T13:20:13.636812 | 2012-08-31T23:38:14 | 2012-08-31T23:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,189 | py | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from djangocg.utils.functional import curry, Promise
from djangocg.utils import six
class EscapeData(object):
    # Marker mixin: instances still require HTML escaping when output.
    pass
class EscapeBytes(bytes, EscapeData):
    """
    A byte string that should be HTML-escaped when output.
    """
    # Behaves exactly like ``bytes``; the subclass only tags the value.
    pass
class EscapeText(six.text_type, EscapeData):
    """
    A unicode string object that should be HTML-escaped when output.
    """
    # Behaves exactly like the native text type; the subclass only tags it.
    pass
# ``EscapeString`` always aliases the flavour backing the native ``str``.
if six.PY3:
    EscapeString = EscapeText
else:
    EscapeString = EscapeBytes
# backwards compatibility for Python 2
EscapeUnicode = EscapeText
class SafeData(object):
    # Marker mixin: instances are trusted and need no further escaping.
    pass
class SafeBytes(bytes, SafeData):
    """
    A bytes subclass that has been specifically marked as "safe" (requires no
    further escaping) for HTML output purposes.
    """
    def __add__(self, rhs):
        """
        Concatenating a safe byte string with another safe byte string or safe
        unicode string is safe. Otherwise, the result is no longer safe.
        """
        t = super(SafeBytes, self).__add__(rhs)
        if isinstance(rhs, SafeText):
            return SafeText(t)
        elif isinstance(rhs, SafeBytes):
            return SafeBytes(t)
        # rhs was not marked safe, so the concatenation is not safe either.
        return t
    def _proxy_method(self, *args, **kwargs):
        """
        Wrap a call to a normal unicode method up so that we return safe
        results. The method that is being wrapped is passed in the 'method'
        argument.
        """
        method = kwargs.pop('method')
        data = method(self, *args, **kwargs)
        if isinstance(data, bytes):
            return SafeBytes(data)
        else:
            return SafeText(data)
    # ``decode`` must keep the "safe" marking on its (text) result.
    decode = curry(_proxy_method, method=bytes.decode)
class SafeText(six.text_type, SafeData):
    """
    A unicode (Python 2) / str (Python 3) subclass that has been specifically
    marked as "safe" for HTML output purposes.
    """
    def __add__(self, rhs):
        """
        Concatenating a safe unicode string with another safe byte string or
        safe unicode string is safe. Otherwise, the result is no longer safe.
        """
        t = super(SafeText, self).__add__(rhs)
        if isinstance(rhs, SafeData):
            return SafeText(t)
        # rhs was not marked safe, so the concatenation is not safe either.
        return t
    def _proxy_method(self, *args, **kwargs):
        """
        Wrap a call to a normal unicode method up so that we return safe
        results. The method that is being wrapped is passed in the 'method'
        argument.
        """
        method = kwargs.pop('method')
        data = method(self, *args, **kwargs)
        if isinstance(data, bytes):
            return SafeBytes(data)
        else:
            return SafeText(data)
    # ``encode`` must keep the "safe" marking on its (bytes) result.
    encode = curry(_proxy_method, method=six.text_type.encode)
# ``SafeString`` always aliases the flavour backing the native ``str``.
if six.PY3:
    SafeString = SafeText
else:
    SafeString = SafeBytes
# backwards compatibility for Python 2
SafeUnicode = SafeText
def mark_safe(s):
    """
    Explicitly mark a string as safe for (HTML) output purposes. The returned
    object can be used everywhere a string or unicode object is appropriate.
    Can be called multiple times on a single string.
    """
    # Already marked safe: return it unchanged (idempotent).
    if isinstance(s, SafeData):
        return s
    # Byte strings, or lazy objects that resolve to bytes, become SafeBytes.
    if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
        return SafeBytes(s)
    # Text and remaining lazy objects become SafeText.
    if isinstance(s, (six.text_type, Promise)):
        return SafeText(s)
    # Anything else is stringified first.
    return SafeString(str(s))
def mark_for_escaping(s):
    """
    Explicitly mark a string as requiring HTML escaping upon output. Has no
    effect on SafeData subclasses.
    Can be called multiple times on a single string (the resulting escaping is
    only applied once).
    """
    # Safe strings and already-marked strings pass through untouched.
    if isinstance(s, (SafeData, EscapeData)):
        return s
    # Byte strings, or lazy objects resolving to bytes, become EscapeBytes.
    if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
        return EscapeBytes(s)
    # Text and remaining lazy objects become EscapeText.
    if isinstance(s, (six.text_type, Promise)):
        return EscapeText(s)
    return EscapeBytes(bytes(s))
| [
"timothy.clemans@gmail.com"
] | timothy.clemans@gmail.com |
124ed1ef4d6daa12465b30bbddac78841214f9f8 | 6d116e51e92d199b65f60929ed51e3b8e2ffcba2 | /dephell/commands/autocomplete.py | 00fe5569a1e70210055c34b2de1fd44352a500b6 | [
"MIT"
] | permissive | avallbona/dephell | a2ce64466092360b82b98ae314f242626d54b099 | b96b401ea6906b464f9ea87f7ec9f52f9ac6297f | refs/heads/master | 2020-05-20T08:47:12.498537 | 2019-05-08T21:12:03 | 2019-05-09T05:56:28 | 185,482,156 | 0 | 0 | null | 2019-05-07T21:42:04 | 2019-05-07T21:42:03 | null | UTF-8 | Python | false | false | 2,208 | py | # built-in
from argparse import ArgumentParser
from pathlib import Path
# external
from appdirs import user_data_dir
from dephell_shells import Shells
# app
from ..actions import make_bash_autocomplete, make_zsh_autocomplete
from ..config import builders
from .base import BaseCommand
class AutocompleteCommand(BaseCommand):
    """Enable DepHell commands autocomplete for current shell.
    https://dephell.readthedocs.io/en/latest/cmd-autocomplete.html
    """
    @classmethod
    def get_parser(cls) -> ArgumentParser:
        """Build the argument parser for ``dephell autocomplete``."""
        parser = ArgumentParser(
            prog='dephell autocomplete',
            description=cls.__doc__,
        )
        builders.build_config(parser)
        builders.build_output(parser)
        return parser

    def __call__(self):
        """Install completion for the detected shell; return True on success."""
        shell = Shells(bin_path=None).shell_name
        msg = 'Autocompletion installed. Please, reload your shell'
        if shell == 'bash':
            self._bash()
            self.logger.info(msg)
            return True
        if shell == 'zsh':
            self._zsh()
            self.logger.info(msg)
            return True
        self.logger.error('unsupported shell', extra=dict(shell=shell))
        return False

    def _bash(self):
        """Write the bash completion script and source it from an rc file."""
        script = make_bash_autocomplete()
        path = Path.home() / '.local' / 'etc' / 'bash_completion.d' / 'dephell.bash-completion'
        # BUG FIX: create the completion directory first; ``write_text``
        # raises FileNotFoundError when it does not exist yet (``_zsh``
        # already did this for its own target directory).
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(script)
        for rc_name in ('.bashrc', '.profile'):
            rc_path = Path.home() / rc_name
            if not rc_path.exists():
                continue
            if 'bash_completion.d' not in rc_path.read_text():
                with rc_path.open('a') as stream:
                    stream.write('\n\nsource {}\n'.format(str(path)))
                break

    def _zsh(self):
        """Write the zsh completion script and source it from ``~/.zshrc``."""
        script = make_zsh_autocomplete()
        path = Path(user_data_dir('dephell')) / '_dephell_zsh_autocomplete'
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(script)
        path.chmod(0o777)
        rc_path = Path.home() / '.zshrc'
        # BUG FIX: tolerate a missing ~/.zshrc; ``read_text`` used to raise
        # FileNotFoundError. ``open('a')`` creates the file when absent.
        if not rc_path.exists() or str(path) not in rc_path.read_text():
            with rc_path.open('a') as stream:
                stream.write('\n\nsource {}\n'.format(str(path)))
| [
"master_fess@mail.ru"
] | master_fess@mail.ru |
82684a66eda279814ea72973da8efc55538b1150 | 4074db4436d5fc5fa5395de072557def620f993e | /0x07-python-test_driven_development/5-text_indentation.py | 8cc9fff7f5c7176d0a15c39387dbc13dcfc8d719 | [] | no_license | Hunt66/holbertonschool-higher_level_programming | 49b4a93a8b565cdd588e26e6348bed5d3e9d6953 | b26f42c1d41bb24842d77bf5cf86c441bd8fcf51 | refs/heads/master | 2020-03-28T11:11:52.204554 | 2019-03-25T19:45:28 | 2019-03-25T19:45:28 | 148,187,536 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | #!/usr/bin/python3
def text_indentation(text):
    """Print ``text`` with two newlines after each '.', '?' and ':'.

    A single space immediately following one of those characters is
    swallowed so the next sentence starts flush at the left margin.

    Args:
        text (str): the text to print.

    Raises:
        TypeError: if ``text`` is not a string.
    """
    if not isinstance(text, str):
        raise TypeError("text must be a string")
    # BUG FIX: build the result in one pass instead of splicing ``text``
    # while indexing it. The original replaced ``text[i + 1]`` in place,
    # which shifted later indices and could drop non-space characters
    # (e.g. "a.b.c" lost the final 'c').
    pieces = []
    skip_one_space = False
    for ch in text:
        if skip_one_space:
            skip_one_space = False
            if ch == ' ':
                continue
        pieces.append(ch)
        if ch in '.?:':
            pieces.append('\n\n')
            skip_one_space = True
    print(''.join(pieces))
| [
"489@holbertonschool.com"
] | 489@holbertonschool.com |
78216d61d2bb30903297162eb9e88377d48f746e | d64a5a065ec3d368ee508f197fc0e61fc7a6607e | /build/navigation/base_local_planner/catkin_generated/pkg.develspace.context.pc.py | 7c5c6126d275af4617da46d917ce5215b4cfcc06 | [] | no_license | ProgettoSGN/charlie | 925f0373b79135cab9d307ddd9a4fbac0dba69cf | b44c65cbb518fe7d3d7fa1b022ece92df3882595 | refs/heads/master | 2023-01-19T09:18:27.382628 | 2020-11-16T12:59:16 | 2020-11-16T12:59:16 | 312,522,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin pkg-config context for ``base_local_planner``; the
# literal paths/lists were substituted by CMake at configure time for the
# devel space of this workspace.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/robot/charlie_ws/devel/include;/home/robot/charlie_ws/src/navigation/base_local_planner/include".split(';') if "/home/robot/charlie_ws/devel/include;/home/robot/charlie_ws/src/navigation/base_local_planner/include" != "" else []
PROJECT_CATKIN_DEPENDS = "angles;costmap_2d;dynamic_reconfigure;geometry_msgs;message_runtime;nav_core;nav_msgs;pluginlib;roscpp;sensor_msgs;std_msgs;tf2;tf2_ros;voxel_grid".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lbase_local_planner;-ltrajectory_planner_ros".split(';') if "-lbase_local_planner;-ltrajectory_planner_ros" != "" else []
PROJECT_NAME = "base_local_planner"
PROJECT_SPACE_DIR = "/home/robot/charlie_ws/devel"
PROJECT_VERSION = "1.16.4"
| [
"74403226+ProgettoSGN@users.noreply.github.com"
] | 74403226+ProgettoSGN@users.noreply.github.com |
27888c9296c46a1804a96ccbc084f8cacc2d38e5 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/alice_corona/mtv_italy_top_20_scraper.py | 54032d35f4c41ec3e149925dd92b9a1ec496cc2a | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,626 | py | import scraperwiki
import scraperwiki
import scraperwiki
import requests
import lxml.html
html = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli-7-gennaio-2012').text
root = lxml.html.fromstring(html)
for item in root.cssselect("span.today") :
date = item.text_content()
date2 = date.replace(' ', '-')
html2 = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli' + date2).text
print date2
#root2 = lxml.html.fromstring(html2)
# for item in root2.cssselect("a.cpChartEntryImage"):
# song = item.text_content()
# print song
#for item in root2.cssselect("span.pos"):
#position = item.text_content()
#for box in root2.cssselect("a"):
#print date, position
#html3 = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli' + date2 + '/pagina-2').text
#root3 = lxml.html.fromstring(html3)#for item in root3.cssselect("span.pos"):
#position = item.text_content()
#print date, position
#for item in root3.cssselect("span.pos"):
#position2 = item.text_content()
#for name in root2.cssselect("a"):
# print date, name.text_content(),
# Blank Python
import scraperwiki
import scraperwiki
import scraperwiki
import requests
import lxml.html
html = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli-7-gennaio-2012').text
root = lxml.html.fromstring(html)
for item in root.cssselect("span.today") :
date = item.text_content()
date2 = date.replace(' ', '-')
html2 = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli' + date2).text
print date2
#root2 = lxml.html.fromstring(html2)
# for item in root2.cssselect("a.cpChartEntryImage"):
# song = item.text_content()
# print song
#for item in root2.cssselect("span.pos"):
#position = item.text_content()
#for box in root2.cssselect("a"):
#print date, position
#html3 = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli' + date2 + '/pagina-2').text
#root3 = lxml.html.fromstring(html3)#for item in root3.cssselect("span.pos"):
#position = item.text_content()
#print date, position
#for item in root3.cssselect("span.pos"):
#position2 = item.text_content()
#for name in root2.cssselect("a"):
# print date, name.text_content(),
# Blank Python
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
f8adb2e5d9355b185e9cfa1e7aa552bd39f443f7 | 245b92f4140f30e26313bfb3b2e47ed1871a5b83 | /airflow/serialization/serializers/kubernetes.py | 0ed9c96f7186007e794cfdde0cc62bc5d2e9cec3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ephraimbuddy/airflow | 238d6170a0e4f76456f00423124a260527960710 | 3193857376bc2c8cd2eb133017be1e8cbcaa8405 | refs/heads/main | 2023-05-29T05:37:44.992278 | 2023-05-13T19:49:43 | 2023-05-13T19:49:43 | 245,751,695 | 2 | 1 | Apache-2.0 | 2021-05-20T08:10:14 | 2020-03-08T04:28:27 | null | UTF-8 | Python | false | false | 2,226 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from airflow.utils.module_loading import qualname
# lazy loading for performance reasons
serializers = [
"kubernetes.client.models.v1_resource_requirements.V1ResourceRequirements",
"kubernetes.client.models.v1_pod.V1Pod",
]
if TYPE_CHECKING:
from airflow.serialization.serde import U
__version__ = 1
deserializers: list[type[object]] = []
log = logging.getLogger(__name__)
def serialize(o: object) -> tuple[U, str, int, bool]:
from kubernetes.client import models as k8s
if not k8s:
return "", "", 0, False
if isinstance(o, (k8s.V1Pod, k8s.V1ResourceRequirements)):
from airflow.kubernetes.pod_generator import PodGenerator
def safe_get_name(pod):
"""
We're running this in an except block, so we don't want it to
fail under any circumstances, e.g. by accessing an attribute that isn't there
"""
try:
return pod.metadata.name
except Exception:
return None
try:
return PodGenerator.serialize_pod(o), qualname(o), __version__, True
except Exception:
log.warning("Serialization failed for pod %s", safe_get_name(o))
log.debug("traceback for serialization error", exc_info=True)
return "", "", 0, False
return "", "", 0, False
| [
"noreply@github.com"
] | ephraimbuddy.noreply@github.com |
574d55b848536bfcfa322627dbf0c6b104d9d507 | c64bb34a3dde14d3c9bf813bde414a7b3f10611d | /ommat_addons/sprogroup_purchase_request/models/__init__.py | 570e708fb07b60d951eacbd75670b5b49e901c7c | [] | no_license | sm2x/my_work | ebf2e1abd06191ee59b0d82a23534274a81a3195 | efc469aee4cd20b038d48d4c09f8257f3f04ba1c | refs/heads/master | 2021-01-07T20:41:45.254025 | 2020-02-12T16:02:46 | 2020-02-12T16:02:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import sprogroup_purchase_request
from . import vendor_model
from . import products
from . import stock_castom
| [
"ragaamaherhanna@gmail.com"
] | ragaamaherhanna@gmail.com |
abfb82095f1a7ee3c1b729cc4f99af3aa9ed2b7c | b9d54c64d4a280703b459b346e42518896e20e0a | /lingvo/tools/compare_params.py | 4515d2c948c01e7afb76a2e16c89d9df30b14990 | [
"Apache-2.0"
] | permissive | zh794390558/lingvo | 55a27a4e241414389f0c7b40f381a672bb164372 | ecdf678179018ca07f4f52d065b9bf3fe2dc7c5a | refs/heads/master | 2020-09-26T18:32:31.631402 | 2019-12-06T04:01:22 | 2019-12-06T04:02:05 | 177,497,272 | 0 | 0 | Apache-2.0 | 2019-03-25T02:05:42 | 2019-03-25T02:05:42 | null | UTF-8 | Python | false | false | 3,983 | py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for comparing two models / hyperparams."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo import compat as tf
from lingvo import model_registry
def _hyperparams_text_to_dict(cfg_text):
"""Converts hyperparams config text to a dictionary of key-value pairs."""
txt_list = cfg_text.split("\n")
pair_list = []
for v in txt_list:
if not v:
continue
vals = v.split(" : ")
if len(vals) != 2:
raise ValueError(v)
pair_list.append(vals)
return dict(pair_list)
def hyperparams_text_diff(cfg1_text, cfg2_text):
"""Computes the differences between two hyperparams.Params texts.
Args:
cfg1_text: A hyperparams.Params().ToText() of the first model config.
cfg2_text: A hyperparams.Params().ToText() of the second model config.
Returns:
A tuple of 3 elements:
- cfg1_not_cfg2: A list of keys in cfg1 but not cfg2.
- cfg2_not_cfg1: A list of keys in cfg2 but not cfg1.
- cfg1_and_cfg2_diff: A dict of common keys whose config values differ: each
value is a tuple of the config values from cfg1 and cfg2 respectively.
"""
cfg1_dict = _hyperparams_text_to_dict(cfg1_text)
cfg2_dict = _hyperparams_text_to_dict(cfg2_text)
cfg1_keys = set(cfg1_dict.keys())
cfg2_keys = set(cfg2_dict.keys())
cfg1_not_cfg2 = sorted(list(cfg1_keys - cfg2_keys))
cfg2_not_cfg1 = sorted(list(cfg2_keys - cfg1_keys))
def get_class_name(v):
try:
idx = v.rindex("/")
return v[idx + 1:]
except ValueError:
return v
cfg1_and_cfg2_diff = {}
for k_intersection in cfg1_keys & cfg2_keys:
c1v = cfg1_dict[k_intersection]
c2v = cfg2_dict[k_intersection]
if k_intersection.endswith(".cls"):
c1v = get_class_name(c1v)
c2v = get_class_name(c2v)
if c1v != c2v:
cfg1_and_cfg2_diff[k_intersection] = (c1v, c2v)
return cfg1_not_cfg2, cfg2_not_cfg1, cfg1_and_cfg2_diff
def print_hyperparams_text_diff(path1, path2, cfg1_not_cfg2, cfg2_not_cfg1,
cfg1_and_cfg2_diff):
"""Prints the differences of the output of hyperparams_text_diff.
Args:
path1: Name of registered model or path to model 1.
path2: Name of registered model or path to model 2.
cfg1_not_cfg2: A list of keys in cfg1 but not cfg2.
cfg2_not_cfg1: A list of keys in cfg2 but not cfg1.
cfg1_and_cfg2_diff: A dictionary of common keys whose config values differ;
each value is a tuple of the config values from cfg1 and cfg2
respectively.
"""
if cfg1_not_cfg2:
print("\n\nKeys in %s but not %s: \n%s\n\n" %
(path1, path2, "\n".join(cfg1_not_cfg2)))
if cfg2_not_cfg1:
print("\n\nKeys in %s but not %s: \n%s\n\n" %
(path2, path1, "\n".join(cfg2_not_cfg1)))
if cfg1_and_cfg2_diff:
print("\n\nKeys with differences and their values: \n\n")
for k, v in sorted(cfg1_and_cfg2_diff.items()):
v1, v2 = v
print("%s: [%s] vs. [%s]" % (k, v1, v2))
print("\n\n")
def get_model_params_as_text(model_path):
try:
cfg = model_registry.GetParams(model_path, "Train")
return cfg.ToText()
except LookupError:
# Try reading as file.
return tf.io.gfile.GFile(model_path).read()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
621c1ffa8b324f1b4fe00aa56300fb2c3f0c237c | ca034a0fe0ae4661ed952ea597e1ba97a1f48d7e | /_build.py | 60c79d7104f07978bcdb6ca7f56f2e3ed2bf1f18 | [] | no_license | tokejepsen/docs | 2bde19a64551b63c6017dec7103a17504f0d4019 | 631498f1552c3c5e45754e3f249aef0d254c9e15 | refs/heads/master | 2020-03-19T12:41:15.291282 | 2018-06-07T21:32:34 | 2018-06-07T21:32:34 | 136,533,338 | 0 | 0 | null | 2018-06-07T21:33:13 | 2018-06-07T21:33:13 | null | UTF-8 | Python | false | false | 6,365 | py | """Generate markdown from template.
This module converts bespoke markdown into markdown compatible with
the bespoke mkdocs theme developed for Avalon.
"""
import sys
import json
import time
import shutil
import contextlib
import subprocess
from tempfile import mkdtemp
@contextlib.contextmanager
def tempfile(name):
try:
tempdir = mkdtemp()
fname = os.path.join(tempdir, name)
yield fname
finally:
shutil.rmtree(tempdir)
def on_template(template):
definition = template.strip("{{").rstrip().rstrip("}}")
key, value = definition.split(":")
if key == "schema":
return on_schema(value)
if key == "api" and value == "members":
return on_api_members()
return template
def on_block(language, block):
if language == "python":
return on_python(block)
return ""
def on_page(page):
formatted_time = time.strftime("%b %d %Y %H:%M:%S GMT+0", time.gmtime())
return """\
<p>{time}</p>
<br>
{content}\
""".format(time=formatted_time)
def on_api_members():
from avalon import api
table = """\
| Member | Description
|:-------|:--------
"""
row = "| `{name}` | {description}\n"
for name in api.__all__:
member = getattr(api, name)
doc = member.__doc__
if doc is None:
raise SyntaxError("'%s' is missing a docstring." % name)
table += row.format(
name=name,
description=doc.splitlines()[0]
)
return table
def on_schema(name):
from avalon import schema
schema = schema._cache[name]
description = """\
```json
{dump}
```
""".format(dump=json.dumps({
key: value.get("description", "")
for key, value in schema["properties"].items()
}, indent=4, sort_keys=True))
example = """\
**Example**
```json
{dump}
```
""".format(dump=json.dumps({
key: value.get("example", "")
for key, value in schema["properties"].items()
}, indent=4, sort_keys=True))
definition = """\
**Definition**
| Key | Description
|:----|:------------
"""
row = "| `{key}` | {description}\n"
for key, data in schema["properties"].items():
if "requires" in schema and key not in schema["requires"]:
continue
if "description" not in data:
raise SyntaxError("'%s' of %s must have a "
"description" % (key, name))
data["key"] = key
try:
data["type"] = {
"string": "str",
"number": "int",
"array": "list",
"object": "dict"
}[data["type"]]
except KeyError:
data["type"] = "any"
data["required"] = str(key in schema.get("required", {}))
definition += row.format(**data)
root = "https://github.com/getavalon/core/tree/master/avalon/schema"
link = """\
<a href="{root}/{name}" title="{name}" class="md-source-file">
{name}
</a>
""".format(root=root, name=name)
return os.linesep.join([link, description, example])
def on_python(block):
with tempfile("block.py") as fname:
with open(fname, "w") as f:
f.write(os.linesep.join(block))
try:
output = subprocess.check_output(
[sys.executable, fname],
stderr=subprocess.STDOUT,
universal_newlines=True
)
except subprocess.CalledProcessError as e:
output = e.output
output = "\n".join(
"<span class=\"p\">{line}</span>".format(line=line)
for line in output.splitlines()
)
source = """\
```python
{input}
```
""".format(input="".join(block))
output = """\
<table class="codehilitetable output">
<tbody>
<tr>
<td class="code">
<div class="codehilite" id="__code_1">
<pre>
{output}\
</pre>
</div>
</td>
</tr>
</tbody>
</table>
""".format(output=output) if output else ""
return "\n".join([source, output])
def parse(fname):
parsed = list()
blocks = list()
with open(fname) as f:
in_block = False
current_block = None
current_language = None
line_no = 0
for line in f:
line_no += 1
if line_no == 1 and line.startswith("build: false"):
print("Skipping '%s'.." % fname)
parsed = f.read()
break
if line.startswith("{{"):
line = on_template(line)
if in_block and line.startswith("```"):
print("Running Python..")
print("".join("\t%s" % line for line in current_block))
line = on_block(current_language, current_block)
in_block = False
current_language = None
parsed.append(line)
elif in_block:
current_block.append(line)
elif line.startswith("```python"):
in_block = True
current_language = "python"
current_block = list()
blocks.append(current_block)
else:
parsed.append(line)
return "".join(parsed)
if __name__ == '__main__':
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("path", nargs='?')
args = parser.parse_args()
cd = os.path.abspath(os.path.dirname(__file__))
os.chdir(cd)
if args.path and os.path.isfile(args.path):
files = [args.path]
else:
files = list()
path = args.path
for base, dirs, fnames in os.walk("pages"):
for fname in fnames:
name, ext = os.path.splitext(fname)
if ext != ".md":
continue
src = os.path.join(base, fname)
files.append(src)
results = list()
for src in files:
print("Building '%s'.." % src)
dst = src.replace("pages", "build")
parsed = parse(src)
results.append((dst, parsed))
# Parsing can take some time, so write
# files all in one batch when done
for dst, parsed in results:
try:
os.makedirs(os.path.dirname(dst))
except OSError:
pass
with open(dst, "w") as f:
f.write(parsed)
| [
"konstruktion@gmail.com"
] | konstruktion@gmail.com |
95dd605d13b812f1e44aa83d1847cdec84539d9d | 50e089f906489b2586cc586712420fd085f1f637 | /nested_functions.py | bc7e0eb17a8dd8bb22b42ee4d5c4d9a3db320695 | [] | no_license | AaronTho/Python_Notes | 5ab629e3b3d49be5c68d2a285a79683dc604cd3e | 4aa0e1fb4a35763458a1da467e1bb01e393bc972 | refs/heads/main | 2023-07-24T00:59:23.552952 | 2021-09-11T17:32:25 | 2021-09-11T17:32:25 | 375,399,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | def greeting(first, last):
def full_name():
return f'{first} {last}'
print(f'Hi {full_name()}!')
greeting('Kristine', 'Hudgens')
| [
"aamith@gmail.com"
] | aamith@gmail.com |
be5efb5f77e4571b19fcd2f4531c1a7779771783 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /22_专题/单词缩写/527. 单词缩写.py | 05bf160951915d6d65e4e60fcb9259136c8d17ac | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | from typing import List
# 缩写规则:
# 1. 初始缩写由起始字母+省略字母的数量+结尾字母组成。
# 2. 若存在冲突,则使用更长的前缀代替首字母,直到从单词到缩写的映射唯一
# 3. 若缩写并不比原单词更短,则保留原样。
# 贪心:
# !首先给每个单词选择最短的缩写。然后我们对于所有重复的单词,我们增加这些重复项的长度。
class Solution:
def wordsAbbreviation(self, words: List[str]) -> List[str]:
def compress(word: str, start=0) -> str:
if len(word) - start <= 3:
return word
return word[: start + 1] + str(len(word) - start - 2) + word[-1]
n = len(words)
res = list(map(compress, words))
needStartFrom = [0] * n
for i in range(n):
while True:
dup = set()
for j in range(i + 1, n):
if res[i] == res[j]:
dup.add(j)
if not dup:
break
# 重复前缀的单词start+1 重新压缩
dup.add(i)
for dupeIndex in dup:
needStartFrom[dupeIndex] += 1
res[dupeIndex] = compress(words[dupeIndex], needStartFrom[dupeIndex])
return res
print(
Solution().wordsAbbreviation(
words=[
"like",
"god",
"internal",
"me",
"internet",
"interval",
"intension",
"face",
"intrusion",
]
)
)
# 输出: ["l2e","god","internal","me","i6t","interval","inte4n","f2e","intr4n"]
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
d0decda0750a41a8cd360423f492d5acd9c51db4 | 7d7ef4997628d92c9f2ad119ba956c711cc7470e | /preprocess/set_informations.py | 1a0daaba394c3457093e1ac50ab82ccd56be0582 | [] | no_license | jpra2/bifasico_v2 | 6a53031d2b4c37ee231770a61c09815146f897d8 | 061330d5cc1185946283379a2478732ae9bb1b3b | refs/heads/master | 2020-06-01T14:39:30.547389 | 2019-06-26T14:42:51 | 2019-06-26T14:42:51 | 190,817,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,145 | py | from pymoab import types, rng
def injector_producer_press(mb, gama_w, gama_o, gravity, all_nodes, volumes_d, tags):
press_tag = tags['P']
values = mb.tag_get_data(press_tag, volumes_d, flat=True)
wells_injector_tag = mb.tag_get_handle('WELLS_INJECTOR', 1, types.MB_TYPE_HANDLE, types.MB_TAG_SPARSE, True)
wells_producer_tag = mb.tag_get_handle('WELLS_PRODUCER', 1, types.MB_TYPE_HANDLE, types.MB_TAG_SPARSE, True)
tags['WELLS_INJECTOR'] = wells_injector_tag
tags['WELLS_PRODUCER'] = wells_producer_tag
wells_injector_meshset = mb.create_meshset()
wells_producer_meshset = mb.create_meshset()
m = values.mean()
injectors = []
producers = []
for i, v in enumerate(values):
if v > m:
injectors.append(volumes_d[i])
else:
producers.append(volumes_d[i])
producers = rng.Range(producers)
injectors = rng.Range(injectors)
mb.add_entities(wells_producer_meshset, producers)
mb.add_entities(wells_injector_meshset, injectors)
mb.tag_set_data(wells_injector_tag, 0, wells_injector_meshset)
mb.tag_set_data(wells_producer_tag, 0, wells_producer_meshset)
if gravity:
set_p_with_gravity(mb, press_tag, all_nodes, injectors, producers, gama_w, gama_o, tags)
return injectors, producers
def set_p_with_gravity(mb, press_tag, all_nodes, injectors, producers, gama_w, gama_o, tags):
coords = mb.tag_get_data(tags['NODES'], all_nodes)
coords = coords.reshape([len(all_nodes), 3])
maxs = coords.max(axis=0)
Lz = maxs[2]
values = mb.tag_get_data(press_tag, injectors, flat=True)
z_elems = -1*mb.tag_get_data(tags['CENT'], injectors)[:,2]
delta_z = z_elems + Lz
pressao = gama_w*(delta_z) + values
mb.tag_set_data(press_tag, injectors, pressao)
values = mb.tag_get_data(press_tag, producers, flat=True)
z_elems = -1*mb.tag_get_data(tags['CENT'], producers)[:,2]
delta_z = z_elems + Lz
pressao = gama_o*(delta_z) + values
mb.tag_set_data(press_tag, producers, pressao)
def injector_producer(mb, gama_w, gama_o, gravity, all_nodes, volumes_d, volumes_n, tags):
neuman_tag = tags['Q']
press_tag = tags['P']
wells_injector_tag = mb.tag_get_handle('WELLS_INJECTOR', 1, types.MB_TYPE_HANDLE, types.MB_TAG_SPARSE, True)
wells_producer_tag = mb.tag_get_handle('WELLS_PRODUCER', 1, types.MB_TYPE_HANDLE, types.MB_TAG_SPARSE, True)
wells_injector_meshset = mb.create_meshset()
wells_producer_meshset = mb.create_meshset()
mb.add_entities(wells_producer_meshset, volumes_d)
mb.add_entities(wells_injector_meshset, volumes_n)
mb.tag_set_data(wells_injector_tag, 0, wells_injector_meshset)
mb.tag_set_data(wells_producer_tag, 0, wells_producer_meshset)
if gravity:
set_p_with_gravity(mb, tags['P'], all_nodes, volumes_n, volumes_d, gama_w, gama_o, tags)
return volumes_n, volumes_d
def convert_to_SI(mb, tags, all_volumes, all_faces, all_nodes, volumes_d, volumes_n):
from preprocess import conversao as conv
k_pe_to_m = 1.0
k_md_to_m2 = 1.0
k_psi_to_pa = 1.0
k_bbldia_to_m3seg = 1.0
k_pe_to_m = conv.pe_to_m(k_pe_to_m)
k_md_to_m2 = conv.milidarcy_to_m2(k_md_to_m2)
k_psi_to_pa = conv.psi_to_Pa(k_psi_to_pa)
k_bbldia_to_m3seg = conv.bbldia_to_m3seg(k_bbldia_to_m3seg)
p_tag = tags['P']
k_harm_tag = tags['KHARM']
cent_tag = tags['CENT']
press_values = mb.tag_get_data(tags['P'], volumes_d, flat=True)
press_values *= k_psi_to_pa
mb.tag_set_data(p_tag, volumes_d, press_values)
if len(volumes_n) > 0:
q_values = mb.tag_get_data(tags['Q'], volumes_n, flat=True)
q_values *= k_bbldia_to_m3seg
mb.tag_set_data(q_tag, volumes_q, q_values)
k_harms = mb.tag_get_data(tags['KHARM'], all_faces, flat=True)
k_harms *= k_md_to_m2*k_pe_to_m
mb.tag_set_data(k_harm_tag, all_faces, k_harms)
centroids = (k_pe_to_m)*mb.tag_get_data(cent_tag, all_volumes)
mb.tag_set_data(cent_tag, all_volumes, centroids)
coords = mb.tag_get_data(tags['NODES'], all_nodes)
mb.tag_set_data(tags['NODES'], all_nodes, coords*(k_pe_to_m))
| [
"jprandrade2@gmail.com"
] | jprandrade2@gmail.com |
77878cfdb6cf7b01007cf4810306020ad7afae92 | e2dc5943070ddb3e6198711a9fe3c4dda4e8449a | /190227/최소배열/venv/Scripts/easy_install-script.py | 338e6276c2540c67179947380316d9e7e30b6741 | [] | no_license | jiheelee/stack | eb7b34073180a9a46221b974585b4cd98cd42e59 | b99dba43d1fb2bfae4b4643fda8a523ba18450e5 | refs/heads/master | 2020-04-25T15:25:53.306308 | 2019-02-27T08:53:28 | 2019-02-27T08:53:28 | 172,878,764 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 471 | py | #!C:\Users\student\PycharmProjects\190227\ÃÖ¼Ò¹è¿\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"jiheelee.ljh@gmail.com"
] | jiheelee.ljh@gmail.com |
155751b1f16e508681de993ca06665d2975eb2ce | 9c5abcd43318ef7553be95a95a859a0f3e41a467 | /examples/tutorials/11_load.py | 84afd7397952523fee2c5088abc9332745ba37a5 | [
"BSD-3-Clause"
] | permissive | xzy103/PyEasyGraphics | d66da503f601868fe39d404a77b3b8d0a43a2e52 | 16bd6f21dd6f3d76edaab2b533500e9aa789c6b2 | refs/heads/master | 2020-05-19T07:31:22.005958 | 2019-02-25T09:54:41 | 2019-02-25T09:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | """
Load and display a image
"""
if __name__ == "__main__":
from easygraphics import *
init_graph(800, 600)
img = load_image("test.png")
draw_image((get_width() - img.get_width()) // 2,
(get_height() - img.get_height()) // 2, img)
pause()
img.close()
close_graph()
| [
"royqh1979@gmail.com"
] | royqh1979@gmail.com |
fd27ad6a8290c32a967a034eca6ecec5d2b83195 | 02b650423c122fd4c2a200eb6a90c4530a75511d | /tests/unit/api/commands/test_Schedule_Commands.py | 0ab731997fe190c8d7405e2d013704fed5854efd | [
"Apache-2.0"
] | permissive | OpenSecuritySummit/OSS-Bot | 67b5f83ca94c612fbbed7610b92e4174e1f512de | 8341df2be0f12c59ca1b0e9c407b650ef2fa44f9 | refs/heads/master | 2021-08-10T15:28:22.046363 | 2020-05-28T22:56:41 | 2020-05-28T22:56:41 | 185,175,370 | 1 | 1 | Apache-2.0 | 2019-06-03T09:49:15 | 2019-05-06T10:31:25 | Python | UTF-8 | Python | false | false | 572 | py | from pbx_gs_python_utils.utils.Dev import Dev
from oss_bot.Deploy import Deploy
from oss_bot.api.commands.Participant_Commands import Participant_Commands
from oss_bot.api.commands.Schedule_Commands import Schedule_Commands
from oss_bot.helpers.Test_Helper import Test_Helper
class test_Schedule_Commands(Test_Helper):
def setUp(self):
super().setUp()
self.result = None
def tearDown(self):
if self.result is not None:
Dev.pprint(self.result)
def test_today(self):
Schedule_Commands.today(None,'DJ8UA0RFT',[])
| [
"dinis.cruz@owasp.org"
] | dinis.cruz@owasp.org |
714febafa72df8362cb0c1a03d8eeec7bb22472c | 005d644bb56f8c9e168834ae7b8aa6eb6ac121fd | /splash/ui_splash_screen.py | f04928ecdf3274c3ce622ab71292fb9879474210 | [] | no_license | TrendingTechnology/Notepy | 950c797a111a57aedd521f558cdebe14e643e03d | 029f0725a667d2085e20a9ad60e9032d4535830c | refs/heads/main | 2023-06-20T21:02:48.004169 | 2021-07-17T05:47:56 | 2021-07-17T05:47:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,474 | py | from PySide2.QtCore import (QCoreApplication, QMetaObject, QObject, QPoint,
QRect, QSize, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QLinearGradient, QPalette, QPainter, QPixmap,
QRadialGradient)
from PySide2.QtWidgets import *
class Ui_SplashScreen(object):
def setupUi(self, SplashScreen):
if SplashScreen.objectName():
SplashScreen.setObjectName(u"SplashScreen")
SplashScreen.resize(680, 400)
self.centralwidget = QWidget(SplashScreen)
self.centralwidget.setObjectName(u"centralwidget")
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(10, 10, 10, 10)
self.dropShadowFrame = QFrame(self.centralwidget)
self.dropShadowFrame.setObjectName(u"dropShadowFrame")
self.dropShadowFrame.setStyleSheet(u"QFrame { \n"
" background-color: rgb(255, 255, 255); \n"
" color: rgb(220, 220, 220);\n"
" border-radius: 10px;\n"
"}")
self.dropShadowFrame.setFrameShape(QFrame.StyledPanel)
self.dropShadowFrame.setFrameShadow(QFrame.Raised)
self.label_title = QLabel(self.dropShadowFrame)
self.label_title.setObjectName(u"label_title")
self.label_title.setGeometry(QRect(0, 90, 661, 61))
font = QFont()
font.setFamily(u"Segoe UI")
font.setPointSize(40)
self.label_title.setFont(font)
self.label_title.setStyleSheet(u"color: rgb(254, 121, 199);")
self.label_title.setAlignment(Qt.AlignCenter)
self.label_description = QLabel(self.dropShadowFrame)
self.label_description.setObjectName(u"label_description")
self.label_description.setGeometry(QRect(0, 150, 661, 31))
font1 = QFont()
font1.setFamily(u"Segoe UI")
font1.setPointSize(14)
self.label_description.setFont(font1)
self.label_description.setStyleSheet(u"color: rgb(98, 114, 164);")
self.label_description.setAlignment(Qt.AlignCenter)
self.progressBar = QProgressBar(self.dropShadowFrame)
self.progressBar.setObjectName(u"progressBar")
self.progressBar.setGeometry(QRect(50, 280, 561, 23))
self.progressBar.setStyleSheet(u"QProgressBar {\n"
" \n"
" background-color: rgb(98, 114, 164);\n"
" color: rgb(200, 200, 200);\n"
" border-style: none;\n"
" border-radius: 10px;\n"
" text-align: center;\n"
"}\n"
"QProgressBar::chunk{\n"
" border-radius: 10px;\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0.511364, x2:1, y2:0.523, stop:0 rgba(254, 121, 199, 255), stop:1 rgba(170, 85, 255, 255));\n"
"}")
self.progressBar.setValue(24)
self.label_loading = QLabel(self.dropShadowFrame)
self.label_loading.setObjectName(u"label_loading")
self.label_loading.setGeometry(QRect(0, 320, 661, 21))
font2 = QFont()
font2.setFamily(u"Segoe UI")
font2.setPointSize(12)
self.label_loading.setFont(font2)
self.label_loading.setStyleSheet(u"color: rgb(98, 114, 164);")
self.label_loading.setAlignment(Qt.AlignCenter)
self.label_credits = QLabel(self.dropShadowFrame)
self.label_credits.setObjectName(u"label_credits")
self.label_credits.setGeometry(QRect(20, 350, 621, 21))
font3 = QFont()
font3.setFamily(u"Segoe UI")
font3.setPointSize(10)
self.label_credits.setFont(font3)
self.label_credits.setStyleSheet(u"color: rgb(98, 114, 164);")
self.label_credits.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.verticalLayout.addWidget(self.dropShadowFrame)
SplashScreen.setCentralWidget(self.centralwidget)
self.retranslateUi(SplashScreen)
QMetaObject.connectSlotsByName(SplashScreen)
# setupUi
def retranslateUi(self, SplashScreen):
SplashScreen.setWindowTitle(QCoreApplication.translate("SplashScreen", u"MainWindow", None))
self.label_title.setText(QCoreApplication.translate("SplashScreen", u"<strong>Notepy</strong>", None))
self.label_loading.setText(QCoreApplication.translate("SplashScreen", u"'Writing is the painting of the voice'" , None))
self.label_credits.setText(QCoreApplication.translate("SplashScreen", u"<strong>Created by</strong>: Mirko Rovere", None))
# retranslateUi
| [
"noreply@github.com"
] | TrendingTechnology.noreply@github.com |
223fd5fbb8b7756b2d9e6bea44f9505273bd3aa9 | b022d86a5beabbc830d3896ccfba4f219875cbca | /meshio/medit_io.py | 87bcf8e31d2cc5a2c2217046f45872dff2bb8d40 | [
"MIT"
] | permissive | nicoguaro/meshio | 4bb136417f0c93990b8ebbc3b9cd53d0b0b044c1 | 5b3a15e72f3a0c134d176b016d2c16e10e890f27 | refs/heads/master | 2021-01-22T04:24:37.177143 | 2017-05-18T17:45:50 | 2017-05-18T17:45:50 | 92,463,341 | 1 | 0 | null | 2017-05-26T02:24:27 | 2017-05-26T02:24:27 | null | UTF-8 | Python | false | false | 3,757 | py | # -*- coding: utf-8 -*-
#
'''
I/O for Medit's format, cf.
<https://people.sc.fsu.edu/~jburkardt/data/medit/medit.html>.
.. moduleauthor:: Nico Schlömer <nico.schloemer@gmail.com>
'''
from itertools import islice
import numpy
def read(filename):
with open(filename) as f:
points, cells = read_buffer(f)
return points, cells, {}, {}, {}
def read_buffer(f):
dim = 0
cells = {}
while True:
try:
line = next(islice(f, 1))
except StopIteration:
break
stripped = line.strip()
# skip comments and empty lines
if len(stripped) == 0 or stripped[0] == '#':
continue
assert stripped[0].isalpha()
keyword = stripped.split(' ')[0]
meshio_from_medit = {
'Edges': ('line', 2),
'Triangles': ('triangle', 3),
'Quadrilaterals': ('quad', 4),
'Tetrahedra': ('tetra', 4),
'Hexahedra': ('hexahedra', 8)
}
if keyword == 'MeshVersionFormatted':
assert stripped[-1] == '1'
elif keyword == 'Dimension':
dim = int(stripped[-1])
elif keyword == 'Vertices':
assert dim > 0
# The first line is the number of nodes
line = next(islice(f, 1))
num_verts = int(line)
points = numpy.empty((num_verts, dim), dtype=float)
for k, line in enumerate(islice(f, num_verts)):
# Throw away the label immediately
points[k] = numpy.array(line.split(), dtype=float)[:-1]
elif keyword in meshio_from_medit:
meshio_name, num = meshio_from_medit[keyword]
# The first line is the number of elements
line = next(islice(f, 1))
num_cells = int(line)
cell_data = numpy.empty((num_cells, num), dtype=int)
for k, line in enumerate(islice(f, num_cells)):
data = numpy.array(line.split(), dtype=int)
# Throw away the label
cell_data[k] = data[:-1]
# adapt 0-base
cells[meshio_name] = cell_data - 1
elif keyword == 'End':
pass
else:
raise RuntimeError('Unknown keyword \'%s\'.' % keyword)
return points, cells
def write(
        filename,
        points,
        cells,
        point_data=None,
        cell_data=None,
        field_data=None
        ):
    """Write *points* and *cells* to *filename* in Medit ASCII format.

    point_data / cell_data / field_data are accepted for interface
    compatibility with the other meshio writers but ignored: the Medit
    format has no place for them.
    """
    with open(filename, 'wb') as fh:
        fh.write(b'MeshVersionFormatted 1\n')
        fh.write(b'# Created by meshio\n')

        # Dimension info
        d = '\nDimension %d\n' % points.shape[1]
        fh.write(d.encode('utf-8'))

        # vertices: one row per point, coordinates plus a dummy label column
        fh.write(b'\nVertices\n')
        fh.write(('%d\n' % len(points)).encode('utf-8'))
        labels = numpy.ones(len(points), dtype=int)
        data = numpy.c_[points, labels]
        # %r keeps full float precision (repr round-trips)
        fmt = ' '.join(['%r'] * points.shape[1]) + ' %d'
        numpy.savetxt(fh, data, fmt)

        # meshio cell type -> (Medit section name, nodes per cell)
        medit_from_meshio = {
            'line': ('Edges', 2),
            'triangle': ('Triangles', 3),
            'quad': ('Quadrilaterals', 4),
            'tetra': ('Tetrahedra', 4),
            'hexahedra': ('Hexahedra', 8)
            }

        for key, data in cells.items():
            medit_name, num = medit_from_meshio[key]
            fh.write(b'\n')
            fh.write(('%s\n' % medit_name).encode('utf-8'))
            fh.write(('%d\n' % len(data)).encode('utf-8'))
            labels = numpy.ones(len(data), dtype=int)
            # adapt 0-based node indices to Medit's 1-base; dummy label column
            data_with_label = numpy.c_[data + 1, labels]
            fmt = ' '.join(['%d'] * (num + 1))
            numpy.savetxt(fh, data_with_label, fmt)

        fh.write(b'\nEnd\n')
    return
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
22b155a60cc9f26f8fbcbfb19da779853da72f7b | fc7cad490cb774d769c1b463ac6d1d9a8ea97024 | /pages/tests/test_views.py | 1769e7849fc404679d64f326e2eea26408bcaedd | [] | no_license | Aviemusca/curriculum-dev | c301915532353836cb085130fd12e2734da4b956 | 691a6536718ef496ac603b1c8daee7508b3e8ff2 | refs/heads/master | 2022-12-26T20:56:55.031344 | 2020-10-01T08:11:49 | 2020-10-01T08:11:49 | 297,643,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,158 | py | from django.test import SimpleTestCase
from django.urls import reverse
class HomePageViewTests(SimpleTestCase):
    """Smoke tests for the home page: raw URL, named route, and template."""

    def test_home_page_status_code(self):
        # Hit the page by its literal URL.
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)

    def test_home_view_url_by_name(self):
        # Hit the page through its namespaced route name.
        response = self.client.get(reverse('pages:home'))
        self.assertEqual(response.status_code, 200)

    def test_home_view_template(self):
        # Verify the expected template is rendered.
        response = self.client.get(reverse('pages:home'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home.html')
class AboutPageViewTests(SimpleTestCase):
    """Smoke tests for the about page: raw URL, named route, and template."""

    def test_about_page_status_code(self):
        # Hit the page by its literal URL.
        response = self.client.get('/about/')
        self.assertEqual(response.status_code, 200)

    def test_about_view_url_by_name(self):
        # Hit the page through its namespaced route name.
        response = self.client.get(reverse('pages:about'))
        self.assertEqual(response.status_code, 200)

    def test_about_view_template(self):
        # Verify the expected template is rendered.
        response = self.client.get(reverse('pages:about'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'about.html')
| [
"yvan@metatech.ie"
] | yvan@metatech.ie |
6751ec11b96160d04f89626642ceb462999b1053 | ab621c65fc91f5194c4032d68e750efaa5f85682 | /pabi_asset_management/wizard/account_asset_compute.py | d6cacc753c4c8d1e7b206c84a6b2f2cc5727aea3 | [] | no_license | pabi2/pb2_addons | a1ca010002849b125dd89bd3d60a54cd9b9cdeef | e8c21082c187f4639373b29a7a0905d069d770f2 | refs/heads/master | 2021-06-04T19:38:53.048882 | 2020-11-25T03:18:24 | 2020-11-25T03:18:24 | 95,765,121 | 6 | 15 | null | 2022-10-06T04:28:27 | 2017-06-29T10:08:49 | Python | UTF-8 | Python | false | false | 1,524 | py | # -*- coding: utf-8 -*-
import ast
from openerp import models, fields, api, _
class AccountAssetCompute(models.Model):  # Change to a Model
    """Asset-depreciation computation, persisted one record per run.

    Upstream this object is a transient wizard; it is re-declared here as a
    regular Model so every compute run and the journal entries it generated
    stay on record.
    """
    _inherit = 'account.asset.compute'
    _rec_name = 'id'  # records are displayed by their numeric id
    _order = 'id desc'  # newest runs first

    # Expose the row id so it can be shown/searched in views.
    id = fields.Integer(
        string='ID',
        readonly=True,
    )
    # Accounting period; editable only while the run is still in draft.
    period_id = fields.Many2one(
        readonly=True,
        states={'draft': [('readonly', False)]},
    )
    # Lifecycle of a run: draft until asset_compute() succeeds, then done.
    state = fields.Selection(
        [('draft', 'Draft'),
         ('done', 'Done')],
        string='Status',
        readonly=True,
        default='draft',
    )
    # Journal entries produced by this run.
    move_ids = fields.Many2many(
        'account.move',
        'asset_compute_account_move_rel',
        'compute_id', 'move_id',
        string='Journal Entries',
        readonly=True,
    )

    @api.multi
    def asset_compute(self):
        """Run the standard computation, then record the generated moves
        on this record and mark it done."""
        res = super(AccountAssetCompute, self).asset_compute()
        # The parent method returns an act_window whose (string) domain
        # holds the created move ids, e.g. "[('id', 'in', [...])]".
        domain = ast.literal_eval(res['domain'])
        move_ids = domain[0][2]
        self.write({'move_ids': [(6, 0, move_ids)],
                    'state': 'done'})
        return True

    @api.multi
    def open_entries(self):
        """Open a tree/form view listing the journal entries of this run."""
        self.ensure_one()
        return {
            'name': _("Journal Entries"),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.move',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'context': self._context,
            'nodestroy': True,
            'domain': [('id', 'in', self.move_ids.ids)],
        }
| [
"kittiu@gmail.com"
] | kittiu@gmail.com |
0b2dfc4a3aeb2cffa837508f7aeee51394e57bd1 | 2cfa0cd5e016d81ecdd3f643e95bd6382652f1ab | /toTheMoon/leetcode_139_WordBreak.py | 64604808320a3a83f58121a9a3c08cfb7d26d7ca | [
"MIT"
] | permissive | jercas/offer66-leetcode-newcode | b863871840875cc38e0310b1e20ccaa4040ea134 | a2e5256f27dbfb23fc34119fc857cd9b00e28c03 | refs/heads/master | 2020-05-07T17:43:43.326326 | 2019-10-24T12:52:32 | 2019-10-24T12:52:32 | 180,738,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,094 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 20:32:31 2019
@author: jercas
"""
"""
leetcode-139: 单词拆分 MEDIUM
'动态规划'
给定一个非空字符串 s 和一个包含非空单词列表的字典 wordDict,判定 s 是否可以被空格拆分为一个或多个在字典中出现的单词。
说明:
拆分时可以重复使用字典中的单词。
你可以假设字典中没有重复的单词。
Hint:
(1)设dp[i]表示字符串s[0:i]是否可以被拆分,False 不能,True能。
(2)现在要想求dp[i]的值,很显然只要判断dp[i - k]的值和子串s[i - k: i]是否存在wordDict中,
其中k为wordDict中一个单词的长度,所以在这一块,可以遍历所有的单词来求。
(3)可以先求出wordDict中每个长度,并且给它排序,方便后面的计算。
"""
class Solution(object):
    def wordBreak1(self, s, wordDict):
        """Bottom-up DP over prefix lengths, windowed by the longest word.

        res[i] == 1 iff s[:i] can be segmented into dictionary words.

        :type s: str
        :type wordDict: List[str]
        :rtype: bool
        """
        if len(s) == 0 or not wordDict:
            return False
        words = set(wordDict)  # O(1) membership instead of list scans
        max_stride = max(len(x) for x in wordDict)
        res = [0] * (len(s) + 1)
        res[0] = 1  # the empty prefix is always segmentable
        for i in range(1, len(s) + 1):
            # Fix: clamp the window start at 0 -- the original
            # range(i - max_stride, i) went negative for small i and
            # silently indexed res/s from the end (negative indexing).
            for j in range(max(0, i - max_stride), i):
                if res[j] == 1 and s[j:i] in words:
                    res[i] = 1
                    break  # one valid split point is enough
        return res[-1] == 1

    def wordBreak2(self, s, wordDict):
        """Same DP, but only try the distinct word lengths at each position.

        :type s: str
        :type wordDict: List[str]
        :rtype: bool
        """
        words = set(wordDict)
        # Only a suffix whose length matches some word can end at i.
        lengths = sorted({len(w) for w in words})
        dp = [False] * (len(s) + 1)
        dp[0] = True
        for i in range(1, len(s) + 1):
            for k in lengths:
                if not dp[i] and i - k >= 0:
                    dp[i] = (dp[i - k] and s[i - k: i] in words)
        return dp[-1]
if __name__ == "__main__":
    # Smoke test: four sample cases with their expected answers.  Each case
    # that wordBreak2 answers correctly is printed, then 'AC'.
    s = ["leetcode", "applepenapple", "catsandog", "cars"]
    wordDict = [["leet", "code"], ["apple", "pen"], ["cats", "dog", "sand", "and", "cat"], ["car", "ca", "rs"]]
    A = [True, True, False, True]
    solution = Solution()
    for i in range(4):
        if A[i] == solution.wordBreak2(s[i], wordDict[i]):
            print(s[i],"+", wordDict[i], "-->", A[i])
    print('AC')
"jercas0618@163.com"
] | jercas0618@163.com |
4e2e3d32be1cba99eb676eb1b0b9eb38a7c57daf | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/_codeforces/1359_d.py | e4583d55e18b587b4ff08fba68b0847da062511c | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
    """Solve the task: max over subarrays (length >= 2) of sum - max element,
    floored at 0 (choosing nothing always scores 0).

    Reads n and the n integers from stdin and prints the answer.  Brute
    force over all O(n^2) subarrays using prefix sums and a running maximum.
    """
    import sys
    input = sys.stdin.readline

    n = int(input())
    dat = list(map(int, input().split()))

    # Prefix sums: s[i] = dat[0] + ... + dat[i-1].
    s = [0] * (n + 1)
    for i in range(n):
        s[i + 1] = s[i] + dat[i]

    best = -999999999999999999999  # sentinel lower than any reachable score
    for i in range(n):
        seg_max = dat[i]  # running maximum of dat[i..j]
        for j in range(i + 1, n):
            v = s[j + 1] - s[i]  # sum of dat[i..j]
            seg_max = max(seg_max, dat[j])
            best = max(best, v - seg_max)

    # Never print a negative answer: the empty choice yields 0.
    print(best if best > 0 else 0)
class TestClass(unittest.TestCase):
    """I/O-driven tests: feed stdin from a string, capture stdout, compare."""

    def assertIO(self, input, output):
        # Swap the std streams for StringIO buffers, run the solver, restore.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the trailing newline
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_input_1(self):
        print("test_input_1")
        input = """5
5 -2 10 -1 4"""
        output = """6"""
        self.assertIO(input, output)

    def test_input_2(self):
        print("test_input_2")
        input = """8
5 2 5 3 -30 -30 6 9"""
        output = """10"""
        self.assertIO(input, output)

    def test_input_3(self):
        # All-negative / negative-dominant input: the answer is floored at 0.
        print("test_input_3")
        input = """3
-10 6 -15"""
        output = """0"""
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"glenda.kanai@gmail.com"
] | glenda.kanai@gmail.com |
369cc333ac5f443e085115379b09a143723492e7 | 936c893f29a7e3c99450b49512d5ad6dd7e63d12 | /api/migrations/0006_codesubbietype.py | 7a09f80fdcddf9e4a90dccbe52897390cab09456 | [] | no_license | pavithra994/cormack_dev | 0d3f5f794173013f3219c786b83779df2f2ae344 | 78927eaeea27320908b117aa50380ddacd6e2662 | refs/heads/master | 2023-04-09T22:34:33.746134 | 2021-04-13T20:12:08 | 2021-04-13T20:12:08 | 357,628,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Ocom Software- All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Ocom Software <licence@ocom.com.au, 2019
#
#
# Generated by Django 1.10.7 on 2017-12-12 04:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('api', '0005_hash'),
]
operations = [
migrations.CreateModel(
name='CodeSubbieType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('active_start_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Active Start Date')),
('active_end_date', models.DateTimeField(blank=True, null=True, verbose_name='Active End Date')),
('description', models.TextField()),
('code', models.CharField(blank=True, max_length=255, null=True, unique=True)),
],
options={
'verbose_name_plural': 'Subbie Types',
'db_table': 'code_subbie_type',
'verbose_name': 'Subbie Type',
},
),
]
| [
"scott.hooper@hordernit.com.au"
] | scott.hooper@hordernit.com.au |
b19af4145b02f7ea5d7e947944ec9842a0a92632 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02886/s480047151.py | 182b8aad58af1035a08c56cbe0d021cb65d131cb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | N=int(input())
List = list(map(int, input().split()))
# Sum of List[i]*List[j] over all unordered pairs i != j.  The original
# double loop summed every ordered pair in O(N^2) and halved the result;
# the identity
#     2 * sum_{i<j} a_i*a_j = (sum a)^2 - sum(a^2)
# gives the same value in O(N).  The difference is always even, so the
# integer division is exact (negatives included).
total = sum(List)
wa = (total * total - sum(a * a for a in List)) // 2
print(wa)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8d2091e6ed66f99a803f6b2852b1ad75f6b5c9b0 | cca752de5ce7f2ce2a225a4d67fc05f917716572 | /pyemma/coordinates/clustering/assign.py | 62de35890757e6f5f8dd27392fac2db8b2345893 | [
"BSD-2-Clause"
] | permissive | kziolkowska/PyEMMA | aef5cf697c4d470e380d888e87d4ec81197f3651 | 358edf630f02a893795c41f57bb8ae2843510444 | refs/heads/master | 2021-01-16T22:46:03.817339 | 2015-02-23T07:48:46 | 2015-02-23T07:48:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | '''
Created on 18.02.2015
@author: marscher
'''
from pyemma.coordinates.clustering.interface import AbstractClustering
from pyemma.msm.io import read_matrix
import numpy as np
class AssignCenters(AbstractClustering):
    """Assigns given (precalculated) cluster centers. If you already have
    cluster centers from somewhere, you use this class to assign your data to it.

    Parameters
    ----------
    clustercenters : path to file (csv) or ndarray
        cluster centers to use in assignment of data

    Examples
    --------
    Assuming you have stored your centers in a CSV file:

    >>> from pyemma.coordinates.clustering import AssignCenters
    >>> from pyemma.coordinates import discretizer
    >>> reader = ...
    >>> assign = AssignCenters('my_centers.dat')
    >>> disc = discretizer(reader, cluster=assign)
    >>> disc.run()
    """

    def __init__(self, clustercenters):
        super(AssignCenters, self).__init__()
        # Fix: the original read the matrix for string input and then
        # unconditionally overwrote it with the file-name string, so the
        # assert below always failed for paths.
        if isinstance(clustercenters, str):
            clustercenters = read_matrix(clustercenters)
        self.clustercenters = clustercenters
        assert isinstance(self.clustercenters, np.ndarray)

    def param_add_data(self, X, itraj, t, first_chunk, last_chunk_in_traj,
                       last_chunk, ipass, Y=None):
        """Discretize chunk X of trajectory itraj by nearest-center mapping."""
        # Allocate the discrete trajectory once, on the first chunk.
        if t == 0:
            n = self.data_producer.trajectory_length(itraj)
            self.dtrajs.append(np.empty(n, dtype=int))

        L = np.shape(X)[0]
        # TODO: optimize: assign one chunk at once
        for i in xrange(L):
            self.dtrajs[itraj][i + t] = self.map(X[i])

        if last_chunk:
            return True
| [
"m.scherer@fu-berlin.de"
] | m.scherer@fu-berlin.de |
0b045919b9d842b5ca1abb59d93d7ecbd92dd4d6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_156/ch23_2020_03_09_19_36_00_577839.py | 3639b7a9881fc419db9d75cc326fb12e7b140fb9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | x = int(input('Velocidade do carro: ?'))
excess = x - 80
if excess > 0:
    # R$5 fine per km/h above the 80 km/h limit.
    print(f"Voce foi multado: {excess * 5}")
else:
    print("Não foi multado")
"you@example.com"
] | you@example.com |
6b71d61f7c665ab19ef6b4a329625f9dd05d5188 | b6c7f367306f8f3d9fad7551810c68b392a1b879 | /omoide/tests/unit/infra/test_walking.py | 9dd3fab1ae1c23cbaf0f50c2bc112f8e5e83b910 | [
"MIT"
] | permissive | TaXeH/Omoide | c96ef35b1394125fc98367e8a9ef1674718e7e9e | 8ccc9d47e802433bb2de21ff930e6630658cd5e3 | refs/heads/main | 2023-07-18T12:00:15.469707 | 2021-08-28T11:37:23 | 2021-08-28T11:37:23 | 400,773,814 | 0 | 0 | MIT | 2021-08-28T11:17:55 | 2021-08-28T11:17:55 | null | UTF-8 | Python | false | false | 1,110 | py | """Tests.
"""
import tempfile
from unittest import mock
import pytest
from omoide import infra
@pytest.fixture()
def filesystem():
    """Provide a fresh Filesystem helper for each test."""
    return infra.Filesystem()
def test_walk(filesystem):
    """infra.walk filtered by branch/leaf yields only the matching folder."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fake_stdout = mock.Mock()  # swallows ensure_folder_exists output

        # Two "sources", each with nested "migration" folders.
        path_1 = filesystem.join(tmp_dir, 'source_1')
        path_2 = filesystem.join(tmp_dir, 'source_1', 'migration_1')
        path_3 = filesystem.join(tmp_dir, 'source_1', 'migration_2')
        path_4 = filesystem.join(tmp_dir, 'source_2', 'migration_3')
        path_5 = filesystem.join(tmp_dir, 'source_2', 'migration_4')

        for path in (path_1, path_2, path_3, path_4, path_5):
            filesystem.ensure_folder_exists(path, fake_stdout)

        # Restricting to one branch/leaf pair must yield exactly one tuple.
        gen = infra.walk(tmp_dir, filesystem,
                         branch='source_2', leaf='migration_3')

        assert list(gen) == [('source_2',
                              'migration_3',
                              filesystem.join(tmp_dir,
                                              'source_2',
                                              'migration_3'))]
| [
"nicord@yandex.ru"
] | nicord@yandex.ru |
e2453c1ab2c4cb5b531f46e966480c82014a35bf | 450fc27c8c11eb3ffe7c764081c048d506a7fdea | /src/py_script.py | 26f9d3ec788457a4b83465a273dc735636edc2c5 | [] | no_license | rduvalwa5/SysExamples | 6228fd93d4cec66e189ff3561af5c4e062349ea5 | e47e8843b10c06f4f02d94d89a3f75b133c1d617 | refs/heads/master | 2020-04-06T06:28:07.657630 | 2016-10-12T06:37:20 | 2016-10-12T06:37:20 | 70,551,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | #!/usr/local/bin/python
'''
Created on Feb 19, 2016

@author: rduvalwa2

How to run a Python script: go to the directory where the script lives
and execute it.  Simple demo script; see the printed instructions below.
'''
print("1) go run a script, go to directory were script is")
print("2) make sure is defined by system as executable")
print("3) at command line type the script name and hit return")
print("example >./PytthonScriptExmaple.py")
print("4) Or type python <script name> and hit return")
print("example > python PytthonScriptExmaple.py")
import math
# Use math.pow method.
a = math.pow(2, 3)
# Use operator.
b = 2 ** 3
# Print results.
print(a)
print(b)
#Output
#8.0
#8 | [
"rduvalwa5@hotmail.com"
] | rduvalwa5@hotmail.com |
c8377597fc5c29bc3e200dfff5a26e53cad854ca | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/1/PyGame/game_20200606103432.py | a377557bcf3c7a0484507e7545a9a8cfba1b1108 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,306 | py | # 1 - Import library
import pygame
from pygame.locals import *
import math
import random
import os
import json
# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen=pygame.display.set_mode((width, height))
keys = [False, False, False, False]  # held state of W, A, S, D
playerpos=[100,100]  # player top-left position in pixels
acc=[0,0]  # accuracy counters: [arrows that hit, arrows fired]
arrows=[]  # live arrows: [angle_radians, x, y]
badtimer=100  # frames until the next badger spawns
badtimer1=0  # spawn-rate ramp (shrinks the spawn interval over time)
badguys=[[640,100]]  # live badgers as [x, y], spawned at the right edge
healthvalue=194  # castle hit points (also the health-bar width in px)
pygame.mixer.init()
# 3 - Load image
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")
arrow = pygame.image.load("resources/images/bullet.png")
badguyimg1 = pygame.image.load("resources/images/badguy.png")
badguyimg=badguyimg1
healthbar = pygame.image.load("resources/images/healthbar.png")
health = pygame.image.load("resources/images/health.png")
gameover = pygame.image.load("resources/images/gameover.png")
youwin = pygame.image.load("resources/images/youwin.png")
# 3.1 - Load audio
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
hit.set_volume(0.05)
enemy.set_volume(0.05)
shoot.set_volume(0.05)
# Background music loops forever at low volume.
pygame.mixer.music.load('resources/audio/moonlight.wav')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(0.25)
# 4 - keep looping through
running = 1  # main-loop flag; cleared on win or loss
exitcode = 0  # 1 = player won, 0 = player lost
# Main game loop: runs until the 90 s timer expires or health reaches 0.
while running:
    badtimer-=1
    # 5 - clear the screen before drawing it again
    screen.fill(0)
    # 6 - tile the grass background
    for x in range(width//grass.get_width()+1):
        for y in range(height//grass.get_height()+1):
            screen.blit(grass,(x*100,y*100))
    # initialize font; must be called after 'pygame.init()' to avoid 'Font not Initialized' error
    myfont = pygame.font.SysFont("monospace", 15)
    # Fix: the original opened "" (empty path) and called .map() on a Python
    # list -- both crash at runtime.  Load the save file best-effort instead.
    # NOTE(review): assumes save.json holds a list of objects with a "name"
    # key -- confirm against whatever writes that file.
    mpcs = []
    dir_path = os.path.dirname(os.path.realpath(__file__)) + "/../save.json"
    try:
        with open(dir_path) as json_file:
            mpcs = [entry["name"] for entry in json.load(json_file)]
    except (OSError, ValueError, KeyError, TypeError):
        mpcs = []
    if mpcs:  # guard: the original divided by len(mpcs) unconditionally
        step = height // len(mpcs)
        for x in range(1, len(mpcs)):
            label = myfont.render(mpcs[x], 1, (255,255,0))
            screen.blit(castle,(0,x*step))
    screen.blit(castle,(0,30))
    screen.blit(castle,(0,135))
    screen.blit(castle,(0,240))
    screen.blit(castle,(0,345 ))
    # 6.1 - rotate the player sprite to face the mouse cursor
    position = pygame.mouse.get_pos()
    angle = math.atan2(position[1]-(playerpos[1]+32),position[0]-(playerpos[0]+26))
    playerrot = pygame.transform.rotate(player, 360-angle*57.29)
    playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
    screen.blit(playerrot, playerpos1)
    # 6.2 - advance each arrow 10 px along its heading
    for bullet in arrows:
        bullet[1] += math.cos(bullet[0])*10
        bullet[2] += math.sin(bullet[0])*10
    # Fix: the original popped from 'arrows' while iterating it, with a pop
    # index that was reset to 0 every pass, so it removed the wrong arrows.
    # Rebuild the list keeping only the on-screen arrows instead.
    arrows = [b for b in arrows if -64 <= b[1] <= 640 and -64 <= b[2] <= 480]
    for projectile in arrows:
        arrow1 = pygame.transform.rotate(arrow, 360-projectile[0]*57.29)
        screen.blit(arrow1, (projectile[1], projectile[2]))
    # 6.3 - spawn badgers on a timer that shrinks as the game goes on
    if badtimer==0:
        badguys.append([640, random.randint(50,430)])
        badtimer=100-(badtimer1*2)
        if badtimer1>=35:
            badtimer1=35
        else:
            badtimer1+=5
    # Move badgers; handle castle hits and arrow collisions.
    # Fix: the original popped from 'badguys'/'arrows' while iterating them
    # with stale indices, which skipped or removed the wrong entries.
    survivors = []
    for badguy in badguys:
        badguy[0]-=5
        if badguy[0]<-64:
            continue  # walked off the left edge: drop it
        # 6.3.1 - Attack castle
        badrect=pygame.Rect(badguyimg.get_rect())
        badrect.top=badguy[1]
        badrect.left=badguy[0]
        if badrect.left<64:
            hit.play()
            healthvalue -= random.randint(5,20)
            continue  # reached the castles: drop it
        # 6.3.2 - Check for collisions (one arrow takes out one badger)
        shot = False
        for index1, bullet in enumerate(arrows):
            bullrect=pygame.Rect(arrow.get_rect())
            bullrect.left=bullet[1]
            bullrect.top=bullet[2]
            if badrect.colliderect(bullrect):
                enemy.play()
                acc[0]+=1
                arrows.pop(index1)
                shot = True
                break
        if not shot:
            survivors.append(badguy)
    badguys = survivors
    for badguy in badguys:
        screen.blit(badguyimg, badguy)
    # 6.4 - Draw the countdown clock (mm:ss) in the top-right corner.
    # Fix: integer division -- with Python 3's '/', the original rendered
    # floats like "1.25:37.5" (the tutorial code targeted Python 2).
    font = pygame.font.Font(None, 24)
    millis_left = 90000-pygame.time.get_ticks()
    survivedtext = font.render(str(millis_left//60000)+":"+str(millis_left//1000%60).zfill(2), True, (0,0,0))
    textRect = survivedtext.get_rect()
    textRect.topright=[635,5]
    screen.blit(survivedtext, textRect)
    # 6.5 - Draw health bar: one 1 px slice per remaining hit point
    screen.blit(healthbar, (5,5))
    for health1 in range(healthvalue):
        screen.blit(health, (health1+8,8))
    # 7 - update the screen
    pygame.display.flip()
    # 8 - loop through the events
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type==pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            exit(0)
        if event.type == pygame.KEYDOWN:
            if event.key==K_w:
                keys[0]=True
            elif event.key==K_a:
                keys[1]=True
            elif event.key==K_s:
                keys[2]=True
            elif event.key==K_d:
                keys[3]=True
        if event.type == pygame.KEYUP:
            if event.key==pygame.K_w:
                keys[0]=False
            elif event.key==pygame.K_a:
                keys[1]=False
            elif event.key==pygame.K_s:
                keys[2]=False
            elif event.key==pygame.K_d:
                keys[3]=False
        if event.type==pygame.MOUSEBUTTONDOWN:
            # fire an arrow from the player's centre towards the mouse
            shoot.play()
            position=pygame.mouse.get_pos()
            acc[1]+=1
            arrows.append([math.atan2(position[1]-(playerpos1[1]+32),position[0]-(playerpos1[0]+26)),playerpos1[0]+32,playerpos1[1]+32])
    # 9 - Move player with WASD, 5 px per frame
    if keys[0]:
        playerpos[1]-=5
    elif keys[2]:
        playerpos[1]+=5
    if keys[1]:
        playerpos[0]-=5
    elif keys[3]:
        playerpos[0]+=5
    # 10 - Win/Lose check: survive 90 s to win; lose when health runs out
    if pygame.time.get_ticks()>=90000:
        running=0
        exitcode=1
    if healthvalue<=0:
        running=0
        exitcode=0
    if acc[1]!=0:
        accuracy=acc[0]*1.0/acc[1]*100
    else:
        accuracy=0
# 11 - Win/lose display: red "game over" banner on loss, green "you win"
# banner otherwise.  (The two branches of the original were identical apart
# from the colour and the banner image, so they are merged here.)
pygame.font.init()
font = pygame.font.Font(None, 24)
if exitcode == 0:
    colour, banner = (255, 0, 0), gameover
else:
    colour, banner = (0, 255, 0), youwin
text = font.render("Accuracy: " + str(accuracy) + "%", True, colour)
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery + 24
screen.blit(banner, (0, 0))
screen.blit(text, textRect)
# Keep the final frame on screen until the window is closed.
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit(0)
    pygame.display.flip()
| [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
914e9bb4f96c7ca489cc4fbe7f9151479e95c700 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1705+398/sdB_PG_1705+398_lc.py | fb5a2c9a6010f68b8235076c5ed37319371ad64e | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from gPhoton.gAperture import gAperture
def main():
    """Build a 30 s-binned NUV light curve for sdB PG 1705+398 via gAperture.

    NOTE(review): the csvfile path contains a space before the final "/" --
    looks like generator residue; confirm the intended output path.
    """
    gAperture(band="NUV", skypos=[256.681792,39.732494], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1705+398 /sdB_PG_1705+398_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
f062e5a2033afeb2b59a95e2eeb47f1166817c97 | 56ade096db1fe376ee43d38c96b43651ee07f217 | /023. Merge k Sorted Lists/Python/Solution.py | 955cbe788c96145af7c9fd5e35bd21a77b6ede15 | [] | no_license | xiaole0310/leetcode | c08649c3f9a9b04579635ee7e768fe3378c04900 | 7a501cf84cfa46b677d9c9fced18deacb61de0e8 | refs/heads/master | 2020-03-17T05:46:41.102580 | 2018-04-20T13:05:32 | 2018-04-20T13:05:32 | 133,328,416 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def mergeKLists(self, lists):
        """Merge k sorted linked lists by divide and conquer.

        :type lists: List[ListNode]
        :rtype: ListNode

        Pairs of lists are merged bottom-up, giving O(N log k) time for N
        total nodes across k lists.
        """
        def partition(lists, start, end):
            # Recursively split lists[start..end] in half and merge the halves.
            if start == end:
                return lists[start]
            if start < end:
                mid = (start + end) // 2
                list_1 = partition(lists, start, mid)
                list_2 = partition(lists, mid + 1, end)
                return merge(list_1, list_2)
            # start > end: empty input (e.g. lists == []).
            return None

        def merge(list_1, list_2):
            # Standard two-list merge behind a dummy head node.
            fake_head = ListNode(0)
            current = fake_head
            while list_1 and list_2:
                if list_1.val < list_2.val:
                    current.next = list_1
                    list_1 = list_1.next
                else:
                    current.next = list_2
                    list_2 = list_2.next
                current = current.next
            # Append whichever list still has nodes left.
            current.next = list_1 if list_1 else list_2
            return fake_head.next

        return partition(lists, 0, len(lists) - 1)
| [
"zhantong1994@163.com"
] | zhantong1994@163.com |
575e4ca2deb65350aa1786280363eb93b2489ec8 | 9e98a7770465227e8e0e962c02850acc5c172e96 | /backend/admin/secure.py | e8efebe1599d1525a767e1c7dad8388d4a903692 | [
"MIT"
] | permissive | pengjinfu/flask-bigger | 281a43770958584c406accb34b2d13eebd4ba8cc | cc5ba476c20129a009ad8a8366daf4dc060bd4ac | refs/heads/master | 2021-04-19T21:24:20.385510 | 2019-03-09T01:07:06 | 2019-03-09T01:07:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | # -*- coding: utf-8 -*-
from functools import wraps
from flask import (
g,
session,
request,
redirect,
url_for,
current_app,
abort
)
def login_required(func):
    """View decorator: require a logged-in user.

    A user counts as logged in if found either on flask.g or in the
    session; anonymous visitors are redirected to the admin login page.
    """
    @wraps(func)
    def decorated_function(*args, **kwargs):
        not_in_g = not hasattr(g, 'login_user') or g.login_user is None
        not_in_s = not 'login_user' in session or session['login_user'] is None
        if not_in_g and not_in_s:
            _route = 'admin.login_view'
            # Remember the requested URL so login can bounce back to it.
            return redirect(url_for(_route, next=request.url))
        if not_in_g:
            # Promote the session user onto g for this request.
            g.login_user = session['login_user']
        return func(*args, **kwargs)
    return decorated_function
def admin_required(func):
    """View decorator: allow only logged-in administrators.

    - No user on g or in the session -> redirect to the admin login page.
    - User present but not an admin  -> HTTP 403.

    NOTE(review): g.login_user is treated as an object (``.is_admin``)
    while the session copy is treated as a dict (``['is_admin']``) --
    confirm both shapes actually occur at runtime.
    """
    @wraps(func)
    def decorated_function(*args, **kwargs):
        in_g = hasattr(g, 'login_user') and not getattr(g, 'login_user') is None
        in_s = 'login_user' in session and not session['login_user'] is None
        if in_g or in_s:
            g_admin = in_g and getattr(g, 'login_user').is_admin
            s_admin = in_s and 'is_admin' in session['login_user'] and bool(session['login_user']['is_admin'])
            if g_admin or s_admin:
                if not in_g:
                    # Promote the session user onto g for this request.
                    g.login_user = session['login_user']
                return func(*args, **kwargs)
            else:
                return abort(403)
        else:
            _route = 'admin.login_view'
            # Remember the requested URL so login can bounce back to it.
            return redirect(url_for(_route, next=request.url))
    return decorated_function
"gogs@fake.local"
] | gogs@fake.local |
c4e79832b0eae413614aef8f2f1b3143244b8230 | 0b14062e8db610817b7f0730bfb21bf3e93765b8 | /component/intent/response.py | 8539ee74f4e2f507f8567ee3b13ee8c97c86dc48 | [
"MIT"
] | permissive | bkosciow/tenchi | 63fa827607b7b725ea61b73119193904bde25a6a | e53e59df34934e3e81da3e9321c1648a844aa23c | refs/heads/develop | 2023-06-24T17:14:45.696811 | 2023-06-12T09:53:40 | 2023-06-12T09:53:40 | 175,896,569 | 0 | 0 | MIT | 2023-06-12T09:53:42 | 2019-03-15T21:59:49 | Python | UTF-8 | Python | false | false | 682 | py |
class Response(object):
    """Reply produced for an intent request.

    Carries two presentation variants -- spoken (``speech``) and written
    (``text``).  Each property falls back to the other variant while its
    own value is unset, so populating just one of them is enough.
    """

    def __init__(self, request=None):
        # Copy the request context when one is given; otherwise start blank.
        if request:
            self.intent_name = request.intent_name
            self.lang = request.lang
            self.data = request.data
        else:
            self.intent_name = ''
            self.lang = ''
            self.data = ''
        self._speech = ''
        self._text = ''

    @property
    def text(self):
        # Fall back to the spoken variant while no written text is set.
        return self._speech if self._text == '' else self._text

    @text.setter
    def text(self, value):
        self._text = value

    @property
    def speech(self):
        # Fall back to the written variant while no speech is set.
        return self._text if self._speech == '' else self._speech

    @speech.setter
    def speech(self, value):
        self._speech = value
| [
"kosci1@gmail.com"
] | kosci1@gmail.com |
4b3d81773808ab07ce6131fa88b8d2fc3dd8e8e0 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /bpmn/models/t_global_conversation.py | d14fa1bdd32f9e7ae6b1d934d12d5dd4c4b99b1d | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 250 | py | from dataclasses import dataclass
from .t_collaboration import TCollaboration
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TGlobalConversation(TCollaboration):
class Meta:
name = "tGlobalConversation"
| [
"chris@komposta.net"
] | chris@komposta.net |
3cddfeb55bb003700c11e2ec31184cc755b38f94 | 3e4fd46157cd8bee454fe0bff227da6204ade9f4 | /api/migrations/0020_cdekkey_updated_at.py | c6a46496d2f3621b432afd3eeca684ce5a14729b | [] | no_license | skiboorg/docs_api | 8e7017457cc111311d836f572597aeb3d6bed1c4 | 4bae50c8ea772439b93bf4e0fc95cb6395bb9cfb | refs/heads/master | 2023-06-26T14:43:54.248638 | 2021-08-02T15:32:28 | 2021-08-02T15:32:28 | 336,874,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.1.5 on 2021-02-24 10:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an auto-updating ``updated_at`` timestamp to ``CdekKey``."""

    dependencies = [
        ('api', '0019_cdekkey'),
    ]

    operations = [
        migrations.AddField(
            model_name='cdekkey',
            name='updated_at',
            # auto_now: refreshed on every save().
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"d@skib.org"
] | d@skib.org |
744c5eaa6cb3ca445d2636ef9869716a03f6577a | 498e792e16ab1a74ac034c53177c4cccbeef2749 | /classification/swin_transformer/utils.py | 96ad54a4b596ffeb720d7ffd0dac0cc47157abe1 | [] | no_license | ydwisroad/imageprocessingpytorch | f97bec4469c087f6bbbca5d42da180c95be8b13f | bd8d1af228619c9c6c9c1a2b880422f7d5048dd5 | refs/heads/master | 2023-07-29T05:05:11.145832 | 2022-02-21T23:32:03 | 2022-02-21T23:32:03 | 284,976,501 | 7 | 3 | null | 2023-07-24T01:08:22 | 2020-08-04T12:43:24 | Jupyter Notebook | UTF-8 | Python | false | false | 6,729 | py | import os
import sys
import json
import pickle
import random
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt
def read_split_data(root: str, val_rate: float = 0.2):
    """Scan a class-per-folder image dataset and split it into train/val.

    Writes the index->class-name mapping to ``class_indices.json`` as a
    side effect and returns
    (train_paths, train_labels, val_paths, val_labels).
    """
    random.seed(0)  # fixed seed so the split is reproducible across runs
    assert os.path.exists(root), "dataset root: {} does not exist.".format(root)

    # One sub-folder per class.
    flower_class = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))]
    # Sort so the class -> index mapping is deterministic.
    flower_class.sort()
    # Class name -> numeric index; dump the inverse mapping to JSON.
    class_indices = dict((k, v) for v, k in enumerate(flower_class))
    json_str = json.dumps(dict((val, key) for key, val in class_indices.items()), indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    train_images_path = []  # paths of all training images
    train_images_label = []  # class index for each training image
    val_images_path = []  # paths of all validation images
    val_images_label = []  # class index for each validation image
    every_class_num = []  # sample count per class
    supported = [".jpg", ".JPG", ".png", ".PNG"]  # accepted file extensions
    # Walk every class folder.
    for cla in flower_class:
        cla_path = os.path.join(root, cla)
        # All files in the class folder with a supported extension.
        images = [os.path.join(root, cla, i) for i in os.listdir(cla_path)
                  if os.path.splitext(i)[-1] in supported]
        # Numeric index of this class.
        image_class = class_indices[cla]
        # Record the number of samples in this class.
        every_class_num.append(len(images))
        # Randomly sample this class's validation subset.
        val_path = random.sample(images, k=int(len(images) * val_rate))

        for img_path in images:
            if img_path in val_path:  # sampled for validation
                val_images_path.append(img_path)
                val_images_label.append(image_class)
            else:  # everything else goes to the training set
                train_images_path.append(img_path)
                train_images_label.append(image_class)

    print("{} images were found in the dataset.".format(sum(every_class_num)))
    print("{} images for training.".format(len(train_images_path)))
    print("{} images for validation.".format(len(val_images_path)))

    plot_image = False
    if plot_image:
        # Bar chart of per-class sample counts (disabled by default).
        plt.bar(range(len(flower_class)), every_class_num, align='center')
        # Use the class names as x tick labels.
        plt.xticks(range(len(flower_class)), flower_class)
        # Annotate each bar with its count.
        for i, v in enumerate(every_class_num):
            plt.text(x=i, y=v + 5, s=str(v), ha='center')
        # Axis labels and title.
        plt.xlabel('image class')
        plt.ylabel('number of images')
        plt.title('flower class distribution')
        plt.show()

    return train_images_path, train_images_label, val_images_path, val_images_label
def plot_data_loader_image(data_loader):
    """Show up to 4 denormalized images per batch with their class names.

    Reads the label -> class-name mapping from ./class_indices.json (written
    by the dataset-split helper) and displays each batch with matplotlib.

    :param data_loader: iterable yielding (images, labels) batches; images are
        assumed to be normalized CHW tensors -- TODO confirm against caller.
    """
    batch_size = data_loader.batch_size
    plot_num = min(batch_size, 4)
    json_path = './class_indices.json'
    assert os.path.exists(json_path), json_path + " does not exist."
    # Use a context manager so the file handle is closed deterministically
    # (the original opened it and never closed it).
    with open(json_path, 'r') as json_file:
        class_indices = json.load(json_file)
    for data in data_loader:
        images, labels = data
        for i in range(plot_num):
            # [C, H, W] -> [H, W, C]
            img = images[i].numpy().transpose(1, 2, 0)
            # Undo the normalization and scale back to 0-255.
            img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255
            label = labels[i].item()
            plt.subplot(1, plot_num, i + 1)
            plt.xlabel(class_indices[str(label)])
            plt.xticks([])  # hide x-axis ticks
            plt.yticks([])  # hide y-axis ticks
            plt.imshow(img.astype('uint8'))
        plt.show()
def write_pickle(list_info: list, file_name: str):
    """Serialize *list_info* to *file_name* with pickle (binary mode)."""
    with open(file_name, 'wb') as handle:
        pickle.dump(list_info, handle)
def read_pickle(file_name: str) -> list:
    """Load and return the pickled object stored in *file_name*."""
    with open(file_name, 'rb') as handle:
        return pickle.load(handle)
def train_one_epoch(model, optimizer, data_loader, device, epoch):
    """Run one training epoch; return (mean loss per step, accuracy).

    Applies one optimizer update per batch (backward -> step -> zero_grad)
    and aborts the whole process if a non-finite loss is seen.
    """
    model.train()
    loss_function = torch.nn.CrossEntropyLoss()
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    optimizer.zero_grad()
    sample_num = 0
    # Wrap the loader in tqdm so progress is printed to stdout.
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model(images.to(device))
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()
        loss = loss_function(pred, labels.to(device))
        loss.backward()
        # detach() keeps the running total out of the autograd graph.
        accu_loss += loss.detach()
        data_loader.desc = "[train epoch {}] loss: {:.3f}, acc: {:.3f}".format(epoch,
                                                                               accu_loss.item() / (step + 1),
                                                                               accu_num.item() / sample_num)
        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)
        optimizer.step()
        optimizer.zero_grad()
    # NOTE(review): assumes the loader yields at least one batch; otherwise
    # `step` is unbound here -- confirm with callers.
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num
@torch.no_grad()
def evaluate(model, data_loader, device, epoch):
    """Evaluate the model on *data_loader*; return (mean loss per step, accuracy).

    Runs under torch.no_grad(), so no gradients are tracked or applied.
    """
    loss_function = torch.nn.CrossEntropyLoss()
    model.eval()
    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    sample_num = 0
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model(images.to(device))
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()
        loss = loss_function(pred, labels.to(device))
        accu_loss += loss
        data_loader.desc = "[valid epoch {}] loss: {:.3f}, acc: {:.3f}".format(epoch,
                                                                               accu_loss.item() / (step + 1),
                                                                               accu_num.item() / sample_num)
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num
| [
"wandf12345@163.com"
] | wandf12345@163.com |
18e9b2b74f11283b6321cc911e118ff92ad27bc1 | 0c6b4e9c5ecc5a7595717f9699953b227486ef3e | /tests/unit/modules/remote_management/oneview/test_oneview_network_set.py | 925b136aea929159834fde6379a0dea5fd372036 | [] | no_license | ansible-collection-migration/ansible.misc | d9c92e8bb0c17b3e2a92976215f523c2afaa5a46 | 3c02be2a8c03b2e375a1e1f37b0c119145ea358c | refs/heads/master | 2020-12-26T23:11:36.544511 | 2020-02-03T22:18:53 | 2020-02-03T22:18:53 | 237,681,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,364 | py | # Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible_collections.ansible.misc.tests.unit.compat import unittest, mock
from .hpe_test_utils import OneViewBaseTestCase
from .oneview_module_loader import NetworkSetModule
# Message used by tests that simulate module failures.
FAKE_MSG_ERROR = 'Fake message error'
# Canonical network-set resource used as fixture data throughout the tests.
NETWORK_SET = dict(
    name='OneViewSDK Test Network Set',
    networkUris=['/rest/ethernet-networks/aaa-bbb-ccc']
)
NETWORK_SET_WITH_NEW_NAME = dict(name='OneViewSDK Test Network Set - Renamed')
# Module parameters: state=present with data identical to NETWORK_SET.
PARAMS_FOR_PRESENT = dict(
    config='config.json',
    state='present',
    data=dict(name=NETWORK_SET['name'],
              networkUris=['/rest/ethernet-networks/aaa-bbb-ccc'])
)
# state=present with a rename plus an extra network referenced by name.
PARAMS_WITH_CHANGES = dict(
    config='config.json',
    state='present',
    data=dict(name=NETWORK_SET['name'],
              newName=NETWORK_SET['name'] + " - Renamed",
              networkUris=['/rest/ethernet-networks/aaa-bbb-ccc', 'Name of a Network'])
)
# state=absent for the fixture network set.
PARAMS_FOR_ABSENT = dict(
    config='config.json',
    state='absent',
    data=dict(name=NETWORK_SET['name'])
)
class NetworkSetModuleSpec(unittest.TestCase,
                           OneViewBaseTestCase):
    """
    OneViewBaseTestCase has common tests for class constructor and main function,
    also provides the mocks used in this test case.
    """

    def setUp(self):
        # Wire up the shared OneView client / AnsibleModule mocks.
        self.configure_mocks(self, NetworkSetModule)
        self.resource = self.mock_ov_client.network_sets
        self.ethernet_network_client = self.mock_ov_client.ethernet_networks

    def test_should_create_new_network_set(self):
        """state=present creates the set when no resource exists yet."""
        self.resource.get_by.return_value = []
        self.resource.create.return_value = NETWORK_SET
        self.mock_ansible_module.params = PARAMS_FOR_PRESENT
        NetworkSetModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=NetworkSetModule.MSG_CREATED,
            ansible_facts=dict(network_set=NETWORK_SET)
        )

    def test_should_not_update_when_data_is_equals(self):
        """state=present is a no-op when the resource already matches."""
        self.resource.get_by.return_value = [NETWORK_SET]
        self.mock_ansible_module.params = PARAMS_FOR_PRESENT
        NetworkSetModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=NetworkSetModule.MSG_ALREADY_PRESENT,
            ansible_facts=dict(network_set=NETWORK_SET)
        )

    def test_update_when_data_has_modified_attributes(self):
        """Rename + extra network (given by name) triggers an update."""
        data_merged = dict(name=NETWORK_SET['name'] + " - Renamed",
                           networkUris=['/rest/ethernet-networks/aaa-bbb-ccc',
                                        '/rest/ethernet-networks/ddd-eee-fff']
                           )
        # First get_by finds the existing set, second (by new name) finds none.
        self.resource.get_by.side_effect = [NETWORK_SET], []
        self.resource.update.return_value = data_merged
        self.ethernet_network_client.get_by.return_value = [{'uri': '/rest/ethernet-networks/ddd-eee-fff'}]
        self.mock_ansible_module.params = PARAMS_WITH_CHANGES
        NetworkSetModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=NetworkSetModule.MSG_UPDATED,
            ansible_facts=dict(network_set=data_merged)
        )

    def test_should_raise_exception_when_ethernet_network_not_found(self):
        """A networkUris entry given by name that cannot be resolved fails."""
        self.resource.get_by.side_effect = [NETWORK_SET], []
        self.ethernet_network_client.get_by.return_value = []
        self.mock_ansible_module.params = PARAMS_WITH_CHANGES
        NetworkSetModule().run()
        self.mock_ansible_module.fail_json.assert_called_once_with(
            exception=mock.ANY,
            msg=NetworkSetModule.MSG_ETHERNET_NETWORK_NOT_FOUND + "Name of a Network"
        )

    def test_should_remove_network(self):
        """state=absent deletes an existing set."""
        self.resource.get_by.return_value = [NETWORK_SET]
        self.mock_ansible_module.params = PARAMS_FOR_ABSENT
        NetworkSetModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            msg=NetworkSetModule.MSG_DELETED
        )

    def test_should_do_nothing_when_network_set_not_exist(self):
        """state=absent is a no-op when the set is already gone."""
        self.resource.get_by.return_value = []
        self.mock_ansible_module.params = PARAMS_FOR_ABSENT
        NetworkSetModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            msg=NetworkSetModule.MSG_ALREADY_ABSENT
        )

    def test_update_scopes_when_different(self):
        """Differing scopeUris are patched via a single replace operation."""
        params_to_scope = PARAMS_FOR_PRESENT.copy()
        params_to_scope['data']['scopeUris'] = ['test']
        self.mock_ansible_module.params = params_to_scope
        resource_data = NETWORK_SET.copy()
        resource_data['scopeUris'] = ['fake']
        resource_data['uri'] = 'rest/network-sets/fake'
        self.resource.get_by.return_value = [resource_data]
        patch_return = resource_data.copy()
        patch_return['scopeUris'] = ['test']
        self.resource.patch.return_value = patch_return
        NetworkSetModule().run()
        self.resource.patch.assert_called_once_with('rest/network-sets/fake',
                                                    operation='replace',
                                                    path='/scopeUris',
                                                    value=['test'])
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=True,
            ansible_facts=dict(network_set=patch_return),
            msg=NetworkSetModule.MSG_UPDATED
        )

    def test_should_do_nothing_when_scopes_are_the_same(self):
        """Identical scopeUris must not trigger a patch call."""
        params_to_scope = PARAMS_FOR_PRESENT.copy()
        params_to_scope['data']['scopeUris'] = ['test']
        self.mock_ansible_module.params = params_to_scope
        resource_data = NETWORK_SET.copy()
        resource_data['scopeUris'] = ['test']
        self.resource.get_by.return_value = [resource_data]
        NetworkSetModule().run()
        # Bug fix: `patch.not_been_called()` is not a Mock assertion -- on a
        # MagicMock it just creates a child mock and always "passes".  Use the
        # real assertion so a stray patch call actually fails the test.
        self.resource.patch.assert_not_called()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(network_set=resource_data),
            msg=NetworkSetModule.MSG_ALREADY_PRESENT
        )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
864af22112c10a166d04ae1997c58bc756b43a51 | f3f38a66daddccbb7fd0bfc1cacce40c7ad31010 | /tippecanoe-downloads.py | 0f09b1da5d9ca1e7df0f83a2ca8a0e864b40bc63 | [] | no_license | kimballjohnson/dotmaps | 386b5b87ce757412eeb7712def8bb595cc59e98f | 09c9a3ceb16ba7f350247eee9a3b65ddb53fe290 | refs/heads/master | 2021-09-12T09:28:11.772233 | 2017-03-01T19:44:56 | 2017-03-01T19:44:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | import sys, csv, zipfile, os, itertools, io, json, tempfile, subprocess
# OpenAddresses CSV columns copied into each output feature's properties.
OA_PROPERTIES = 'HASH', 'NUMBER', 'STREET', 'UNIT', 'CITY', 'POSTCODE'
with open('downloaded/files.csv') as file:
    run_rows = csv.DictReader(file)
    set_key = lambda run_row: int(run_row['set_id'])
    # groupby() only groups adjacent rows, so sort by the same key first.
    sorted_rows = sorted(run_rows, key=set_key, reverse=False)
    grouped_rows = itertools.groupby(sorted_rows, set_key)
    for (set_id, runs) in grouped_rows:
        print('Starting set', set_id, '...', file=sys.stderr)
        mbtiles_filename = 'set_{}.mbtiles'.format(set_id)
        # One tippecanoe process per set; features are streamed to its stdin
        # as newline-delimited GeoJSON.
        cmd = 'tippecanoe', '-l', 'dots', '-r', '3', \
            '-n', 'OpenAddresses Dots, Set {}'.format(set_id), '-f', \
            '-t', tempfile.gettempdir(), '-o', mbtiles_filename
        print(' '.join(cmd), file=sys.stderr)
        tippecanoe = subprocess.Popen(cmd, stdin=subprocess.PIPE, bufsize=1)
        for run_row in runs:
            data_path = os.path.join('downloaded', run_row['path'])
            _, data_ext = os.path.splitext(data_path)
            # Source data is either a bare CSV or a zip holding exactly one
            # CSV.  NOTE(review): these file objects are never closed, and
            # `zip` shadows the builtin of the same name.
            if data_ext == '.csv':
                csv_buff = open(data_path)
            elif data_ext == '.zip':
                zip = zipfile.ZipFile(data_path)
                (csv_name, ) = [name for name in zip.namelist()
                                if os.path.splitext(name)[1] == '.csv']
                csv_buff = io.TextIOWrapper(zip.open(csv_name))
            for csv_row in csv.DictReader(csv_buff):
                try:
                    x, y = float(csv_row['LON']), float(csv_row['LAT'])
                except ValueError:
                    # Skip rows without numeric coordinates.
                    continue
                else:
                    geometry = dict(type='Point', coordinates=[x, y])
                    properties = {key.lower(): csv_row.get(key, '') for key in OA_PROPERTIES}
                    properties.update(source_path=run_row['source_path'])
                    feature = dict(type='Feature', geometry=geometry, properties=properties)
                    tippecanoe.stdin.write(json.dumps(feature).encode('utf8'))
                    tippecanoe.stdin.write(b'\n')
            #break
        # Closing stdin signals EOF so tippecanoe can finish the tileset.
        tippecanoe.stdin.close()
        tippecanoe.wait()
        #break
| [
"mike@teczno.com"
] | mike@teczno.com |
01eb492e08352e8b7c503545b15924f421b2b23a | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/choroplethmapbox/_reversescale.py | 50dd2164ab3093d034fb5a3d4f8f4c172e533564 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 435 | py | import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `reversescale` property of `choroplethmapbox`."""

    def __init__(
        self, plotly_name="reversescale", parent_name="choroplethmapbox", **kwargs
    ):
        # Default the edit type to "plot" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "plot")
        super(ReversescaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
e748ebb1dc90bf83276047fd9bf39673e39504bb | 7d1fd87e1aaf7e6b2ea72ab086a128d03ab059f1 | /Python_Web_Udemy/Udemy_REST_APIs/4_FLASK_RESFUL_MYSQLITE/create_table.py | 62f58b871ea5f67e59f094c3c47fae03970e0dbb | [] | no_license | efren1990/codepy | 05fb34fb608d9921cd5b1c257a9869f2d42eafae | 1bd957e7a7285d459ba76e99c4bccb8dbabf8da4 | refs/heads/main | 2023-06-30T06:31:51.154519 | 2021-08-13T01:30:57 | 2021-08-13T01:30:57 | 395,486,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | """
#APLICACION FLASK RESTFUL SQLITE3
----------------------------------------------------------------
Archivo para crear base de datos y tablas
"""
# sqlite3 library ------->
import sqlite3
# Connection (creates data.db in the working directory if missing) ------->
connection = sqlite3.connect('data.db')
# Cursor ------->
cursor = connection.cursor()
# Table query ------->
# INTEGER PRIMARY KEY - auto-incrementing integer key in SQLite3
create_table = "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username text, password text)"
# Execute the users table query ------->
cursor.execute(create_table)
# Execute the items table query ------->
create_table = "CREATE TABLE IF NOT EXISTS items (id INTEGER PRIMARY KEY, name text, price real)"
cursor.execute(create_table)
# NOTE(review): this seed row is inserted on every run, so repeated runs
# accumulate duplicate 'test' items.
cursor.execute("INSERT INTO items VALUES(NULL, 'test', 9.99)")
# Commit ------->
connection.commit()
# Close ------->
connection.close()
| [
"52503339+incodeEfren@users.noreply.github.com"
] | 52503339+incodeEfren@users.noreply.github.com |
f353297693f93cf1bb526a1b505a7bc7cceb929c | 4a5f11b55e23999a82b62f5c72b44e9a36d24f63 | /simplemooc/settings.py | 16965576ab5e4b9506cda51fa320f5cf46a46247 | [] | no_license | diogo-alves/simplemooc | dca62bfcb2ea6357a551a5760778537f083b675c | cfec59f99888e4e23d41f020ff06bfdf39f70203 | refs/heads/master | 2022-05-10T10:32:18.686313 | 2019-06-04T19:30:43 | 2019-06-04T19:30:43 | 190,260,470 | 0 | 0 | null | 2022-04-22T21:34:44 | 2019-06-04T18:46:43 | Python | UTF-8 | Python | false | false | 4,739 | py | """
Django settings for simplemooc project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
from decouple import config
from dj_database_url import parse as db_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ADMINS = [('Diogo Alves', 'diogo.alves.ti@gmail.com')]
# Allow host headers for Heroku.
# ALLOWED_HOSTS entries are bare hostnames matched against the Host header:
# a value with a scheme such as "http://..." can never match, so the Heroku
# entry was previously dead.
ALLOWED_HOSTS = ['mymooc.herokuapp.com', '127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'taggit',
    'simplemooc.core',
    'simplemooc.accounts',
    'simplemooc.courses',
    'simplemooc.forum',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # Simplified static file serving.
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'simplemooc.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'simplemooc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASE_URL (e.g. set by Heroku) wins; falls back to local SQLite.
DATABASES = {
    'default': config(
        'DATABASE_URL',
        default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3'),
        cast=db_url
    )
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(BASE_DIR, 'simplemooc', 'media')
MEDIA_URL = '/media/'
# E-mails
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = config('EMAIL_HOST', default='localhost')
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL', default='')
EMAIL_LINK_DOMAIN = config('EMAIL_LINK_DOMAIN', default='')
CONTACT_EMAIL = config('CONTACT_EMAIL', default='')
# Auth
LOGIN_URL = 'accounts:login'
LOGIN_REDIRECT_URL = 'core:home'
LOGOUT_URL = 'accounts:logout'
AUTH_USER_MODEL = 'accounts.User'
PASSWORD_RESET_TIMEOUT_DAYS = 2
# Activate Django-Heroku.
django_heroku.settings(locals())
# Trust the X-Forwarded-Proto header set by Heroku's router for HTTPS detection.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
| [
"diogo.alves.ti@gmail.com"
] | diogo.alves.ti@gmail.com |
7656c3adcf817be7059c97d960b189aefd1dfef1 | db5b57a505d0ecdecf342d80b79a0e22e280c606 | /bitbox/script.py | fcf5815868eb3669d4753c83febb5073ad144e43 | [
"MIT"
] | permissive | lightswarm124/bitbox-py | 6620ead6fc4272585389f3004aeec05b2b6784a3 | 67ee0d216e2630fd44dba83b5233f33c315dd30b | refs/heads/master | 2020-09-12T18:00:01.353437 | 2019-11-17T02:33:52 | 2019-11-17T02:33:52 | 222,001,618 | 0 | 0 | MIT | 2019-11-15T20:54:24 | 2019-11-15T20:54:23 | null | UTF-8 | Python | false | false | 3,640 | py | class Script:
def opcodes():
    """Return the mapping from script opcode names to their byte values.

    Covers the Bitcoin Cash opcode set, including the re-enabled splice /
    numeric opcodes (OP_CAT, OP_SPLIT, OP_NUM2BIN, OP_BIN2NUM, OP_MUL, ...)
    and OP_CHECKDATASIG / OP_CHECKDATASIGVERIFY.

    NOTE(review): defined without `self` -- callable as Script.opcodes() but
    not on an instance; consider marking it @staticmethod.
    """
    codes = {
        # Constants / push-value opcodes
        "OP_FALSE": 0,
        "OP_0": 0,
        "OP_PUSHDATA1": 76,
        "OP_PUSHDATA2": 77,
        "OP_PUSHDATA4": 78,
        "OP_1NEGATE": 79,
        "OP_RESERVED": 80,
        "OP_TRUE": 81,
        "OP_1": 81,
        "OP_2": 82,
        "OP_3": 83,
        "OP_4": 84,
        "OP_5": 85,
        "OP_6": 86,
        "OP_7": 87,
        "OP_8": 88,
        "OP_9": 89,
        "OP_10": 90,
        "OP_11": 91,
        "OP_12": 92,
        "OP_13": 93,
        "OP_14": 94,
        "OP_15": 95,
        "OP_16": 96,
        # Flow control
        "OP_NOP": 97,
        "OP_VER": 98,
        "OP_IF": 99,
        "OP_NOTIF": 100,
        "OP_VERIF": 101,
        "OP_VERNOTIF": 102,
        "OP_ELSE": 103,
        "OP_ENDIF": 104,
        "OP_VERIFY": 105,
        "OP_RETURN": 106,
        # Stack manipulation
        "OP_TOALTSTACK": 107,
        "OP_FROMALTSTACK": 108,
        "OP_2DROP": 109,
        "OP_2DUP": 110,
        "OP_3DUP": 111,
        "OP_2OVER": 112,
        "OP_2ROT": 113,
        "OP_2SWAP": 114,
        "OP_IFDUP": 115,
        "OP_DEPTH": 116,
        "OP_DROP": 117,
        "OP_DUP": 118,
        "OP_NIP": 119,
        "OP_OVER": 120,
        "OP_PICK": 121,
        "OP_ROLL": 122,
        "OP_ROT": 123,
        "OP_SWAP": 124,
        "OP_TUCK": 125,
        # Splice
        "OP_CAT": 126,
        "OP_SPLIT": 127,
        "OP_NUM2BIN": 128,
        "OP_BIN2NUM": 129,
        "OP_SIZE": 130,
        # Bitwise logic
        "OP_INVERT": 131,
        "OP_AND": 132,
        "OP_OR": 133,
        "OP_XOR": 134,
        "OP_EQUAL": 135,
        "OP_EQUALVERIFY": 136,
        "OP_RESERVED1": 137,
        "OP_RESERVED2": 138,
        # Arithmetic
        "OP_1ADD": 139,
        "OP_1SUB": 140,
        "OP_2MUL": 141,
        "OP_2DIV": 142,
        "OP_NEGATE": 143,
        "OP_ABS": 144,
        "OP_NOT": 145,
        "OP_0NOTEQUAL": 146,
        "OP_ADD": 147,
        "OP_SUB": 148,
        "OP_MUL": 149,
        "OP_DIV": 150,
        "OP_MOD": 151,
        "OP_LSHIFT": 152,
        "OP_RSHIFT": 153,
        "OP_BOOLAND": 154,
        "OP_BOOLOR": 155,
        "OP_NUMEQUAL": 156,
        "OP_NUMEQUALVERIFY": 157,
        "OP_NUMNOTEQUAL": 158,
        "OP_LESSTHAN": 159,
        "OP_GREATERTHAN": 160,
        "OP_LESSTHANOREQUAL": 161,
        "OP_GREATERTHANOREQUAL": 162,
        "OP_MIN": 163,
        "OP_MAX": 164,
        "OP_WITHIN": 165,
        # Crypto
        "OP_RIPEMD160": 166,
        "OP_SHA1": 167,
        "OP_SHA256": 168,
        "OP_HASH160": 169,
        "OP_HASH256": 170,
        "OP_CODESEPARATOR": 171,
        "OP_CHECKSIG": 172,
        "OP_CHECKSIGVERIFY": 173,
        "OP_CHECKMULTISIG": 174,
        "OP_CHECKMULTISIGVERIFY": 175,
        # Expansion / locktime (NOP2 and NOP3 share values with the CLTV/CSV aliases)
        "OP_NOP1": 176,
        "OP_NOP2": 177,
        "OP_CHECKLOCKTIMEVERIFY": 177,
        "OP_NOP3": 178,
        "OP_CHECKSEQUENCEVERIFY": 178,
        "OP_NOP4": 179,
        "OP_NOP5": 180,
        "OP_NOP6": 181,
        "OP_NOP7": 182,
        "OP_NOP8": 183,
        "OP_NOP9": 184,
        "OP_NOP10": 185,
        "OP_CHECKDATASIG": 186,
        "OP_CHECKDATASIGVERIFY": 187,
        # Pseudo opcodes
        "OP_PUBKEYHASH": 253,
        "OP_PUBKEY": 254,
        "OP_INVALIDOPCODE": 255
    }
    return codes
| [
"merwanedr@gmail.com"
] | merwanedr@gmail.com |
f3340d0bd5c5e5f803e09c14c522220a33e21689 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_8/fkzluv001/question1.py | ceffc8905a53ff115df948b1aef2e73ac10f9c3c | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | """program with a recursive function to calculate whether or not a string is a palindrome
Luvo Fokazi
09 May 2014"""
def alt(n, var, j, count):
    """Recursively count the integers in [j, n] that divide var evenly.

    *count* carries the running total through the recursion.
    """
    if j > n:
        return count
    if var % j == 0:
        count += 1
    return alt(n, var, j + 1, count)
def Palindrome(dString, n):
    """Recursively report whether dString reads the same forwards and backwards.

    Compares character n with its mirror from the end and advances n.

    :param dString: string to test (call with n=0)
    :param n: current index being checked
    :return: "Palindrome!" or "Not a palindrome!"
    """
    # Base case first: this also covers the empty string, which previously
    # fell through to the indexing below and raised IndexError.
    if n + 1 >= len(dString):
        return "Palindrome!"
    d = (n + 1) * -1  # mirror index, counted from the end of the string
    if dString[n] == dString[d]:
        return Palindrome(dString, n + 1)
    else:
        return "Not a palindrome!"
if __name__ == "__main__":
x=input("Enter a string:\n")
print(Palindrome(x,0)) | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
38af6589a64233b172d528c200c9076dee62b25d | 0806d939ef0e8218c0f727e025eb0b3bb328d343 | /vote/vote/urls.py | 74da84523c3b4c2be083cf880e6398fd17ab17ef | [] | no_license | aiegoo/django-web | bcd89687d951e0877000c23230661ce566144e78 | 5476ed77cf95919d9b825f4cef03d42f217768ce | refs/heads/master | 2021-07-17T01:20:24.279227 | 2020-09-27T05:15:05 | 2020-09-27T05:15:05 | 215,240,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | from django.urls import path
from . import views
app_name = 'vote'
urlpatterns = [
# ex: /polls/
path('', views.IndexView.as_view(), name='index'),
# ex: /polls/5/
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
# ex: /polls/5/results/
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
] | [
"eozz21@gmail.com"
] | eozz21@gmail.com |
c24181654bf4d3d4afa935bc6785919436c43826 | 82ca891008793f570668a7f2c760ae0f22d40494 | /src/VAMPzero/Component/Fuel/Mass/mFuelMAX.py | 3af884d709051594463eba984ea2a716f1333e4e | [
"Apache-2.0"
] | permissive | p-chambers/VAMPzero | 22f20415e83140496b1c5702b6acbb76a5b7bf52 | 4b11d059b1c7a963ec7e7962fa12681825bc2f93 | refs/heads/master | 2021-01-19T10:49:06.393888 | 2015-06-24T10:33:41 | 2015-06-24T10:33:41 | 82,208,448 | 1 | 0 | null | 2017-02-16T17:42:55 | 2017-02-16T17:42:55 | null | UTF-8 | Python | false | false | 3,632 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)
Contact: daniel.boehnke@dlr.de and jonas.jepsen@dlr.de
'''
from cmath import sqrt
from VAMPzero.Handler.Parameter import parameter
class mFuelMAX(parameter):
    '''
    The maximum fuel mass that can be stored in the tanks

    :Unit: [kg]
    '''

    def __init__(self, value=0., unit='kg', parent='', cpacsPath=''):
        super(mFuelMAX, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,
                                       cpacsPath=cpacsPath)

    def calc(self):
        '''
        Sets the calc method to calcHeinze
        '''
        self.calc = self.calcHeinze

    def calcHeinze(self):
        '''
        Calculates the maximum fuel mass that can be stored in the wing from the
        geometrical definition of a single trapezoid

        k Faktor from Heinze was chosen to be 0.32

        :Source: Entwerfen von Verkehrsflugzeugen II, W. Heinze, TU Braunschweig, 2005, pp. 169
        '''
        taperRatio = self.parent.aircraft.wing.taperRatio.getValue()
        span = self.parent.aircraft.wing.span.getValue()
        cRoot = self.parent.aircraft.wing.cRoot.getValue()
        tcRoot = self.parent.aircraft.wing.airfoilr.tc.getValue()
        tcTip = self.parent.aircraft.wing.airfoilt.tc.getValue()

        k = 0.32  # tank utilisation factor after Heinze
        density = 775 #[kg/m3]

        #Calculate the tanks Volume
        # Guard against division by zero for a degenerate (zero-thickness) root airfoil.
        # NOTE(review): cmath.sqrt returns a complex number; for positive
        # thickness ratios its imaginary part is zero -- confirm downstream
        # consumers tolerate the complex type.
        if tcRoot != 0.:
            brace1 = 1 + taperRatio ** 2 * tcTip / tcRoot + taperRatio * sqrt(tcTip / tcRoot)
        else:
            brace1 = 0.

        Vtank = 2. / 3. * span / 2. * k * cRoot ** 2 * tcRoot * (brace1)

        #Return result as Volume of the tank times the density
        return self.setValueCalc(Vtank * density)

    def calcFLOPS(self):
        '''
        Calculation of the maximum Fuel Mass from the amount of fuel that can be stored in the wing
        Calculation Method in Flops sets FWMX to 23 as default. This is altered to 23/2.2046 for SI Units

        :Source: Flight Optimization System (FLOPS) User's Guide, McCullers, L.A., NASA Langeley, 2009, p.
        '''
        FWMX = 23 / 2.2046  # FLOPS default converted from imperial to SI

        refArea = self.parent.aircraft.wing.refArea.getValue()
        taperRatio = self.parent.aircraft.wing.taperRatio.getValue()
        span = self.parent.aircraft.wing.span.getValue()
        tcAVG = self.parent.aircraft.wing.tcAVG.getValue()

        #Span and Area must be converted into ft / ft**2 for correct results
        term1 = tcAVG * (refArea / 0.092903 ) ** 2 / (span / 0.3048)
        term2 = taperRatio / (1 + taperRatio) ** 2
        FuelMass = FWMX * term1 * (1 - term2)

        return self.setValueCalc(FuelMass)
###################################################################################################
#EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#
################################################################################################### | [
"daniel.boehnke@dlr.de"
] | daniel.boehnke@dlr.de |
c352969fb7fafde0ee6e6ff1bb2743e14ce90e4c | 7e729ea05a6a4e297bb832b77720a18cd0227805 | /Projects/Online Workouts/w3resource/String/program-52.py | 5515ab19e111fdd9fa6c453eb8b5e32cb3e6da1c | [
"MIT"
] | permissive | ivenpoker/Python-Projects | 943d127ae900df52b43aac07c395e9d717196115 | 2975e1bd687ec8dbcc7a4842c13466cb86292679 | refs/heads/master | 2022-12-18T16:36:37.954835 | 2020-09-14T19:42:46 | 2020-09-14T19:43:09 | 180,323,469 | 1 | 0 | MIT | 2022-12-08T01:05:35 | 2019-04-09T08:42:40 | Python | UTF-8 | Python | false | false | 1,891 | py | #############################################################################################
# #
# Program purpose: Prints all permutation with given repetition number of characters #
# of a given string. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : October 25, 2019 #
# #
#############################################################################################
from itertools import product
def obtain_user_data(mess: str):
    """Prompt with *mess* repeatedly until a non-empty string is entered."""
    user_data = ''
    while True:
        try:
            user_data = input(mess)
            if len(user_data) == 0:
                raise ValueError('Please provide some data to work with')
            break
        except ValueError as ve:
            print(f'[ERROR]: {ve}')
    return user_data
def all_repeat(main_str: str, perm_num: int):
    """Return all length-*perm_num* tuples of characters drawn from *main_str*.

    Characters may repeat (Cartesian product), and duplicate characters in
    *main_str* produce duplicate tuples -- itertools.product semantics.

    :param main_str: source characters (iterated in order)
    :param perm_num: length of each produced tuple
    :return: list of tuples of single-character strings
    """
    # product() accepts any iterable directly, so the intermediate character
    # list and the manual append loop of the original are unnecessary.
    return list(product(main_str, repeat=perm_num))
if __name__ == "__main__":
main_data = obtain_user_data(mess='Enter some data: ')
num_perm, valid = 0, False
while not valid:
try:
num_perm = int(obtain_user_data(mess='Enter number of permutations: '))
if num_perm <= 0:
raise ValueError('Please, enter positive number')
valid = True
except ValueError as ve:
print(f'[ERROR]: {ve}')
# main test
print(f"Combinations with repeat #{num_perm}: {all_repeat(main_str=main_data, perm_num=num_perm)}") | [
"nwaforhappiyvan@gmail.com"
] | nwaforhappiyvan@gmail.com |
4beb0e58abe9c75d8744830eb9004bed5a7b0751 | 433da00d8da1c28b528a34775db66a53cb505d82 | /players/Different Experiment and Final Script /Ex7- Script58/Script53.py | 1848a546e0ad6521427d135594d2fa0e7661a879 | [] | no_license | maithrreye/Cant-stop-game | 9cf0e5ffe4862a6f5cd9aaafcb9e0e6359debfd9 | 225b638854643af3168fb75516a08167a431bb35 | refs/heads/master | 2022-11-05T21:07:32.627004 | 2020-03-04T09:03:59 | 2020-03-04T09:03:59 | 244,762,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py |
from players.player import Player
import random
from players.scripts.DSL import DSL
class Script53(Player):
    """Auto-generated Can't Stop player strategy (script #53).

    get_action walks a fixed, synthesized list of DSL rules and returns the
    first matching action; _counter_calls records how often each rule fired.
    The redundant-looking conditions (e.g. `X and X`) come from the script
    synthesis and are kept verbatim.
    """

    def __init__(self):
        # One hit counter per rule in get_action (17 rules).
        self._counter_calls = []
        for i in range(17):
            self._counter_calls.append(0)

    def get_counter_calls(self):
        return self._counter_calls

    def get_action(self, state):
        actions = state.available_moves()
        for a in actions:
            if DSL.actionWinsColumn(state,a) and DSL.actionWinsColumn(state,a):
                self._counter_calls[0] += 1
                return a
            if DSL.isStopAction(a) and DSL.isStopAction(a):
                self._counter_calls[1] += 1
                return a
            if DSL.numberPositionsConquered(state, 4 ) > 1 and DSL.containsNumber(a, 4 ):
                self._counter_calls[2] += 1
                return a
            if DSL.containsNumber(a, 4 ) and DSL.actionWinsColumn(state,a):
                self._counter_calls[3] += 1
                return a
            if DSL.isDoubles(a) and DSL.isDoubles(a):
                self._counter_calls[4] += 1
                return a
            if DSL.isDoubles(a):
                self._counter_calls[5] += 1
                return a
            if DSL.actionWinsColumn(state,a) and DSL.hasWonColumn(state,a):
                self._counter_calls[6] += 1
                return a
            if DSL.containsNumber(a, 4 ):
                self._counter_calls[7] += 1
                return a
            if DSL.actionWinsColumn(state,a):
                self._counter_calls[8] += 1
                return a
            if DSL.isStopAction(a):
                self._counter_calls[9] += 1
                return a
            if DSL.isDoubles(a) and DSL.containsNumber(a, 5 ):
                self._counter_calls[10] += 1
                return a
            if DSL.containsNumber(a, 2 ):
                self._counter_calls[11] += 1
                return a
            if DSL.hasWonColumn(state,a):
                self._counter_calls[12] += 1
                return a
            if DSL.containsNumber(a, 3 ) and DSL.containsNumber(a, 3 ) and DSL.isDoubles(a):
                self._counter_calls[13] += 1
                return a
            if DSL.numberPositionsConquered(state, 3 ) > 0 and DSL.containsNumber(a, 3 ):
                self._counter_calls[14] += 1
                return a
            if DSL.actionWinsColumn(state,a) and DSL.actionWinsColumn(state,a) and DSL.actionWinsColumn(state,a):
                self._counter_calls[15] += 1
                return a
            if DSL.containsNumber(a, 3 ):
                self._counter_calls[16] += 1
                return a
        # Fallback when no rule matched any action.
        return actions[0]
| [
"maithrreye1994@gmail.com"
] | maithrreye1994@gmail.com |
e0766521e175164493e412fdd41801bcf8248c51 | 65e73c6c4a9e66715be2cbdd93339ebcab93976e | /windmill/boletagem/forms.py | ed5142d00f80dfe6141151b93321bb3cbde5cdf4 | [] | no_license | AnimaTakeshi/windmill-django | 3577f304d5e7f74750c7d95369e87d37209f1ac6 | 78bde49ace1ed215f6238fe94c142eac16e164dc | refs/heads/master | 2022-12-13T11:13:21.859012 | 2019-02-07T20:50:01 | 2019-02-07T20:50:01 | 150,470,109 | 0 | 0 | null | 2022-12-08T01:29:36 | 2018-09-26T18:13:54 | Python | UTF-8 | Python | false | false | 786 | py | from django import forms
from . import models
class FormBoletaAcao(forms.ModelForm):
class Meta:
model = models.BoletaAcao
fields = "__all__"
def clean_quantidade(self):
data = self.cleaned_data['quantidade']
print(data)
if self.cleaned_data['operacao'] == 'C':
data = abs(data)
else:
data = -abs(data)
return data
class FormBoletaRendaFixaLocal(forms.ModelForm):
class Meta:
model = models.BoletaRendaFixaLocal
fields = "__all__"
def clean_quantidade(self):
data = self.cleaned_data['quantidade']
print(data)
if self.cleaned_data['operacao'] == 'C':
data = abs(data)
else:
data = -abs(data)
return data
| [
"33721822+AnimaTakeshi@users.noreply.github.com"
] | 33721822+AnimaTakeshi@users.noreply.github.com |
afd1459f1116172ee4305c5657bb7774a8069b34 | 103c413086fbfadee1c52a7ea3125b9f20864f67 | /setup.py | 7c43572a3cb6819a6055ef6a5645b869c9dbb5c1 | [
"MIT"
] | permissive | tudstlennkozh/python-minifobf | 2dff2cb407c665afc6cb23f795661092086b4ff7 | 1d4a59ede7298d46cde43a27f739991ad60c5171 | refs/heads/master | 2023-07-25T06:27:34.359672 | 2021-05-16T13:30:23 | 2021-07-22T16:41:21 | 398,327,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | import os.path
import io

from setuptools import setup, find_packages
# Resolve README.md relative to this setup.py so builds work from any CWD.
readme_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md')

# Fix: decode the README explicitly as UTF-8. The builtin open() has no
# encoding parameter on Python 2.7 (which this package supports), and the
# locale default can mis-decode non-ASCII README content elsewhere;
# io.open works identically on 2.7 and 3.
with io.open(readme_path, encoding='utf-8') as f:
    long_desc = f.read()

setup(
    name='python_minifier',
    description='Transform Python source code into it\'s most compact representation',
    author='Daniel Flook',
    author_email='daniel@flook.org',
    url='https://github.com/dflook/python-minifier',
    license='MIT',
    project_urls={
        'Issues': 'https://github.com/dflook/python-minifier/issues',
        'Documentation': 'https://dflook.github.io/python-minifier/',
    },
    keywords='minify minifier',

    # Version is derived from the git tag by setuptools_scm.
    use_scm_version=True,
    package_dir={'': 'src'},
    packages=find_packages('src'),

    long_description=long_desc,
    long_description_content_type='text/markdown',

    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <3.10',
    setup_requires=['setuptools_scm'],

    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Intended Audience :: Developers',
        'Topic :: Software Development'
    ],

    entry_points = {
        'console_scripts': ['pyminify=python_minifier.__main__:main']
    },
    zip_safe=True
)
| [
"daniel@flook.org"
] | daniel@flook.org |
961058db0405e628c78d3744179be1a2aa4f188f | d507d0846902e0012a4b2a0aaaea1cbbdb21db46 | /supervisely_lib/metric/classification_metrics.py | 4392dea0801a666cd774015e3b43a9e1891fafec | [] | no_license | wpilibsuite/supervisely | a569fdc0d5e5f2fb912f32beab8f3fedb277504e | 19805ca9b2bd20e31d6d41a99dc37dc439bc257a | refs/heads/master | 2022-09-09T02:32:54.883109 | 2020-06-01T20:55:49 | 2020-06-01T20:55:49 | 267,916,361 | 2 | 3 | null | 2020-06-03T13:59:56 | 2020-05-29T17:27:30 | Python | UTF-8 | Python | false | false | 5,021 | py | # coding: utf-8
from copy import deepcopy
from supervisely_lib.sly_logger import logger
from supervisely_lib.annotation.tag_meta import TagValueType
from supervisely_lib.metric.metric_base import MetricsBase
from supervisely_lib.metric.common import log_line, safe_ratio, sum_counters, TRUE_POSITIVE, TRUE_NEGATIVE, \
FALSE_POSITIVE, FALSE_NEGATIVE, ACCURACY, PRECISION, RECALL, F1_MEASURE
# Order of the raw confusion counters tracked per tag pair in ClassificationMetrics.
RAW_COUNTERS = [TRUE_POSITIVE, TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE]
class ClassificationMetrics(MetricsBase):
    """Accumulates TP/TN/FP/FN counters for pairs of (ground-truth tag,
    predicted tag) across many annotation pairs and derives accuracy,
    precision, recall and F1 from the accumulated counts."""
    def __init__(self, tags_mapping, confidence_threshold=0):
        # tags_mapping: {gt_tag_name: pred_tag_name}. confidence_threshold
        # only applies to numeric-valued tags (see _classification_metrics).
        if len(tags_mapping) < 1:
            raise RuntimeError('At least one tags pair should be defined!')
        self._tags_mapping = tags_mapping.copy()
        self._confidence_threshold = confidence_threshold
        self._counters = {tag_name_gt: {counter: 0 for counter in RAW_COUNTERS} for tag_name_gt in
                          self._tags_mapping.keys()}
    def _classification_metrics(self, ann_1, ann_2):
        """Per-tag-pair 0/1 confusion counts for one (gt, pred) annotation pair."""
        def is_passes_confidence_threshold(tag):
            # NONE-valued tags count as present; numeric tags must reach
            # the threshold; string-valued tags are accepted with a warning.
            # NOTE(review): falls through (implicitly returns None, falsy)
            # for any other value type — confirm that is intended.
            if tag.meta.value_type == TagValueType.NONE:
                return True
            elif tag.meta.value_type == TagValueType.ANY_NUMBER:
                return tag.value >= self._confidence_threshold
            elif tag.meta.value_type == TagValueType.ANY_STRING or tag.meta.value_type == TagValueType.ONEOF_STRING:
                logger.warning("Classification tag '{}'".format(tag.name))
                return True
        current_metric_res = {}
        for tag_name_gt, tag_name_pred in self._tags_mapping.items():
            tag1 = ann_1.img_tags.get(tag_name_gt)
            tag2 = ann_2.img_tags.get(tag_name_pred)
            # A missing tag counts as "not present" on that side.
            c1 = is_passes_confidence_threshold(tag1) if tag1 is not None else False
            c2 = is_passes_confidence_threshold(tag2) if tag2 is not None else False
            current_metric_res[tag_name_gt] = {
                TRUE_POSITIVE: int(c1 and c2),
                TRUE_NEGATIVE: int(not c1 and not c2),
                FALSE_POSITIVE: int(not c1 and c2),
                FALSE_NEGATIVE: int(c1 and not c2)
            }
        return current_metric_res
    def add_pair(self, ann_gt, ann_pred):
        """Accumulate counters from one ground-truth/prediction annotation pair."""
        res = self._classification_metrics(ann_gt, ann_pred)
        for tag_name_gt, met_data in res.items():
            for metric_name, metric_value in met_data.items():
                self._counters[tag_name_gt][metric_name] += metric_value
    @staticmethod
    def _calculate_complex_metrics(values):
        # Adds the derived ratios to `values` in place; safe_ratio guards
        # against division by zero.
        tp = values[TRUE_POSITIVE]
        tn = values[TRUE_NEGATIVE]
        fp = values[FALSE_POSITIVE]
        fn = values[FALSE_NEGATIVE]
        values[ACCURACY] = safe_ratio(tp + tn, tp + tn + fp + fn)
        values[PRECISION] = safe_ratio(tp, tp + fp)
        values[RECALL] = safe_ratio(tp, tp + fn)
        values[F1_MEASURE] = safe_ratio(2.0 * tp, 2.0 * tp + fp + fn)
    def get_metrics(self):
        """Per-tag-pair metrics: raw counters plus derived ratios (on a copy)."""
        result = deepcopy(self._counters)
        for pair_counters in result.values():
            self._calculate_complex_metrics(pair_counters)
        return result
    def get_total_metrics(self):
        """Metrics computed over counters summed across all tag pairs."""
        result = sum_counters(self._counters.values(), (TRUE_POSITIVE, TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE))
        self._calculate_complex_metrics(result)
        return result
    def log_total_metrics(self):
        """Log a human-readable report: legend, per-pair values, then totals."""
        common_info = """
        P = condition positive (the number of real positive cases in the data)
        N = condition negative (the number of real negative cases in the data)
        TP = True Positive prediction
        TN = True Negative prediction
        FP = False Positive prediction (Type I error)
        FN = False Negative prediction (Type II error)
        Accuracy = (TP + TN)/(TP + TN + FP + FN) = TRUE/TOTAL
        Precision = TP / (TP + FP)
        Recall = TP / (TP + FN)
        F1-Measure = (2 * TP) / (2 * TP + FP + FN)
        """
        log_line()
        log_line(c='*')
        for line in common_info.split('\n'):
            line = line.strip()
            if len(line) > 0:
                logger.info(line.ljust(80))
        log_line(c='*')
        log_line()
        def print_evaluation_values(tag_pair_metrics):
            labels = [ACCURACY, PRECISION, RECALL, F1_MEASURE, TRUE_POSITIVE, TRUE_NEGATIVE, FALSE_POSITIVE,
                      FALSE_NEGATIVE]
            for label in labels:
                logger.info('  {0}: {1:2.4f}'.format(label.ljust(16), tag_pair_metrics[label]))
        for i, (tag_name_gt, tag_metrics) in enumerate(self.get_metrics().items(), start=1):
            logger.info('{}) {} <--> {}:'.format(i, tag_name_gt, self._tags_mapping[tag_name_gt]))
            print_evaluation_values(tag_metrics)
            log_line()
        logger.info('Total values:')
        total_values = self.get_total_metrics()
        print_evaluation_values(total_values)
        log_line()
        log_line(c='*')
| [
"austinshalit@gmail.com"
] | austinshalit@gmail.com |
b8d15a47a5af0f68dbc337f3085e8229d1001478 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5769900270288896_0/Python/gilesg/B.py | 9b6e7164caea2e5f0ee7de5be27548db97ad2c10 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | def ints():
return map(int, raw_input().split())
INF = 10 ** 9  # sentinel cost: "no grid achieves this occupied-cell count"
LIM = 16  # brute force enumerates 2**(R*C) grids, so only shapes with R*C <= LIM
num_cases, = ints()  # first input line: number of test cases (Python 2 I/O)
def count(grid):
    """Number of orthogonally adjacent pairs of occupied (truthy) cells."""
    rows, cols = len(grid), len(grid[0])
    pairs = 0
    for r, row in enumerate(grid):
        for c, cell in enumerate(row):
            if not cell:
                continue
            # right neighbour
            if c + 1 < cols and row[c + 1]:
                pairs += 1
            # down neighbour
            if r + 1 < rows and grid[r + 1][c]:
                pairs += 1
    return pairs
def construct(R, C, x):
    """Decode bitmask *x* into an R x C grid of 0/1 cells, row-major, LSB first.

    Fix: uses floor division (``//=``). The original ``x /= 2`` is floor
    division on Python 2 ints but true division on Python 3, where it turns
    *x* into a float and corrupts the remaining bits; ``//=`` is identical
    on Python 2 and correct on Python 3.
    """
    grid = []
    for _ in range(R):
        row = [0] * C
        for j in range(C):
            row[j] = x % 2
            x //= 2
        grid.append(row)
    return grid
def bf(R, C):
    """Brute force: best[n] = minimal adjacency cost over all R x C grids
    containing exactly n occupied cells (INF when unused)."""
    best = [INF] * (R * C + 1)
    for mask in range(2 ** (R * C)):
        grid = construct(R, C, mask)
        occupied = sum(sum(row) for row in grid)
        best[occupied] = min(best[occupied], count(grid))
    return best
d = {}
# Precompute, for every board shape with at most LIM cells, the minimal
# adjacency cost for each possible number of occupied cells.
for R in range(1, LIM+1):
    for C in range(1, LIM+1):
        if R * C <= LIM:
            d[(R, C)] = bf(R, C)
# Answer each query by table lookup (Python 2: xrange / print statement).
for case_num in xrange(1, num_cases + 1):
    R, C, N = ints()
    print "Case #%d: %s" % (case_num, d[(R, C)][N])
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
1fff13e4ffd8369a4724c0d5ac905d670593c68b | a8769709aeb7299fa3757f0e7bba5c617eb8cfe3 | /lesson-3/k8s/lib/python2.7/site-packages/adal/self_signed_jwt.py | 54c0fd9913797739a85a8e938eb40ccea83fac03 | [
"Apache-2.0"
] | permissive | simox-83/workshop-k8s | 2ac5e8b282bb7c3337acc726a7d972717bf649cc | 04cb18e8b5925a3cfd84ca316952a6cb64960b31 | refs/heads/master | 2020-03-31T20:52:21.421995 | 2018-10-11T14:43:08 | 2018-10-11T14:43:08 | 152,558,678 | 0 | 0 | Apache-2.0 | 2018-10-11T08:37:20 | 2018-10-11T08:37:20 | null | UTF-8 | Python | false | false | 4,883 | py | #------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import time
import datetime
import uuid
import base64
import binascii
import re
import jwt
from .constants import Jwt
from .log import Logger
from .adal_error import AdalError
def _get_date_now():
return datetime.datetime.now()
def _get_new_jwt_id():
return str(uuid.uuid4())
def _create_x5t_value(thumbprint):
hex_val = binascii.a2b_hex(thumbprint)
return base64.urlsafe_b64encode(hex_val).decode()
def _sign_jwt(header, payload, certificate):
    """Sign *payload* with *certificate*, validate the result, return it.

    Any encoding failure is surfaced as an AdalError pointing at the most
    common cause: a malformed PEM private key.
    """
    try:
        signed = _encode_jwt(payload, certificate, header)
    except Exception as exp:
        raise AdalError("Error:Invalid Certificate: Expected Start of Certificate to be '-----BEGIN RSA PRIVATE KEY-----'", exp)
    _raise_on_invalid_jwt_signature(signed)
    return signed
def _encode_jwt(payload, certificate, header):
    # NOTE(review): PyJWT < 2.0 returns bytes here (hence the .decode());
    # PyJWT >= 2.0 returns str and .decode() would raise — confirm the
    # pinned PyJWT version.
    return jwt.encode(payload, certificate, algorithm='RS256', headers=header).decode()
def _raise_on_invalid_jwt_signature(encoded_jwt):
segments = encoded_jwt.split('.')
if len(segments) < 3 or not segments[2]:
raise AdalError('Failed to sign JWT. This is most likely due to an invalid certificate.')
class SelfSignedJwt(object):
    """Builds the self-signed JWT client assertion used for
    certificate-based client credentials."""
    # Accepted thumbprint lengths in hex characters (128-bit legacy,
    # 160-bit SHA-1) and the canonical lowercase-hex format.
    NumCharIn128BitHexString = 128/8*2
    numCharIn160BitHexString = 160/8*2
    ThumbprintRegEx = r"^[a-f\d]*$"
    def __init__(self, call_context, authority, client_id):
        # authority supplies the token endpoint used as the JWT audience.
        self._log = Logger('SelfSignedJwt', call_context['log_context'])
        self._call_context = call_context
        self._authortiy = authority  # NOTE(review): misspelled attribute name; not read elsewhere in this class
        self._token_endpoint = authority.token_endpoint
        self._client_id = client_id
    def _create_header(self, thumbprint):
        # RS256 JWT header carrying the certificate thumbprint as 'x5t'.
        x5t = _create_x5t_value(thumbprint)
        header = {'typ':'JWT', 'alg':'RS256', 'x5t':x5t}
        self._log.debug("Creating self signed JWT header. x5t: %(x5t)s",
                        {"x5t": x5t})
        return header
    def _create_payload(self):
        # Claims valid for SELF_SIGNED_JWT_LIFETIME (timedelta's 5th
        # positional argument = minutes); nbf/exp are epoch seconds.
        now = _get_date_now()
        minutes = datetime.timedelta(0, 0, 0, 0, Jwt.SELF_SIGNED_JWT_LIFETIME)
        expires = now + minutes
        self._log.debug(
            'Creating self signed JWT payload. Expires: %(expires)s NotBefore: %(nbf)s',
            {"expires": expires, "nbf": now})
        jwt_payload = {}
        jwt_payload[Jwt.AUDIENCE] = self._token_endpoint
        jwt_payload[Jwt.ISSUER] = self._client_id
        jwt_payload[Jwt.SUBJECT] = self._client_id
        jwt_payload[Jwt.NOT_BEFORE] = int(time.mktime(now.timetuple()))
        jwt_payload[Jwt.EXPIRES_ON] = int(time.mktime(expires.timetuple()))
        jwt_payload[Jwt.JWT_ID] = _get_new_jwt_id()
        return jwt_payload
    def _raise_on_invalid_thumbprint(self, thumbprint):
        # Accept only 128- or 160-bit, lowercase-hex thumbprints.
        thumbprint_sizes = [self.NumCharIn128BitHexString, self.numCharIn160BitHexString]
        size_ok = len(thumbprint) in thumbprint_sizes
        if not size_ok or not re.search(self.ThumbprintRegEx, thumbprint):
            raise AdalError("The thumbprint does not match a known format")
    def _reduce_thumbprint(self, thumbprint):
        # Canonicalize: lowercase, drop space and colon separators, validate.
        canonical = thumbprint.lower().replace(' ', '').replace(':', '')
        self._raise_on_invalid_thumbprint(canonical)
        return canonical
    def create(self, certificate, thumbprint):
        """Build and sign a client-assertion JWT with the given PEM private
        key and certificate thumbprint."""
        thumbprint = self._reduce_thumbprint(thumbprint)
        header = self._create_header(thumbprint)
        payload = self._create_payload()
        return _sign_jwt(header, payload, certificate)
| [
"simone.dandreta@concur.com"
] | simone.dandreta@concur.com |
b7f79c4d8d2f67f78e73f488547be2567370dd3a | 998a978c0cefcb62d462c64ec88f61b8bdcbbbab | /braindecode/datautil/splitters.py | 3cb3f7769369e41edc1817d0ee0b238ab085eb98 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | petsas/braindecode | cf16b52f0bb0897165e14110959e486d0e379d64 | 651ef3d7a467b22948802527018a7a8e686c567d | refs/heads/master | 2020-03-31T00:31:35.215291 | 2018-09-19T10:33:20 | 2018-09-19T10:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,840 | py | import numpy as np
from braindecode.datautil.iterators import get_balanced_batches
from braindecode.datautil.signal_target import apply_to_X_y, SignalAndTarget
def concatenate_sets(sets):
    """
    Concatenate all sets together.

    Parameters
    ----------
    sets: list of :class:`.SignalAndTarget`

    Returns
    -------
    concatenated_set: :class:`.SignalAndTarget`
    """
    merged = sets[0]
    for next_set in sets[1:]:
        merged = concatenate_two_sets(merged, next_set)
    return merged
def concatenate_two_sets(set_a, set_b):
    """
    Concatenate two sets together.

    Parameters
    ----------
    set_a, set_b: :class:`.SignalAndTarget`

    Returns
    -------
    concatenated_set: :class:`.SignalAndTarget`
    """
    return SignalAndTarget(
        concatenate_np_array_or_add_lists(set_a.X, set_b.X),
        concatenate_np_array_or_add_lists(set_a.y, set_b.y))
def concatenate_np_array_or_add_lists(a, b):
    """Concatenate two sequences: arrays along axis 0, lists by addition.

    Mixed inputs are coerced to lists first, so the result is an ndarray
    only when both inputs are ndarrays.
    """
    a_is_array = hasattr(a, 'ndim')
    b_is_array = hasattr(b, 'ndim')
    if a_is_array and b_is_array:
        return np.concatenate((a, b), axis=0)
    if a_is_array:
        a = a.tolist()
    if b_is_array:
        b = b.tolist()
    return a + b
def split_into_two_sets(dataset, first_set_fraction=None, n_first_set=None):
    """
    Split set into two sets either by fraction of first set or by number
    of trials in first set.

    Parameters
    ----------
    dataset: :class:`.SignalAndTarget`
    first_set_fraction: float, optional
        Fraction of trials in first set.
    n_first_set: int, optional
        Number of trials in first set

    Returns
    -------
    first_set, second_set: :class:`.SignalAndTarget`
        The two splitted sets.
    """
    assert (first_set_fraction is None) != (n_first_set is None), (
        "Pass either first_set_fraction or n_first_set")
    n_total = len(dataset.X)
    if n_first_set is None:
        n_first_set = int(round(n_total * first_set_fraction))
    assert n_first_set < n_total

    def _take_first(a):
        return a[:n_first_set]

    def _take_second(a):
        return a[n_first_set:]

    return (apply_to_X_y(_take_first, dataset),
            apply_to_X_y(_take_second, dataset))
def select_examples(dataset, indices):
    """
    Select examples from dataset.

    Parameters
    ----------
    dataset: :class:`.SignalAndTarget`
    indices: list of int, 1d-array of int
        Indices to select

    Returns
    -------
    reduced_set: :class:`.SignalAndTarget`
        Dataset with only examples selected.
    """
    index_arr = np.array(indices)
    if hasattr(dataset.X, 'ndim'):
        # ndarray features: fancy indexing on a copy
        selected_X = np.array(dataset.X)[index_arr]
    else:
        # list features: pick element by element
        selected_X = [dataset.X[i] for i in index_arr]
    selected_y = np.asarray(dataset.y)[index_arr]
    return SignalAndTarget(selected_X, selected_y)
def split_into_train_valid_test(dataset, n_folds, i_test_fold, rng=None):
    """
    Split datasets into folds, select one valid fold, one test fold and merge rest as train fold.

    Parameters
    ----------
    dataset: :class:`.SignalAndTarget`
    n_folds: int
        Number of folds to split dataset into.
    i_test_fold: int
        Index of the test fold (0-based). Validation fold will be immediately preceding fold.
    rng: `numpy.random.RandomState`, optional
        Random Generator for shuffling, None means no shuffling

    Returns
    -------
    train_set, valid_set, test_set: :class:`.SignalAndTarget`
        The train/validation/test splits; the validation fold is the fold
        immediately preceding the test fold (wrapping to the last fold when
        i_test_fold is 0).
    """
    n_trials = len(dataset.X)
    if n_trials < n_folds:
        raise ValueError("Less Trials: {:d} than folds: {:d}".format(
            n_trials, n_folds
        ))
    shuffle = rng is not None
    folds = get_balanced_batches(
        n_trials, rng, shuffle, n_batches=n_folds)
    test_inds = folds[i_test_fold]
    # Python's negative indexing makes fold -1 (i_test_fold == 0) the last fold.
    valid_inds = folds[i_test_fold - 1]
    all_inds = list(range(n_trials))
    # Train = everything not in valid or test.
    train_inds = np.setdiff1d(all_inds, np.union1d(test_inds, valid_inds))
    # Sanity checks: the three splits are disjoint and cover all trials.
    assert np.intersect1d(train_inds, valid_inds).size == 0
    assert np.intersect1d(train_inds, test_inds).size == 0
    assert np.intersect1d(valid_inds, test_inds).size == 0
    assert np.array_equal(np.sort(
        np.union1d(train_inds, np.union1d(valid_inds, test_inds))),
        all_inds)
    train_set = select_examples(dataset, train_inds)
    valid_set = select_examples(dataset, valid_inds)
    test_set = select_examples(dataset, test_inds)
    return train_set, valid_set, test_set
def split_into_train_test(dataset, n_folds, i_test_fold, rng=None):
    """
    Split datasets into folds, select one test fold and merge rest as train fold.

    Parameters
    ----------
    dataset: :class:`.SignalAndTarget`
    n_folds: int
        Number of folds to split dataset into.
    i_test_fold: int
        Index of the test fold (0-based)
    rng: `numpy.random.RandomState`, optional
        Random Generator for shuffling, None means no shuffling

    Returns
    -------
    train_set, test_set: :class:`.SignalAndTarget`
        The train and test splits of the dataset.
    """
    n_trials = len(dataset.X)
    if n_trials < n_folds:
        raise ValueError("Less Trials: {:d} than folds: {:d}".format(
            n_trials, n_folds
        ))
    shuffle = rng is not None
    folds = get_balanced_batches(n_trials, rng, shuffle,
                                 n_batches=n_folds)
    test_inds = folds[i_test_fold]
    # Bug fix: n_trials is already an int; the original
    # `list(range(len(n_trials)))` raised TypeError on every call
    # (compare split_into_train_valid_test, which does it correctly).
    all_inds = list(range(n_trials))
    train_inds = np.setdiff1d(all_inds, test_inds)
    # Sanity checks: splits are disjoint and cover all trials.
    assert np.intersect1d(train_inds, test_inds).size == 0
    assert np.array_equal(np.sort(np.union1d(train_inds, test_inds)),
                          all_inds)

    train_set = select_examples(dataset, train_inds)
    test_set = select_examples(dataset, test_inds)
    return train_set, test_set
| [
"robintibor@gmail.com"
] | robintibor@gmail.com |
a259289c6a6b232a8abc5b6e5b43ede75f107410 | 60715c9ea4c66d861708531def532814eab781fd | /python-programming-workshop/pythondatastructures/dateandtime/time_yesterday.py | e220240661773649db71af463af4edfd0954a20a | [] | no_license | bala4rtraining/python_programming | 6ce64d035ef04486f5dc9572cb0975dd322fcb3e | 99a5e6cf38448f5a01b310d5f7fa95493139b631 | refs/heads/master | 2023-09-03T00:10:26.272124 | 2021-11-01T08:20:52 | 2021-11-01T08:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py |
#Python program that returns yesterday
from datetime import date
from datetime import timedelta
def yesterday():
    """Return the calendar date one day before today."""
    one_day = timedelta(days=1)
    return date.today() - one_day
# Demo: print today's date, then yesterday's.
print(date.today())
print(yesterday())
| [
"karthikkannan@gmail.com"
] | karthikkannan@gmail.com |
01a2714dc644c9a68e8435d2bc8bbb20cdd487fb | c0c8aeb5aaf08925d8c9e1d660b02c89cbc7ad71 | /Algorithms/Medium/55. Jump Game/answer.py | 034d33909be49fa36615b2eb76dd8385990d05ed | [
"Apache-2.0"
] | permissive | kenwoov/PlayLeetCode | b2fdc43d799c37683a9efdc31c4df159cf553bf5 | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | refs/heads/master | 2022-12-17T05:54:22.775972 | 2020-09-26T14:08:43 | 2020-09-26T14:08:43 | 214,839,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | from typing import List
class Solution:
    def canJump(self, nums: List[int]) -> bool:
        """True iff the last index is reachable from index 0, where each
        value is the maximum jump length from that position.

        Greedy forward scan: track the furthest index reachable so far;
        if the scan ever passes it, the end is unreachable.
        """
        furthest = 0
        for idx, jump in enumerate(nums):
            if idx > furthest:
                return False
            furthest = max(furthest, idx + jump)
        return True
if __name__ == "__main__":
s = Solution()
result = s.canJump([3, 2, 1, 0, 4])
print(result)
| [
"kenwoov@outlook.com"
] | kenwoov@outlook.com |
d03ff966642e5d1c4511a4ac1e8024d75bf5f2e2 | 3ca67d69abd4e74b7145b340cdda65532f90053b | /LeetCode/Two_Pointers/Valid Palindrome/6047198844.py | b5bb7a7d4d16f6b7caa8c0ae6180b37f36c7b1bb | [] | no_license | DKU-STUDY/Algorithm | 19549516984b52a1c5cd73e1ed1e58f774d6d30e | 6f78efdbefd8eedab24e43d74c7dae7f95c2893b | refs/heads/master | 2023-02-18T06:48:39.309641 | 2023-02-09T07:16:14 | 2023-02-09T07:16:14 | 258,455,710 | 175 | 49 | null | 2023-02-09T07:16:16 | 2020-04-24T08:42:27 | Python | UTF-8 | Python | false | false | 145 | py | import re
class Solution:
def isPalindrome(self, s: str) -> bool:
s = re.sub('[^a-z0-9A-Z]','',s).lower()
return s == s[::-1] | [
"2615240@gmail.com"
] | 2615240@gmail.com |
5d22821c7019c51b77ad3f13c64f5f67fa04579d | 75f0e04c6330950a9bd225fd8b62fdc9fb0021b8 | /103.binaryTreeZigzagLevelOrderTraversal.py | 3511ea332bce45326491b783728bd51dc0aa9c57 | [] | no_license | Minzhe/LeetCode | f07bc1edce77cee778d7dc3c4a379921747637a5 | e5cb0212cb83daac829456c14aec557e26eea68c | refs/heads/master | 2020-05-04T11:25:07.084483 | 2020-03-09T02:10:17 | 2020-03-09T02:10:17 | 179,107,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if root is None: return []
ans, order = [], 1
level = [root]
while level:
if order == 1:
ans.append([node.val for node in level])
elif order == -1:
ans.append([node.val for node in reversed(level)])
level = self.getnextlevel(level)
order *= -1
return ans
def getnextlevel(self, level):
nextlevel = []
for node in level:
if node.left:
nextlevel.append(node.left)
if node.right:
nextlevel.append(node.right)
return nextlevel | [
"zenroute.mzhang@gmail.com"
] | zenroute.mzhang@gmail.com |
f1c240360245fffab7d3d52118d4c4bc8ff0e397 | 33890b0061d0701b32bb7d78776485c3afb9f67e | /pyexe/cpumon.py | 93ce008962966b4d1ff7aff3145f366a9cc1de24 | [] | no_license | songzg/winterpy | 677c7ec77a6923ba89d5f0b98c01d30e981336ae | 05f8b3eb43588c452a0f76f4b6e04ee37ca0afb3 | refs/heads/master | 2020-05-29T11:46:26.768980 | 2016-05-19T04:08:02 | 2016-05-19T04:08:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | #!/usr/bin/env python3
# vim:fileencoding=utf-8
'''
Monitor CPU usage and automatically run a command when it is too high.
(Original note: 监视CPU的使用,过高时自动执行命令 — 2010-07-17)
'''
# Command executed on high load: append a separator plus the non-idle
# processes reported by `top` to ~/tmpfs/cpumon.
cmd = 'echo ================== >> ~/tmpfs/cpumon && top -n 1 -b | awk \'{if($4 != 0) print}\' >> ~/tmpfs/cpumon'
import os
import time
def getCPUUsage():
  """Sample overall CPU usage over a one-second window.

  Reads the aggregate 'cpu' line of /proc/stat twice, one second apart,
  and returns the fraction of that window not spent idle (field index 3
  of the deltas is the idle counter).

  Fix: /proc/stat handles are now closed deterministically via context
  managers instead of being left for the garbage collector.
  """
  with open('/proc/stat') as stat_file:
    cpu_before = stat_file.readline().split()[1:]
  time.sleep(1)
  with open('/proc/stat') as stat_file:
    cpu_after = stat_file.readline().split()[1:]
  # Per-field counter deltas over the sampling window.
  cpu = [int(after) - int(before) for before, after in zip(cpu_before, cpu_after)]
  return 1 - cpu[3] / sum(cpu)
def monitor(cmd=cmd, threshold=0.9):
  """Sample CPU usage forever, echo each sample, and run *cmd* whenever
  the sampled usage exceeds *threshold*. Never returns."""
  while True:
    load = getCPUUsage()
    print('CPU Usage: %.2f' % load)
    if load > threshold:
      os.system(cmd)
if __name__ == '__main__':
  try:
    # Demo threshold of 50%; Ctrl-C ends the monitoring loop cleanly.
    monitor(threshold=.5)
  except KeyboardInterrupt:
    print('退出')  # "exit"
| [
"lilydjwg@gmail.com"
] | lilydjwg@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.