hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1083a2affb9a63631077241caffe2b17bde2cca | 5,172 | py | Python | visualise.py | ksang/fiery | b41e0138e388d9b846f174c09d60539b5b226f2d | [
"MIT"
] | null | null | null | visualise.py | ksang/fiery | b41e0138e388d9b846f174c09d60539b5b226f2d | [
"MIT"
] | null | null | null | visualise.py | ksang/fiery | b41e0138e388d9b846f174c09d60539b5b226f2d | [
"MIT"
] | null | null | null | import os
from argparse import ArgumentParser
from glob import glob
import cv2
import numpy as np
import torch
import torchvision
import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image
from fiery.trainer import TrainingModule
from fiery.utils.network import NormalizeInverse
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
from fiery.utils.visualisation import plot_instance_map, generate_instance_colours, make_contour, convert_figure_numpy
EXAMPLE_DATA_PATH = 'example_data'
if __name__ == '__main__':
parser = ArgumentParser(description='Fiery visualisation')
parser.add_argument('--checkpoint', default='./fiery.ckpt', type=str, help='path to checkpoint')
args = parser.parse_args()
visualise(args.checkpoint)
| 38.029412 | 118 | 0.672467 |
b10a7ba9df13f93730fafc42256936a0555a720d | 7,034 | py | Python | autoelective/util.py | apomeloYM/PKUAutoElective | 21b4ab000919f68080e7a942ddff4ca070cf41e7 | [
"MIT"
] | null | null | null | autoelective/util.py | apomeloYM/PKUAutoElective | 21b4ab000919f68080e7a942ddff4ca070cf41e7 | [
"MIT"
] | null | null | null | autoelective/util.py | apomeloYM/PKUAutoElective | 21b4ab000919f68080e7a942ddff4ca070cf41e7 | [
"MIT"
] | 2 | 2020-02-07T04:02:14.000Z | 2020-02-16T23:34:16.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: util.py
import os
import csv
from copy import deepcopy
import hashlib
from functools import wraps
from collections import OrderedDict
from ._compat import json, JSONDecodeError
from .exceptions import NoInstanceError, ImmutableTypeError, ReadonlyPropertyError
__Util_Funcs__ = ["mkdir","json_load","json_dump","read_csv","to_bytes","to_utf8","MD5","SHA1",]
__Util_Class__ = ["ImmutableAttrsMixin",]
__Util_Decorator__ = ["singleton","noinstance","ReadonlyProperty",]
__Util_MetaClass__ = ["Singleton","NoInstance",]
__all__ = __Util_Funcs__ + __Util_Class__ + __Util_Decorator__ + __Util_MetaClass__
def noinstance(cls):
""" """
return wrapper
def singleton(cls):
""" """
_inst = {}
return get_inst
def _is_readonly(obj, key):
raise ReadonlyPropertyError("'%s.%s' property is read-only" % (obj.__class__.__name__, key))
| 27.802372 | 111 | 0.633068 |
b10b445b6f929ecc345e5226229e53a873023020 | 1,827 | py | Python | reference_data/uk_biobank_v3/1_extract_ukb_variables.py | thehyve/genetics-backend | 81d09bf5c70c534a59940eddfcd9c8566d2b2ec1 | [
"Apache-2.0"
] | 6 | 2019-06-01T11:17:41.000Z | 2021-09-24T14:06:30.000Z | reference_data/uk_biobank_v3/1_extract_ukb_variables.py | opentargets/genetics-backend | 1ab0314f9fe4b267f8ffb5ed94187d55fbb3431c | [
"Apache-2.0"
] | 7 | 2018-11-28T10:06:21.000Z | 2020-01-26T18:55:39.000Z | reference_data/uk_biobank_v3/1_extract_ukb_variables.py | thehyve/genetics-backend | 81d09bf5c70c534a59940eddfcd9c8566d2b2ec1 | [
"Apache-2.0"
] | 4 | 2019-05-09T13:57:57.000Z | 2021-08-03T18:19:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
"""
bsub -q small -J interactive -n 1 -R "select[mem>8000] rusage[mem=8000] span[hosts=1]" -M8000 -Is bash
"""
import gzip
import sys
if __name__ == '__main__':
main()
| 31.5 | 102 | 0.604269 |
b10d15ba52e0f2579184cc4a6747cccecf9ad61c | 6,088 | py | Python | main.py | Staubtornado/juliandc | 47e41f9e10088f94af44dcfab00073b788121777 | [
"MIT"
] | null | null | null | main.py | Staubtornado/juliandc | 47e41f9e10088f94af44dcfab00073b788121777 | [
"MIT"
] | null | null | null | main.py | Staubtornado/juliandc | 47e41f9e10088f94af44dcfab00073b788121777 | [
"MIT"
] | null | null | null | import asyncio
import discord
from discord.ext import commands, tasks
import os
import random
import dotenv
import difflib
import configparser
###
version = '4.0.0'
###
bot = commands.Bot(command_prefix = '!', owner_id = 272446903940153345, intents = discord.Intents.all())
bot.remove_command('help')
config = configparser.ConfigParser()
config.read('settings.cfg')
dotenv.load_dotenv()
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
presence = [f'{version} Released', 'Belle Delphine <3', 'Fortnite is gay', 'Bugs are Features', 'By Staubtornado', 'Hentai']
status_change.start()
CommandOnCooldown_check = []
CommandNotFound_check = []
Else_check = []
bot.run(os.environ['DISCORD_TOKEN']) | 41.135135 | 360 | 0.587714 |
b10e5e4bf82f717f9759daccbbc32309f685a6ee | 565 | py | Python | lib/utils.py | MusaTamzid05/simple_similar_image_lib | 3882cc3d6c3d8d61f67c71fcbe5a3cbad4e10697 | [
"MIT"
] | null | null | null | lib/utils.py | MusaTamzid05/simple_similar_image_lib | 3882cc3d6c3d8d61f67c71fcbe5a3cbad4e10697 | [
"MIT"
] | null | null | null | lib/utils.py | MusaTamzid05/simple_similar_image_lib | 3882cc3d6c3d8d61f67c71fcbe5a3cbad4e10697 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
| 28.25 | 81 | 0.612389 |
b10e8fe5318c13af9359ac8f09fb570418b7c0b2 | 2,226 | py | Python | dataloaders/voc.py | psui3905/CCT | 637cbac130b39f02733339c79cdf1d531e339e9c | [
"MIT"
] | 308 | 2020-06-09T13:37:17.000Z | 2022-03-24T07:43:33.000Z | dataloaders/voc.py | lesvay/CCT | cf98ea7e6aefa7091e6c375a9025ba1e0f6e53ca | [
"MIT"
] | 55 | 2020-06-16T11:57:54.000Z | 2022-03-09T12:04:58.000Z | dataloaders/voc.py | lesvay/CCT | cf98ea7e6aefa7091e6c375a9025ba1e0f6e53ca | [
"MIT"
] | 51 | 2020-06-08T02:42:14.000Z | 2022-02-25T16:38:36.000Z | from base import BaseDataSet, BaseDataLoader
from utils import pallete
import numpy as np
import os
import scipy
import torch
from PIL import Image
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
import json
| 36.491803 | 114 | 0.637017 |
b10ed1a87457d0709ae65d88b218cf1992004525 | 16,418 | py | Python | FWCore/Integration/test/ThinningTest1_cfg.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | FWCore/Integration/test/ThinningTest1_cfg.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | FWCore/Integration/test/ThinningTest1_cfg.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | # This process is the first step of a test that involves multiple
# processing steps. It tests the thinning collections and
# redirecting Refs, Ptrs, and RefToBases.
#
# Produce 15 thinned collections
#
# Collection A contains Things 0-8
# Collection B contains Things 0-3 and made from collection A
# Collection C contains Things 4-7 and made from collection A
#
# x Collection D contains Things 10-18
# Collection E contains Things 10-14 and made from collection D
# Collection F contains Things 14-17 and made from collection D
#
# Collection G contains Things 20-28
# x Collection H contains Things 20-23 and made from collection G
# x Collection I contains Things 24-27 and made from collection G
#
# x Collection J contains Things 30-38
# x Collection K contains Things 30-33 and made from collection J
# x Collection L contains Things 34-37 and made from collection J
#
# x Collection M contains Things 40-48
# x Collection N contains Things 40-43 and made from collection M
# Collection O contains Things 44-47 and made from collection M
#
# The collections marked with an x will get deleted in the next
# processing step.
#
# The Things kept are set by creating TracksOfThings which
# reference them and using those in the selection of a
# Thinning Producer.
#
# The ThinningTestAnalyzer checks that things are working as
# they are supposed to work.
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.options = cms.untracked.PSet(
numberOfStreams = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(3)
)
process.source = cms.Source("EmptySource")
process.WhatsItESProducer = cms.ESProducer("WhatsItESProducer")
process.DoodadESSource = cms.ESSource("DoodadESSource")
process.thingProducer = cms.EDProducer("ThingProducer",
offsetDelta = cms.int32(100),
nThings = cms.int32(50)
)
process.thingProducer2 = cms.EDProducer("ThingProducer",
offsetDelta = cms.int32(100),
nThings = cms.int32(50)
)
process.thingProducer2alias = cms.EDAlias(
thingProducer2 = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings'))
)
)
process.trackOfThingsProducerA = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(0, 1, 2, 3, 4, 5, 6, 7, 8)
)
process.trackOfThingsProducerB = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(0, 1, 2, 3)
)
process.trackOfThingsProducerC = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(4, 5, 6, 7)
)
process.trackOfThingsProducerD = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18)
)
process.trackOfThingsProducerDPlus = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18, 21)
)
process.trackOfThingsProducerE = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14)
)
process.trackOfThingsProducerF = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(14, 15, 16, 17)
)
process.trackOfThingsProducerG = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(20, 21, 22, 23, 24, 25, 26, 27, 28)
)
process.trackOfThingsProducerH = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(20, 21, 22, 23)
)
process.trackOfThingsProducerI = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(24, 25, 26, 27)
)
process.trackOfThingsProducerJ = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(30, 31, 32, 33, 34, 35, 36, 37, 38)
)
process.trackOfThingsProducerK = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(30, 31, 32, 33)
)
process.trackOfThingsProducerL = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(34, 35, 36, 37)
)
process.trackOfThingsProducerM = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(40, 41, 42, 43, 44, 45, 46, 47, 48)
)
process.trackOfThingsProducerN = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(40, 41, 42, 43)
)
process.trackOfThingsProducerO = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer'),
keysToReference = cms.vuint32(44, 45, 46, 47)
)
process.trackOfThingsProducerD2 = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer2'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14, 15, 16, 17, 18)
)
process.trackOfThingsProducerE2 = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer2'),
keysToReference = cms.vuint32(10, 11, 12, 13, 14)
)
process.trackOfThingsProducerF2 = cms.EDProducer("TrackOfThingsProducer",
inputTag = cms.InputTag('thingProducer2'),
keysToReference = cms.vuint32(14, 15, 16, 17)
)
process.thinningThingProducerA = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerA'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerB = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerB'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerC = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerC'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerD = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerD'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerE = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerE'),
offsetToThinnedKey = cms.uint32(10),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerF = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerD'),
trackTag = cms.InputTag('trackOfThingsProducerF'),
offsetToThinnedKey = cms.uint32(10),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerG = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerG'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerH = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerH'),
offsetToThinnedKey = cms.uint32(20),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerI = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerG'),
trackTag = cms.InputTag('trackOfThingsProducerI'),
offsetToThinnedKey = cms.uint32(20),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerJ = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerJ'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.thinningThingProducerK = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerK'),
offsetToThinnedKey = cms.uint32(30),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerL = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerJ'),
trackTag = cms.InputTag('trackOfThingsProducerL'),
offsetToThinnedKey = cms.uint32(30),
expectedCollectionSize = cms.uint32(9)
)
process.thinningThingProducerM = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thingProducer'),
trackTag = cms.InputTag('trackOfThingsProducerM'),
offsetToThinnedKey = cms.uint32(0),
expectedCollectionSize = cms.uint32(50)
)
process.aliasM = cms.EDAlias(
thinningThingProducerM = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings')),
# the next one should get ignored
cms.PSet(type = cms.string('edmThinnedAssociation'))
)
)
process.thinningThingProducerN = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('thinningThingProducerM'),
trackTag = cms.InputTag('trackOfThingsProducerN'),
offsetToThinnedKey = cms.uint32(40),
expectedCollectionSize = cms.uint32(9)
)
process.aliasN = cms.EDAlias(
thinningThingProducerN = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings')),
# the next one should get ignored
cms.PSet(type = cms.string('edmThinnedAssociation'))
)
)
process.thinningThingProducerO = cms.EDProducer("ThinningThingProducer",
inputTag = cms.InputTag('aliasM'),
trackTag = cms.InputTag('trackOfThingsProducerO'),
offsetToThinnedKey = cms.uint32(40),
expectedCollectionSize = cms.uint32(9)
)
process.aliasO = cms.EDAlias(
thinningThingProducerO = cms.VPSet(
cms.PSet(type = cms.string('edmtestThings')),
# the next one should get ignored
cms.PSet(type = cms.string('edmThinnedAssociation'))
)
)
process.testA = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thingProducer'),
thinnedTag = cms.InputTag('thinningThingProducerA'),
associationTag = cms.InputTag('thinningThingProducerA'),
trackTag = cms.InputTag('trackOfThingsProducerA'),
expectedParentContent = cms.vint32( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49
),
expectedThinnedContent = cms.vint32(0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedIndexesIntoParent = cms.vuint32(0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedValues = cms.vint32(0, 1, 2, 3, 4, 5, 6, 7, 8)
)
process.testB = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerA'),
thinnedTag = cms.InputTag('thinningThingProducerB'),
associationTag = cms.InputTag('thinningThingProducerB'),
trackTag = cms.InputTag('trackOfThingsProducerB'),
expectedParentContent = cms.vint32( 0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedThinnedContent = cms.vint32(0, 1, 2, 3),
expectedIndexesIntoParent = cms.vuint32(0, 1, 2, 3),
expectedValues = cms.vint32(0, 1, 2, 3)
)
process.testC = cms.EDAnalyzer("ThinningTestAnalyzer",
parentTag = cms.InputTag('thinningThingProducerA'),
thinnedTag = cms.InputTag('thinningThingProducerC'),
associationTag = cms.InputTag('thinningThingProducerC'),
trackTag = cms.InputTag('trackOfThingsProducerC'),
expectedParentContent = cms.vint32( 0, 1, 2, 3, 4, 5, 6, 7, 8),
expectedThinnedContent = cms.vint32(4, 5, 6, 7),
expectedIndexesIntoParent = cms.vuint32(4, 5, 6, 7),
expectedValues = cms.vint32(4, 5, 6, 7)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testThinningTest1.root'),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_thingProducer2_*_*',
'drop *_thinningThingProducerM_*_*',
'drop *_thinningThingProducerN_*_*',
'drop *_thinningThingProducerO_*_*'
)
)
process.out2 = cms.OutputModule("EventStreamFileWriter",
fileName = cms.untracked.string('testThinningStreamerout.dat'),
compression_level = cms.untracked.int32(1),
use_compression = cms.untracked.bool(True),
max_event_size = cms.untracked.int32(7000000),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_thingProducer_*_*',
'drop *_thingProducer2_*_*',
'drop *_thinningThingProducerD_*_*',
'drop *_thinningThingProducerH_*_*',
'drop *_thinningThingProducerI_*_*',
'drop *_thinningThingProducerJ_*_*',
'drop *_thinningThingProducerK_*_*',
'drop *_thinningThingProducerL_*_*',
'drop *_thinningThingProducerM_*_*',
'drop *_thinningThingProducerN_*_*',
'drop *_thinningThingProducerO_*_*',
'drop *_aliasM_*_*',
'drop *_aliasN_*_*'
)
)
process.p = cms.Path(process.thingProducer * process.thingProducer2
* process.trackOfThingsProducerA
* process.trackOfThingsProducerB
* process.trackOfThingsProducerC
* process.trackOfThingsProducerD
* process.trackOfThingsProducerDPlus
* process.trackOfThingsProducerE
* process.trackOfThingsProducerF
* process.trackOfThingsProducerG
* process.trackOfThingsProducerH
* process.trackOfThingsProducerI
* process.trackOfThingsProducerJ
* process.trackOfThingsProducerK
* process.trackOfThingsProducerL
* process.trackOfThingsProducerM
* process.trackOfThingsProducerN
* process.trackOfThingsProducerO
* process.trackOfThingsProducerD2
* process.trackOfThingsProducerE2
* process.trackOfThingsProducerF2
* process.thinningThingProducerA
* process.thinningThingProducerB
* process.thinningThingProducerC
* process.thinningThingProducerD
* process.thinningThingProducerE
* process.thinningThingProducerF
* process.thinningThingProducerG
* process.thinningThingProducerH
* process.thinningThingProducerI
* process.thinningThingProducerJ
* process.thinningThingProducerK
* process.thinningThingProducerL
* process.thinningThingProducerM
* process.thinningThingProducerN
* process.thinningThingProducerO
* process.testA
* process.testB
* process.testC
)
process.endPath = cms.EndPath(process.out * process.out2)
| 40.339066 | 79 | 0.652394 |
b10ef155b141d1ff49de7abd5e3a562536e9e728 | 771 | py | Python | tests/Bio/test_tandem.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | 4 | 2020-04-25T08:50:36.000Z | 2020-04-26T04:49:16.000Z | tests/Bio/test_tandem.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | null | null | null | tests/Bio/test_tandem.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | null | null | null | # coding: utf-8
from kerasy.Bio.tandem import find_tandem
from kerasy.utils import generateSeq
len_sequences = 1000
| 29.653846 | 91 | 0.660182 |
b10f2700bf5dd4688d783eebd9aacb68abc85ac5 | 679 | py | Python | NEW_PRAC/HackerRank/Python/SetDifferenceString.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | NEW_PRAC/HackerRank/Python/SetDifferenceString.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 160 | 2021-04-26T19:04:15.000Z | 2022-03-26T20:18:37.000Z | NEW_PRAC/HackerRank/Python/SetDifferenceString.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | # >>> s = set("Hacker")
# >>> print s.difference("Rank")
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(set(['R', 'a', 'n', 'k']))
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(['R', 'a', 'n', 'k'])
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(enumerate(['R', 'a', 'n', 'k']))
# set(['a', 'c', 'r', 'e', 'H', 'k'])
# >>> print s.difference({"Rank":1})
# set(['a', 'c', 'e', 'H', 'k', 'r'])
# >>> s - set("Rank")
# set(['H', 'c', 'r', 'e'])
if __name__ == "__main__":
eng = input()
eng_stu = set(map(int, input().split()))
fre = input()
fre_stu = set(map(int, input().split()))
eng_only = eng_stu - fre_stu
print(len(eng_only))
| 24.25 | 57 | 0.443299 |
b10ff91b57739eb21f6eb6d10c2777a5221bc00d | 4,898 | py | Python | src/dotacrunch/drawer.py | tpinetz/dotacrunch | 9f53404ac3556e14bdc3e159f36d34e39c747898 | [
"MIT"
] | 1 | 2019-09-20T04:03:13.000Z | 2019-09-20T04:03:13.000Z | src/dotacrunch/drawer.py | tpinetz/dotacrunch | 9f53404ac3556e14bdc3e159f36d34e39c747898 | [
"MIT"
] | null | null | null | src/dotacrunch/drawer.py | tpinetz/dotacrunch | 9f53404ac3556e14bdc3e159f36d34e39c747898 | [
"MIT"
] | null | null | null | from PIL import Image, ImageDraw
from numpy import array, random, vstack, ones, linalg
from const import TOWERS
from copy import deepcopy
from os import path
| 42.964912 | 136 | 0.600857 |
b112b2802063ecfa7ce3db6c16ab4326c7eda2fb | 1,746 | py | Python | nsm.py | svepe/neural-stack | c48e6b94f00e77cedd9d692bdc2a6715bb007db5 | [
"MIT"
] | null | null | null | nsm.py | svepe/neural-stack | c48e6b94f00e77cedd9d692bdc2a6715bb007db5 | [
"MIT"
] | 1 | 2017-07-26T07:18:42.000Z | 2017-07-26T07:18:42.000Z | nsm.py | svepe/neural-stack | c48e6b94f00e77cedd9d692bdc2a6715bb007db5 | [
"MIT"
] | null | null | null | import numpy as np
import chainer.functions as F
from chainer import Variable
batch_size = 3
stack_element_size = 2
V = Variable(np.zeros((batch_size, 1, stack_element_size)))
s = Variable(np.zeros((batch_size, 1)))
d = Variable(np.ones((batch_size, 1)) * 0.4)
u = Variable(np.ones((batch_size, 1)) * 0.)
v = Variable(np.ones((batch_size, stack_element_size)))
V, s, r = neural_stack(V, s, d, u, v)
d = Variable(np.ones((batch_size, 1)) * 0.8)
u = Variable(np.ones((batch_size, 1)) * 0.)
v = Variable(np.ones((batch_size, stack_element_size)) * 2.)
V, s, r = neural_stack(V, s, d, u, v)
d = Variable(np.ones((batch_size, 1)) * 0.9)
u = Variable(np.ones((batch_size, 1)) * 0.9)
v = Variable(np.ones((batch_size, stack_element_size)) * 3.)
V, s, r = neural_stack(V, s, d, u, v)
d = Variable(np.ones((batch_size, 1)) * 0.1)
u = Variable(np.ones((batch_size, 1)) * 0.1)
v = Variable(np.ones((batch_size, stack_element_size)) * 3.)
V, s, r = neural_stack(V, s, d, u, v)
print V.data
print s.data
print r.data
| 29.59322 | 80 | 0.613402 |
b11347dca32d00ada08a415a09ab2e6c4431c76c | 2,354 | py | Python | chaos_genius/celery_config.py | eltociear/chaos_genius | eb3bc27181c8af4144b95e685386814109173164 | [
"MIT"
] | 1 | 2022-02-25T16:11:34.000Z | 2022-02-25T16:11:34.000Z | chaos_genius/celery_config.py | eltociear/chaos_genius | eb3bc27181c8af4144b95e685386814109173164 | [
"MIT"
] | null | null | null | chaos_genius/celery_config.py | eltociear/chaos_genius | eb3bc27181c8af4144b95e685386814109173164 | [
"MIT"
] | null | null | null | from datetime import timedelta
from celery.schedules import crontab, schedule
CELERY_IMPORTS = ("chaos_genius.jobs")
CELERY_TASK_RESULT_EXPIRES = 30
CELERY_TIMEZONE = "UTC"
CELERY_ACCEPT_CONTENT = ["json", "msgpack", "yaml"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERYBEAT_SCHEDULE = {
"anomaly-scheduler": {
"task": "chaos_genius.jobs.anomaly_tasks.anomaly_scheduler",
"schedule": schedule(timedelta(minutes=10)),
"args": ()
},
'alerts-daily': {
'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
'schedule': crontab(hour="3", minute="0"), # Daily: at 3am
'args': ('daily',)
},
"alert-digest-daily-scheduler": {
"task": "chaos_genius.jobs.alert_tasks.alert_digest_daily_scheduler",
"schedule": schedule(timedelta(minutes=10)),
"args": ()
},
# 'anomaly-task-every-minute': {
# 'task': 'chaos_genius.jobs.anomaly_tasks.add_together',
# 'schedule': crontab(minute="*"), # Every minutes
# 'args': (5,10,)
# },
# "anomaly-tasks-all-kpis": {
# "task": "chaos_genius.jobs.anomaly_tasks.anomaly_kpi",
# # "schedule": crontab(hour=[11]),
# "schedule": schedule(timedelta(minutes=1)), # for testing
# "args": ()
# },
# 'alerts-weekly': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(day_of_week="0"), # Weekly: every sunday
# 'args': ('weekly',)
# },
# 'alerts-hourly': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(hour="*"), # Hourly: at 0th minute
# 'args': ('hourly',)
# },
# 'alerts-every-15-minute': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(minute="*/15"), # Every 15 minutes
# 'args': ('every_15_minute',)
# }
}
CELERY_ROUTES = {
"chaos_genius.jobs.anomaly_tasks.*": {"queue": "anomaly-rca"},
"chaos_genius.jobs.alert_tasks.*": {"queue": "alerts"},
}
# Scheduler runs every hour
# looks at tasks in last n hour
# if they are in processing in 24 hours, schedule them right away
# job expiry window
# add details of job into a table, then schedule it
# TODO: Use this for config
| 32.694444 | 77 | 0.619371 |
b114d5a538b75c9a4b75747db2d55272076b7fcc | 232 | py | Python | oldcontrib/media/image/servee_registry.py | servee/django-servee-oldcontrib | 836447ebbd53db0b53879a35468c02e57f65105f | [
"BSD-Source-Code"
] | null | null | null | oldcontrib/media/image/servee_registry.py | servee/django-servee-oldcontrib | 836447ebbd53db0b53879a35468c02e57f65105f | [
"BSD-Source-Code"
] | null | null | null | oldcontrib/media/image/servee_registry.py | servee/django-servee-oldcontrib | 836447ebbd53db0b53879a35468c02e57f65105f | [
"BSD-Source-Code"
] | null | null | null | from servee import frontendadmin
from servee.frontendadmin.insert import ModelInsert
from oldcontrib.media.image.models import Image
frontendadmin.site.register_insert(ImageInsert) | 29 | 51 | 0.844828 |
b114f3af35aa6791557a994b86492206a441c7e5 | 974 | py | Python | run.py | Lohitapallanti/Predicting-Titanic-Survive | 681e513ec0abfb66797c827139d4e6d99c6b22bf | [
"Apache-2.0"
] | null | null | null | run.py | Lohitapallanti/Predicting-Titanic-Survive | 681e513ec0abfb66797c827139d4e6d99c6b22bf | [
"Apache-2.0"
] | null | null | null | run.py | Lohitapallanti/Predicting-Titanic-Survive | 681e513ec0abfb66797c827139d4e6d99c6b22bf | [
"Apache-2.0"
] | null | null | null | from train import train
from processing import Processing
""" The Main run file, where the program execution and controller is based. """
object = Run()
object.final_function()
| 37.461538 | 131 | 0.661191 |
b1155590dddadba4928d8c63159a637854f7865e | 2,646 | py | Python | scripts/pretty-printers/gdb/install.py | tobireinhard/cbmc | fc165c119985adf8db9a13493f272a2def4e79fa | [
"BSD-4-Clause"
] | 412 | 2016-04-02T01:14:27.000Z | 2022-03-27T09:24:09.000Z | scripts/pretty-printers/gdb/install.py | tobireinhard/cbmc | fc165c119985adf8db9a13493f272a2def4e79fa | [
"BSD-4-Clause"
] | 4,671 | 2016-02-25T13:52:16.000Z | 2022-03-31T22:14:46.000Z | scripts/pretty-printers/gdb/install.py | tobireinhard/cbmc | fc165c119985adf8db9a13493f272a2def4e79fa | [
"BSD-4-Clause"
] | 266 | 2016-02-23T12:48:00.000Z | 2022-03-22T18:15:51.000Z | #!/usr/bin/env python3
import os
from shutil import copyfile
def create_gdbinit_file():
"""
Create and insert into a .gdbinit file the python code to set-up cbmc pretty-printers.
"""
print("Attempting to enable cbmc-specific pretty-printers.")
home_folder = os.path.expanduser("~")
if not home_folder:
print(home_folder + " is an invalid home folder, can't auto-configure .gdbinit.")
return
# This is the code that should be copied if you're applying the changes by hand.
gdb_directory = os.path.dirname(os.path.abspath(__file__))
code_block_start = "cbmc_printers_folder = "
code_block = \
[
"{0}'{1}'".format(code_block_start, gdb_directory),
"if os.path.exists(cbmc_printers_folder):",
" sys.path.insert(1, cbmc_printers_folder)",
" from pretty_printers import load_cbmc_printers",
" load_cbmc_printers()",
]
gdbinit_file = os.path.join(home_folder, ".gdbinit")
lines = []
imports = { "os", "sys" }
if os.path.exists(gdbinit_file):
with open(gdbinit_file, 'r') as file:
lines = [ line.rstrip() for line in file ]
line_no = 0
while line_no < len(lines):
if lines[line_no].startswith('import '):
imports.add(lines[line_no][len("import "):].strip())
lines.pop(line_no)
else:
if lines[line_no].startswith(code_block_start):
print(".gdbinit already contains our pretty printers, not changing it")
return
line_no += 1
while len(lines) != 0 and (lines[0] == "" or lines[0] == "python"):
lines.pop(0)
backup_file = os.path.join(home_folder, "backup.gdbinit")
if os.path.exists(backup_file):
print("backup.gdbinit file already exists. Type 'y' if you would like to overwrite it or any other key to exit.")
choice = input().lower()
if choice != 'y':
return
print("Backing up {0}".format(gdbinit_file))
copyfile(gdbinit_file, backup_file)
lines = [ "python" ] + list(map("import {}".format, sorted(imports))) + [ "", "" ] + code_block + [ "", "" ] + lines + [ "" ]
print("Adding pretty-print commands to {0}.".format(gdbinit_file))
try:
with open(gdbinit_file, 'w+') as file:
file.write('\n'.join(lines))
print("Commands added.")
except:
print("Exception occured writing to file. Please apply changes manually.")
if __name__ == "__main__":
create_gdbinit_file()
| 37.267606 | 129 | 0.588435 |
b1156db34bf35cdfc3d30e9b0d6bdddd6d15330a | 5,613 | py | Python | pyutils/solve.py | eltrompetero/maxent_fim | b5e8942a20aad67e4055c506248605df50ab082d | [
"MIT"
] | null | null | null | pyutils/solve.py | eltrompetero/maxent_fim | b5e8942a20aad67e4055c506248605df50ab082d | [
"MIT"
] | null | null | null | pyutils/solve.py | eltrompetero/maxent_fim | b5e8942a20aad67e4055c506248605df50ab082d | [
"MIT"
] | null | null | null | # ====================================================================================== #
# Module for solving maxent problem on C elegans data set.
#
# Author : Eddie Lee, edlee@santafe.edu
# ====================================================================================== #
from .utils import *
from scipy.optimize import minimize
from scipy.special import logsumexp
from scipy.stats import multinomial
from scipy.interpolate import interp1d
def _indpt(X):
"""Solve independent spin model.
Parameters
----------
X : ndarray
Dimension (n_samples, n_neurons).
Returns
-------
ndarray
Solved field returned as fields for 0 state for all spins, then 1 state for all
spins, 2 state for all spins to form a total length of 3 * N.
"""
p = np.vstack([(X==i).mean(0) for i in range(3)])
p = p.T
h = np.zeros((p.shape[0], 3))
# solve each spin
for i in range(p.shape[0]):
pi = p[i]
h[i] = minimize(cost, [0,0,0])['x']
# set the third field to zero (this is our normalized representation)
h -= h[:,2][:,None]
return h.T.ravel()
#end Independent3
| 26.856459 | 94 | 0.492785 |
b1160a8726aaf21bb1cf8728387263736c4c3084 | 8,117 | py | Python | lingvo/tasks/car/ops/nms_3d_op_test.py | Singed-jj/lingvo | a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56 | [
"Apache-2.0"
] | null | null | null | lingvo/tasks/car/ops/nms_3d_op_test.py | Singed-jj/lingvo | a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56 | [
"Apache-2.0"
] | null | null | null | lingvo/tasks/car/ops/nms_3d_op_test.py | Singed-jj/lingvo | a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import time
import unittest
from lingvo import compat as tf
from lingvo.core import test_utils
from lingvo.tasks.car import ops
import numpy as np
from six.moves import range
if __name__ == '__main__':
tf.test.main()
| 39.402913 | 83 | 0.603795 |
b1175a77d1f41faf9425e6e42edc2d9127d3fe7c | 10,773 | py | Python | BDS_2nd_downsampling.py | oxon-612/BDSR | c468061ed9e139be96d9da91c1b5419b122eeb4f | [
"MIT"
] | 1 | 2021-03-03T13:13:33.000Z | 2021-03-03T13:13:33.000Z | BDS_2nd_downsampling.py | oxon-612/BDSR | c468061ed9e139be96d9da91c1b5419b122eeb4f | [
"MIT"
] | null | null | null | BDS_2nd_downsampling.py | oxon-612/BDSR | c468061ed9e139be96d9da91c1b5419b122eeb4f | [
"MIT"
] | null | null | null | #
import numpy as np
import random
# import sys
import os
from sklearn import preprocessing
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# import math
from scipy.stats import norm
# from sklearn.neighbors import KernelDensity
# import statistics
# from scipy.stats import ks_2samp
# from scipy.stats import ttest_1samp
# from scipy.stats import ttest_ind
# from scipy.stats import chisquare
# from scipy.spatial import ConvexHull
"""
1. Get down-sampled PCD by using dimension-reduction methods: Random, PCA, FA, KernelPCA, TruncatredSVD
2. For each dimension-reduction method, get 10%, 20%, ..., 90% of original data (down-sampling) and save the down-sampled data
"""
def BDS_Downsampling(_input_data, _output_dir, _digit=38, _which_dimension_reduction = ['PCA', 'FA', 'KernelPCA', 'TruncatedSVD']):
'''
A function to conduct the best-discripancy downsampling
:param _input_data: a multi-dimensional dataset with feacture vectors and a class label vector
:param _digit: how many digits after the decimal place of constant e, by default 38
:param _which_dimension_reduction: choose one or multiple dimensionality reduction technique(s) to produce a linear transformation T and to result in an one-dimensional vector E
['PCA', 'FA', 'KernelPCA', 'TruncatedSVD']
:return: mean EPEs over k iterations of the three classifiers
'''
def get_BDS(_r, _digit):
'''
A subfunction to gerenate a best-discrepancy number with Equation 3
:param _r: an integer
:param _digit: round the best-discrepancy number to a certain number of digits
:return: a best-discrepancy number
'''
_product = _r * 2.71828182845904523536028747135266249775
_product_decimal = round(_product - int(_product), _digit)
return float(str(_product_decimal))
def get_rank(_input_list):
'''
A subfunction to get a ranking vector of a sequence
:param _input_list: a one-dimensional list
:return: a ranking vector
'''
_array = np.array(_input_list)
_temp = _array.argsort()
_ranks = np.arange(len(_array))[_temp.argsort()]
return list(_ranks)
def dimension_redu(_data, _method):
'''
A subfunction to transform a multi-dimensional dataset from the high-diemsnional space to a one-dimensional space
:param _data: a multi-dimensional dataset
:param _method: one or multiple dimensionality-reduction techniques
:return: a one-dimensional vector
'''
min_max_scaler = preprocessing.MinMaxScaler()
# print(_data[:, :-2])
z_data = min_max_scaler.fit_transform(_data)
# print(z_data)
from sklearn import decomposition
# Choose one method
if _method == 'PCA':
dim_redu_method = decomposition.PCA(n_components=1)
elif _method == 'FA':
dim_redu_method = decomposition.FactorAnalysis(n_components=1, max_iter=5000)
elif _method == 'KernelPCA':
dim_redu_method = decomposition.KernelPCA(kernel='cosine', n_components=1)
elif _method == 'TruncatedSVD':
dim_redu_method = decomposition.TruncatedSVD(1)
dimension_redu_vector = dim_redu_method.fit_transform(z_data)
z_dimension_redu_vector = np.ndarray.tolist(min_max_scaler.fit_transform(dimension_redu_vector))
return z_dimension_redu_vector
def get_temporary_data(_data, _dim_vector):
'''
A subfunction to
1) attach the one-dimensional vector E to the original dataset D;
2) assendingly sort E as E_tilde and then sort D as D_tilde
:param _data: a multi-dimensional dataset D
:param _dim_vector: the one-dimensional vector E
:return: sorted dataset D_tilde
'''
_labels = _data[:, -1]
_features = _data[:, :-1]
#_features_minmax = np.ndarray.tolist(min_max_scaler.fit_transform(_features)) # normalize feature vectors
_features_minmax = np.ndarray.tolist(_features)
for i in range(len(_data)):
_features_minmax[i].append(_labels[i])
_features_minmax[i].append(_dim_vector[i][0])
# D is sorted along E_tilde and becomes D_tilde
_conjointed_data_sorted = sorted(_features_minmax, key=lambda a_entry: a_entry[-1]) # sort the dataset by the one-dimensional vector E_tilde
# E_tilde is removed from D_tilde
for cj in _conjointed_data_sorted: # delete the one-dimensional vector E_tilde
################################################################################################
# #
# this is the one-dimensional feature #
# #
################################################################################################
# print(cj[-1])
del cj[-1]
rearranged_data = np.array(_conjointed_data_sorted)
return rearranged_data
min_max_scaler = preprocessing.MinMaxScaler()
_duplicated_data = [i for i in _input_data] # Create a copy of the input data so that the original input data won't be affected by a k-fold CV function.
_data_size = len(_duplicated_data)
# Generate a BDS with n elements using Equation 3
_BD_seqence = []
for bd in range(_data_size):
_BD_seqence.append(get_BDS(bd + 1, _digit))
print("Generate a BDS with {} elements using Equation 3".format(len(_BD_seqence)))
# Generate the BDS's ranking vector R
_BDS_ranking = list(get_rank(_BD_seqence))
print("\n")
print("Generate the ranking vector of the BDS with {} elements".format(len(_BDS_ranking)))
# print(_BDS_ranking)
print("\n")
for dim_method in _which_dimension_reduction:
print("-" * 100)
print("Generate one-dimensional vector E based on D with a dimensionality-reduction technique {}".format(dim_method))
print("-" * 100)
_z_duplicated_data = min_max_scaler.fit_transform(_duplicated_data)
_z_dim_vector = dimension_redu(_z_duplicated_data, dim_method)
_temporary_data = get_temporary_data(_input_data, _z_dim_vector)
print('\t',"Ascendingly sort E as E_tilde")
print('\t',"Sort D as D_tilde using E_tilde")
# print(_temporary_data[:, -1])
_BDS_rearranged_data = []
for l in _BDS_ranking:
_BDS_rearranged_data.append(_temporary_data[l])
print('\t',"D_tilde is rearranged with R, the ranking vector of a BDS")
# _file_name='./Datasets/'+dim_method+"_Sleep"+".txt"
_file_name = _output_dir + dim_method + ".txt"
np.savetxt(_file_name, _BDS_rearranged_data)
"""
1. Read a data file
2. Dimension reduction
3. Get the lowest discrepancy
"""
def get_normalized_list(_list):
'''
normalize data to [0, 1]
'''
_norm_list = []
for _i in _list:
_j = (_i-min(_list))/(max(_list)-min(_list))
_norm_list.append(_j)
return _norm_list
if __name__ == '__main__':
folder = "FA"
CR1 = "0.2" # BDS BDSR_project.cpp
path_2 = "D:/MengjieXu/Science/BDSR2019-2020/test202009/BDSR/"
dataset = path_2 + folder + "/txt_file1/" + folder +"_down_" + CR1 + "_NC_bounding_upsampling_result2.txt"
raw_data = np.loadtxt(dataset)
BDS_Downsampling(_input_data=raw_data[:, 0:3], _output_dir=path_2 + folder + "/")
evaluation_table_dir = path_2 + folder + "/evaluation_data/"
up_to_how_many_to_keep = 0.4 #50%
for how_many_to_keep in np.arange(start = 0.3, stop = up_to_how_many_to_keep, step = 0.1): #1%1%10%
################################
# #
# down-sampled PCD #
# #
################################
# for PCA, FA, KernelPCATruncatedSVD
down_method_list = ['FA', 'PCA', 'KernelPCA', 'TruncatedSVD'] #
print("*" * 22)
print("* *")
print("* *")
print("* keep {} data *".format(how_many_to_keep))
print("* *")
print("* *")
print("*" * 22)
'''#30random
for down_method in down_method_list:
output_f_dir = "E:/XMJ/3Drebuilding/paper/test/test_2019_10/test32/down_sampled_data2/"
output_f_name = "{}_down_{}_PCD.txt".format(down_method, how_many_to_keep)
# random down-sampling
if down_method == 'Random':
rand_count = 0
for rand_seed in rand_seed_list:
rand_count += 1
random.seed(rand_seed)
down_data = random.sample(list(raw_data), int(how_many_to_keep*len(raw_data)))
np.savetxt(output_f_dir + output_f_name, down_data)
################################################################################################################
################################################################################################################
else:
bds_re_ordered_data = np.loadtxt("E:/XMJ/3Drebuilding/paper/test/test_2019_10/test20/" + down_method + ".txt")
down_data = bds_re_ordered_data[0:int(how_many_to_keep*len(bds_re_ordered_data))]
np.savetxt(output_f_dir + output_f_name, down_data)'''
#30randomrand_seed
for down_method in down_method_list:
output_f_dir = path_2 + folder + "/down_sampled_data/"
output_f_name = "{}_down_{}_PCD.txt".format(down_method, how_many_to_keep)
bds_re_ordered_data = np.loadtxt(path_2 + folder + "/" + down_method + ".txt")
down_data = bds_re_ordered_data[0:int(how_many_to_keep*len(bds_re_ordered_data))]
np.savetxt(output_f_dir + output_f_name, down_data) | 43.615385 | 182 | 0.597698 |
b11770de6ba3e72b06e86a670a85a8fd098eb3aa | 3,630 | py | Python | model.py | e-yi/hin2vec_pytorch | 7c3b6c4160476568985622117cf2263e7b78760e | [
"MIT"
] | 18 | 2019-10-17T03:12:07.000Z | 2022-03-11T02:58:12.000Z | model.py | e-yi/hin2vec_pytorch | 7c3b6c4160476568985622117cf2263e7b78760e | [
"MIT"
] | 5 | 2019-12-12T03:15:21.000Z | 2021-04-02T07:54:38.000Z | model.py | e-yi/hin2vec_pytorch | 7c3b6c4160476568985622117cf2263e7b78760e | [
"MIT"
] | 4 | 2019-12-26T07:36:38.000Z | 2021-04-24T11:35:45.000Z | import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
if __name__ == '__main__':
## test binary_reg
print('sigmoid')
a = torch.tensor([-1.,0.,1.],requires_grad=True)
b = torch.sigmoid(a)
c = b.sum()
print(a)
print(b)
print(c)
c.backward()
print(c.grad)
print(b.grad)
print(a.grad)
print('binary')
a = torch.tensor([-1., 0., 1.], requires_grad=True)
b = binary_reg(a)
c = b.sum()
print(a)
print(b)
print(c)
c.backward()
print(c.grad)
print(b.grad)
print(a.grad)
| 27.709924 | 110 | 0.571901 |
b117f2719a56a1e59d4109b5312d5d87fdc50a2d | 2,689 | py | Python | pygrep/classes/boyerMoore.py | sstadick/pygrep | 13c53ac427adda9974ee9e62c22391bf0682008c | [
"Apache-2.0"
] | null | null | null | pygrep/classes/boyerMoore.py | sstadick/pygrep | 13c53ac427adda9974ee9e62c22391bf0682008c | [
"Apache-2.0"
] | null | null | null | pygrep/classes/boyerMoore.py | sstadick/pygrep | 13c53ac427adda9974ee9e62c22391bf0682008c | [
"Apache-2.0"
] | null | null | null | import string
from helpers import *
if __name__ == '__main__':
pattern = 'thou'
text = 'cow th ou cat art hat thou mow the lawn'
bm = BoyerMoore(pattern)
# print([char for char in text])
# print([(i, char) for i, char in enumerate(text)])
print(bm.search(text)) | 36.337838 | 104 | 0.579026 |
b118f2f3e6c0e9617cb2cf673e9a7f3e68d6f9ce | 53 | py | Python | basicts/archs/DGCRN_arch/__init__.py | zezhishao/GuanCang_BasicTS | bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c | [
"Apache-2.0"
] | 3 | 2022-02-22T12:50:08.000Z | 2022-03-13T03:38:46.000Z | basicts/archs/DGCRN_arch/__init__.py | zezhishao/GuanCang_BasicTS | bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c | [
"Apache-2.0"
] | null | null | null | basicts/archs/DGCRN_arch/__init__.py | zezhishao/GuanCang_BasicTS | bbf82b9d08e82db78d4e9e9b11f43a676b54ad7c | [
"Apache-2.0"
] | null | null | null | from basicts.archs.DGCRN_arch.DGCRN_arch import DGCRN | 53 | 53 | 0.886792 |
b119bf6083a2cc2bfb9320284b71a47bcee04389 | 159 | py | Python | kido/settings/production.example.py | alanhamlett/kid-o | 18f88f7dc78c678e017fdc7e0dfb2711bcf2bf74 | [
"BSD-3-Clause"
] | 34 | 2015-08-22T06:57:26.000Z | 2021-11-08T10:47:23.000Z | kido/settings/production.example.py | alanhamlett/kid-o | 18f88f7dc78c678e017fdc7e0dfb2711bcf2bf74 | [
"BSD-3-Clause"
] | 15 | 2015-08-21T20:25:49.000Z | 2022-03-11T23:25:44.000Z | kido/settings/production.example.py | dominino/kid-o | 18f88f7dc78c678e017fdc7e0dfb2711bcf2bf74 | [
"BSD-3-Clause"
] | 5 | 2016-08-22T08:23:45.000Z | 2019-05-07T01:38:38.000Z | SECRET_KEY = None
DB_HOST = "localhost"
DB_NAME = "kido"
DB_USERNAME = "kido"
DB_PASSWORD = "kido"
COMPRESSOR_DEBUG = False
COMPRESSOR_OFFLINE_COMPRESS = True
| 19.875 | 34 | 0.773585 |
b11a302f53a38192c5dd68e4767ae96d3e146ef3 | 301 | py | Python | run.py | Prakash2403/ultron | 7d1067eb98ef52f6a88299534ea204e7ae45d7a7 | [
"MIT"
] | 13 | 2017-08-15T15:50:13.000Z | 2019-06-03T10:24:50.000Z | run.py | Prakash2403/ultron | 7d1067eb98ef52f6a88299534ea204e7ae45d7a7 | [
"MIT"
] | 3 | 2017-08-29T16:35:04.000Z | 2021-06-01T23:49:16.000Z | run.py | Prakash2403/ultron | 7d1067eb98ef52f6a88299534ea204e7ae45d7a7 | [
"MIT"
] | 4 | 2017-08-16T09:33:59.000Z | 2019-06-05T07:25:30.000Z | #! /usr/bin/python3
from default_settings import default_settings
from ultron_cli import UltronCLI
if __name__ == '__main__':
default_settings()
try:
UltronCLI().cmdloop()
except KeyboardInterrupt:
print("\nInterrupted by user.")
print("Goodbye")
exit(0)
| 23.153846 | 45 | 0.664452 |
b11a595d5c6b314526d2c13c66fd8ddfdd9ef9ec | 2,689 | py | Python | losses/dice_loss.py | CharlesAuthier/geo-deep-learning | e97ea1d362327cdcb2849cd2f810f1e914078243 | [
"MIT"
] | 121 | 2018-10-01T15:27:08.000Z | 2022-02-16T14:04:34.000Z | losses/dice_loss.py | CharlesAuthier/geo-deep-learning | e97ea1d362327cdcb2849cd2f810f1e914078243 | [
"MIT"
] | 196 | 2018-09-26T19:32:29.000Z | 2022-03-30T15:17:53.000Z | losses/dice_loss.py | CharlesAuthier/geo-deep-learning | e97ea1d362327cdcb2849cd2f810f1e914078243 | [
"MIT"
] | 36 | 2018-09-25T12:55:55.000Z | 2022-03-03T20:31:33.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
| 35.381579 | 119 | 0.579398 |
b11a616d1b56aaeabf4b500c344345675c245118 | 2,766 | py | Python | src/pytezos/jupyter.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 98 | 2019-02-07T16:33:38.000Z | 2022-03-31T15:53:41.000Z | src/pytezos/jupyter.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 152 | 2019-05-20T16:38:56.000Z | 2022-03-30T14:24:38.000Z | src/pytezos/jupyter.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 34 | 2019-07-25T12:03:51.000Z | 2021-11-11T22:23:38.000Z | import inspect
import re
from functools import update_wrapper
from typing import Optional
| 26.596154 | 85 | 0.544107 |
b11be2ae97985e6cfb1d4fb8b0941137d4427bee | 2,492 | py | Python | torch_template/training.py | dongqifong/inspiration | f3168217729063f79f18972a4abe9db821ad5b91 | [
"MIT"
] | null | null | null | torch_template/training.py | dongqifong/inspiration | f3168217729063f79f18972a4abe9db821ad5b91 | [
"MIT"
] | null | null | null | torch_template/training.py | dongqifong/inspiration | f3168217729063f79f18972a4abe9db821ad5b91 | [
"MIT"
] | null | null | null | import torch
| 32.789474 | 101 | 0.597111 |
b11c83cde4ab47f5fe3448e7a1b6b3e0baac54ab | 3,331 | py | Python | pytext/__init__.py | NunoEdgarGFlowHub/pytext | 2358b2d7c8c4e6800c73f4bd1c9731723e503ed6 | [
"BSD-3-Clause"
] | 1 | 2019-02-25T01:50:03.000Z | 2019-02-25T01:50:03.000Z | pytext/__init__.py | NunoEdgarGFlowHub/pytext | 2358b2d7c8c4e6800c73f4bd1c9731723e503ed6 | [
"BSD-3-Clause"
] | null | null | null | pytext/__init__.py | NunoEdgarGFlowHub/pytext | 2358b2d7c8c4e6800c73f4bd1c9731723e503ed6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import uuid
from typing import Callable, Mapping, Optional
import numpy as np
from caffe2.python import workspace
from caffe2.python.predictor import predictor_exporter
from .builtin_task import register_builtin_tasks
from .config import PyTextConfig, pytext_config_from_json
from .config.component import create_featurizer
from .data.featurizer import InputRecord
from .utils.onnx_utils import CAFFE2_DB_TYPE, convert_caffe2_blob_name
register_builtin_tasks()
Predictor = Callable[[Mapping[str, str]], Mapping[str, np.array]]
def load_config(filename: str) -> PyTextConfig:
"""
Load a PyText configuration file from a file path.
See pytext.config.pytext_config for more info on configs.
"""
with open(filename) as file:
config_json = json.loads(file.read())
if "config" not in config_json:
return pytext_config_from_json(config_json)
return pytext_config_from_json(config_json["config"])
def create_predictor(
config: PyTextConfig, model_file: Optional[str] = None
) -> Predictor:
"""
Create a simple prediction API from a training config and an exported caffe2
model file. This model file should be created by calling export on a trained
model snapshot.
"""
workspace_id = str(uuid.uuid4())
workspace.SwitchWorkspace(workspace_id, True)
predict_net = predictor_exporter.prepare_prediction_net(
filename=model_file or config.export_caffe2_path, db_type=CAFFE2_DB_TYPE
)
task = config.task
feature_config = task.features
featurizer = create_featurizer(task.featurizer, feature_config)
return lambda input: _predict(
workspace_id, feature_config, predict_net, featurizer, input
)
| 36.604396 | 88 | 0.727409 |
b11d7725740230346fbe8555198c64720b464851 | 1,374 | py | Python | modules/cudaobjdetect/misc/python/test/test_cudaobjdetect.py | ptelang/opencv_contrib | dd68e396c76f1db4d82e5aa7a6545580939f9b9d | [
"Apache-2.0"
] | 7,158 | 2016-07-04T22:19:27.000Z | 2022-03-31T07:54:32.000Z | modules/cudaobjdetect/misc/python/test/test_cudaobjdetect.py | ptelang/opencv_contrib | dd68e396c76f1db4d82e5aa7a6545580939f9b9d | [
"Apache-2.0"
] | 2,184 | 2016-07-05T12:04:14.000Z | 2022-03-30T19:10:12.000Z | modules/cudaobjdetect/misc/python/test/test_cudaobjdetect.py | ptelang/opencv_contrib | dd68e396c76f1db4d82e5aa7a6545580939f9b9d | [
"Apache-2.0"
] | 5,535 | 2016-07-06T12:01:10.000Z | 2022-03-31T03:13:24.000Z | #!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
if __name__ == '__main__':
NewOpenCVTests.bootstrap() | 36.157895 | 92 | 0.659389 |
b11ddd81227b3782058ba9f99a70d0ae0079cb41 | 32,677 | py | Python | gizmo/mapper.py | emehrkay/gizmo | 01db2f51118f7d746061ace0b491237481949bad | [
"MIT"
] | 19 | 2015-10-06T12:55:09.000Z | 2021-01-09T09:53:38.000Z | gizmo/mapper.py | emehrkay/Gizmo | 01db2f51118f7d746061ace0b491237481949bad | [
"MIT"
] | 2 | 2016-01-21T02:55:55.000Z | 2020-08-16T23:05:07.000Z | gizmo/mapper.py | emehrkay/gizmo | 01db2f51118f7d746061ace0b491237481949bad | [
"MIT"
] | 3 | 2016-01-21T02:18:41.000Z | 2018-04-25T06:06:25.000Z | import logging
import inspect
import re
from collections import OrderedDict
from gremlinpy.gremlin import Gremlin, Param, AS
from .entity import (_Entity, Vertex, Edge, GenericVertex, GenericEdge,
ENTITY_MAP)
from .exception import (AstronomerQueryException, AstronomerMapperException)
from .traversal import Traversal
from .util import (camel_to_underscore, GIZMO_ID, GIZMO_LABEL, GIZMO_TYPE,
GIZMO_ENTITY, GIZMO_VARIABLE, entity_name)
logger = logging.getLogger(__name__)
ENTITY_MAPPER_MAP = {}
GENERIC_MAPPER = 'generic.mapper'
_count = -1
_query_count = 0
_query_params = {}
def __getitem__(self, key):
entity = self._entities.get(key, None)
if entity is None:
try:
data = self.response[key]
if data is not None:
entity = self.mapper.create(data=data,
data_type=self._data_type)
entity.dirty = False
self._entities[key] = entity
else:
raise StopIteration()
except Exception as e:
raise StopIteration()
return entity
def __setitem__(self, key, value):
self._entities[key] = value
def __delitem__(self, key):
if key in self._entities:
del self._entities[key]
| 29.896615 | 79 | 0.565903 |
b11de849f44d264e334f554dabd0e3fd62c6c1ae | 849 | py | Python | utils.py | Nicolas-Lefort/conv_neural_net_time_serie | 3075d3f97cdd45f91612f8300af2b4af7f232c42 | [
"MIT"
] | null | null | null | utils.py | Nicolas-Lefort/conv_neural_net_time_serie | 3075d3f97cdd45f91612f8300af2b4af7f232c42 | [
"MIT"
] | null | null | null | utils.py | Nicolas-Lefort/conv_neural_net_time_serie | 3075d3f97cdd45f91612f8300af2b4af7f232c42 | [
"MIT"
] | null | null | null | import pandas_ta as ta
| 31.444444 | 111 | 0.64311 |
b11f6265d46fdca364a4dd3bf4dcf5a12d2f410f | 2,871 | py | Python | praetorian_ssh_proxy/hanlers/menu_handler.py | Praetorian-Defence/praetorian-ssh-proxy | 068141bf0cee9fcf10434fab2dc5c16cfdd35f5a | [
"MIT"
] | null | null | null | praetorian_ssh_proxy/hanlers/menu_handler.py | Praetorian-Defence/praetorian-ssh-proxy | 068141bf0cee9fcf10434fab2dc5c16cfdd35f5a | [
"MIT"
] | null | null | null | praetorian_ssh_proxy/hanlers/menu_handler.py | Praetorian-Defence/praetorian-ssh-proxy | 068141bf0cee9fcf10434fab2dc5c16cfdd35f5a | [
"MIT"
] | null | null | null | import sys
| 45.571429 | 122 | 0.492163 |
b122b1664a2960a396de4fbb595bf3821559d96f | 563 | py | Python | orderedtable/urls.py | Shivam2k16/DjangoOrderedTable | da133a23a6659ce5467b8161edcf6db35f1c0b76 | [
"MIT"
] | 2 | 2018-04-15T17:03:59.000Z | 2019-03-23T04:45:00.000Z | orderedtable/urls.py | Shivam2k16/DjangoOrderedTable | da133a23a6659ce5467b8161edcf6db35f1c0b76 | [
"MIT"
] | null | null | null | orderedtable/urls.py | Shivam2k16/DjangoOrderedTable | da133a23a6659ce5467b8161edcf6db35f1c0b76 | [
"MIT"
] | 1 | 2018-04-15T16:54:07.000Z | 2018-04-15T16:54:07.000Z | from django.conf.urls import include, url
from django.contrib import admin
import orderedtable
from orderedtable import views
app_name="orderedtable"
urlpatterns = [
url(r'^$', views.home,name="home"),
url(r'^import-json/$', views.import_json,name="import_json"),
url(r'^project-list/$', views.project_list,name="project_list"),
url(r'^empty-list/$', views.delete_table,name="delete_table"),
url(r'^multiple-sorting/$', views.multiple_sorting,name="multiple_sorting"),
url(r'^sort-by = (?P<pk>[\w-]+)/$', views.sorted,name="sorted"),
]
| 33.117647 | 80 | 0.698046 |
b123669e9c0103e63c00a8b4dcdbc0e0596f1442 | 2,242 | py | Python | call_google_translate.py | dadap/klingon-assistant-data | 5371f8ae6e3669f48a83087a4937af0dee8d23d1 | [
"Apache-2.0"
] | null | null | null | call_google_translate.py | dadap/klingon-assistant-data | 5371f8ae6e3669f48a83087a4937af0dee8d23d1 | [
"Apache-2.0"
] | 5 | 2018-07-11T09:17:19.000Z | 2018-10-14T10:33:51.000Z | call_google_translate.py | dadap/klingon-assistant-data | 5371f8ae6e3669f48a83087a4937af0dee8d23d1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Calls Google Translate to produce translations.
# To use, set "language" and "dest_language" below. (They are normally the same,
# unless Google uses a different language code than we do.) Then fill in
# the definition_[language] fields with "TRANSLATE" or
# "TRANSLATE: [replacement definition]". The latter is to allow for a better
# translation when the original definition is ambiguous, e.g., if the definition
# is "launcher", a better translation might result from
# "TRANSLATE: rocket launcher".
from googletrans import Translator
import fileinput
import re
import time
# TODO: Refactor this and also use in renumber.py.
# Ignore mem-00-header.xml and mem-28-footer.xml because they don't contain entries.
filenames = ['mem-01-b.xml', 'mem-02-ch.xml', 'mem-03-D.xml', 'mem-04-gh.xml', 'mem-05-H.xml', 'mem-06-j.xml', 'mem-07-l.xml', 'mem-08-m.xml', 'mem-09-n.xml', 'mem-10-ng.xml', 'mem-11-p.xml', 'mem-12-q.xml', 'mem-13-Q.xml', 'mem-14-r.xml', 'mem-15-S.xml', 'mem-16-t.xml', 'mem-17-tlh.xml', 'mem-18-v.xml', 'mem-19-w.xml', 'mem-20-y.xml', 'mem-21-a.xml', 'mem-22-e.xml', 'mem-23-I.xml', 'mem-24-o.xml', 'mem-25-u.xml', 'mem-26-suffixes.xml', 'mem-27-extra.xml']
translator = Translator()
language = "zh-HK"
dest_language = "zh-TW"
limit = 250
for filename in filenames:
with fileinput.FileInput(filename, inplace=True) as file:
definition = ""
for line in file:
definition_match = re.search(r"definition\">?(.+)<", line)
definition_translation_match = re.search(r"definition_(.+)\">TRANSLATE(?:: (.*))?<", line)
if (definition_match):
definition = definition_match.group(1)
if (limit > 0 and \
definition != "" and \
definition_translation_match and \
language.replace('-','_') == definition_translation_match.group(1)):
if definition_translation_match.group(2):
definition = definition_translation_match.group(2)
translation = translator.translate(definition, src='en', dest=dest_language)
line = re.sub(r">(.*)<", ">%s [AUTOTRANSLATED]<" % translation.text, line)
# Rate-limit calls to Google Translate.
limit = limit - 1
time.sleep(0.1)
print(line, end='')
| 44.84 | 460 | 0.666369 |
b123989fc301ccc896657660002120b9f5336e64 | 6,451 | py | Python | xenavalkyrie/xena_object.py | xenadevel/PyXenaValkyrie | 9bb1d0b058c45dc94a778fd674a679b53f03a34c | [
"Apache-2.0"
] | 4 | 2018-07-13T08:09:38.000Z | 2022-02-09T01:36:13.000Z | xenavalkyrie/xena_object.py | xenadevel/PyXenaValkyrie | 9bb1d0b058c45dc94a778fd674a679b53f03a34c | [
"Apache-2.0"
] | 1 | 2019-07-31T04:56:43.000Z | 2019-08-01T07:11:21.000Z | xenavalkyrie/xena_object.py | xenadevel/PyXenaValkyrie | 9bb1d0b058c45dc94a778fd674a679b53f03a34c | [
"Apache-2.0"
] | 3 | 2019-05-30T23:47:02.000Z | 2022-02-04T12:32:14.000Z | """
Base classes and utilities for all Xena Manager (Xena) objects.
:author: yoram@ignissoft.com
"""
import time
import re
import logging
from collections import OrderedDict
from trafficgenerator.tgn_utils import TgnError
from trafficgenerator.tgn_object import TgnObject, TgnObjectsDict
logger = logging.getLogger(__name__)
| 32.746193 | 115 | 0.611843 |
b124d44c02271ffc2f5af0ccc84d1e1a14ca372b | 2,051 | py | Python | test/ryu/vsw-602_mp_port_desc.py | iMasaruOki/lagopus | 69c303b65acbc2d4661691c190c42946654de1b3 | [
"Apache-2.0"
] | 281 | 2015-01-06T13:36:14.000Z | 2022-03-14T03:29:46.000Z | test/ryu/vsw-602_mp_port_desc.py | iMasaruOki/lagopus | 69c303b65acbc2d4661691c190c42946654de1b3 | [
"Apache-2.0"
] | 115 | 2015-01-06T11:09:21.000Z | 2020-11-26T11:44:23.000Z | test/ryu/vsw-602_mp_port_desc.py | lagopus/lagopus | 69c303b65acbc2d4661691c190c42946654de1b3 | [
"Apache-2.0"
] | 108 | 2015-01-06T05:12:01.000Z | 2022-01-02T03:28:50.000Z | from ryu.base.app_manager import RyuApp
from ryu.controller.ofp_event import EventOFPSwitchFeatures
from ryu.controller.ofp_event import EventOFPPortDescStatsReply
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.ofproto.ofproto_v1_2 import OFPG_ANY
from ryu.ofproto.ofproto_v1_3 import OFP_VERSION
from ryu.lib.mac import haddr_to_bin
| 41.02 | 75 | 0.644564 |
b128e2f322061ebf320f3ab6964b531facfd7042 | 21,812 | py | Python | test/phytozome_test.py | samseaver/GenomeFileUtil | b17afb465569a34a12844283918ec654911f96cf | [
"MIT"
] | null | null | null | test/phytozome_test.py | samseaver/GenomeFileUtil | b17afb465569a34a12844283918ec654911f96cf | [
"MIT"
] | null | null | null | test/phytozome_test.py | samseaver/GenomeFileUtil | b17afb465569a34a12844283918ec654911f96cf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import json # noqa: F401
import time
import shutil
import re
import sys
import datetime
import collections
#import simplejson
from os import environ
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.core.GenomeInterface import GenomeInterface
from GenomeFileUtil.GenomeFileUtilImpl import SDKConfig
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
from GenomeFileUtil.core.FastaGFFToGenome import FastaGFFToGenome
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WorkspaceClient import Workspace as workspaceService
| 46.606838 | 139 | 0.496011 |
b129413908fca02566b29b673b606e60be14141b | 7,824 | py | Python | icetray_version/trunk/resources/scripts/make_plots.py | hershalpandya/airshowerclassification_llhratio_test | a2a2ce5234c8f455fe56c332ab4fcc65008e9409 | [
"MIT"
] | null | null | null | icetray_version/trunk/resources/scripts/make_plots.py | hershalpandya/airshowerclassification_llhratio_test | a2a2ce5234c8f455fe56c332ab4fcc65008e9409 | [
"MIT"
] | null | null | null | icetray_version/trunk/resources/scripts/make_plots.py | hershalpandya/airshowerclassification_llhratio_test | a2a2ce5234c8f455fe56c332ab4fcc65008e9409 | [
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]:
import numpy as np
get_ipython().magic(u'matplotlib inline')
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
import sys
sys.path.append('../../python/')
from general_functions import load_5D_PDF_from_file
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import tables
import glob
# In[3]:
sig_pdf_file='../../files/PDF_12360_0123x.hd5'
bkg_pdf_file='../../files/PDF_12362_0123x.hd5'
temp=load_5D_PDF_from_file(SigPDFFileName=sig_pdf_file, BkgPDFFileName=bkg_pdf_file)
sig_hist=temp[0]
bkg_hist=temp[1]
binedges=temp[2]
distinct_regions_binedges=temp[3]
labels=temp[4]
sig_n_events=temp[5]
bkg_n_events = temp[6]
# In[4]:
# find the logE and coszen bins select those bins in sig/bkg pdfs
logEbincenters = np.array((binedges[0][1:] + binedges[0][:-1] )/2.)
coszenbincenters = np.array((binedges[1][1:] + binedges[1][:-1] )/2.)
logE=-0.01
dE = np.absolute(logEbincenters - logE)
Ebin=np.where(np.amin(dE)==dE)[0][0]
coszen=0.96
dcZ = np.absolute(coszenbincenters - coszen)
cZbin = np.where(np.amin(dcZ)==dcZ)[0][0]
sig_hist_3dslice = sig_hist[Ebin][cZbin]
bkg_hist_3dslice = bkg_hist[Ebin][cZbin]
binedges_3dslice = binedges[2:]
# In[7]:
plot_2D_projected_hist(sig_hist_3dslice,binedges_3dslice,axis=2)
# In[27]:
sig_hdf_files=glob.glob('../../files/Events_12360_?x.hd5.hd5')
bkg_hdf_files=glob.glob('../../files/Events_12362_?x.hd5.hd5')
# In[30]:
# In[31]:
llhr={}
llhr['sig']=load_hdf_file(sig_hdf_files)
llhr['bkg']=load_hdf_file(bkg_hdf_files)
# In[45]:
low_E=1.5
high_E=1.6
low_z=0.8
high_z=.85
for key in llhr.keys():
cut1=llhr[key]['isGood']==1.0
cut2=llhr[key]['tanks_have_nans']==0.
cut3=llhr[key]['log_s125']>=low_E
cut4=llhr[key]['log_s125']<high_E
cut5=llhr[key]['cos_zen']>=low_z
cut6=llhr[key]['cos_zen']<high_z
select=cut1&cut2&cut3&cut4&cut5&cut6
print len(select)
print len(select[select])
hist_this ='llh_ratio'
range=[-10,15]
bins=35
#hist_this='n_extrapolations_bkg_PDF'
#range=[0,20]
#bins=20
plt.hist(llhr[key][hist_this][select],range=range,bins=bins,label=key,histtype='step')
plt.legend()
# In[34]:
llhr['sig'].keys()
# In[2]:
# In[3]:
sig_hist, edges, sig_nevents, labels = load_results_hist('../../files/results_sig_Ezenllhr.hd5')
bkg_hist, edges, bkg_nevents, labels = load_results_hist('../../files/results_bkg_Ezenllhr.hd5')
# In[4]:
sig_onedhist=hist_2d_proj(sig_hist,axis=1)[3]
bkg_onedhist=hist_2d_proj(bkg_hist,axis=1)[3]
# In[5]:
plt.bar(edges[2][:-1],sig_onedhist,alpha=1.,label='rand')
plt.bar(edges[2][:-1],bkg_onedhist,alpha=0.3,label='data')
plt.yscale('log')
#plt.xlim([-1,1])
plt.legend()
# In[54]:
| 23.709091 | 103 | 0.61797 |
b12945ba640ad4a03105665c4e82e2d609d22997 | 3,171 | py | Python | tests/test_vector.py | slode/triton | d440c510f4841348dfb9109f03858c75adf75564 | [
"MIT"
] | null | null | null | tests/test_vector.py | slode/triton | d440c510f4841348dfb9109f03858c75adf75564 | [
"MIT"
] | null | null | null | tests/test_vector.py | slode/triton | d440c510f4841348dfb9109f03858c75adf75564 | [
"MIT"
] | null | null | null | # Copyright (c) 2013 Stian Lode
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import fixtures
from triton.vector import Vector
from triton.vector3d import Vector3d
from triton.vector2d import Vector2d
from pytest import approx
| 24.022727 | 79 | 0.602964 |
b129d2583e5ec5edf4eaa2db0112f68dbe43bc35 | 3,912 | py | Python | build.py | Lvue-YY/Lvue-YY | 630b1ea5d4db9b5d9373d4e9dbbfa9f8fc9baf2e | [
"Apache-2.0"
] | 1 | 2020-07-28T15:48:06.000Z | 2020-07-28T15:48:06.000Z | build.py | Lvue-YY/Lvue-YY | 630b1ea5d4db9b5d9373d4e9dbbfa9f8fc9baf2e | [
"Apache-2.0"
] | null | null | null | build.py | Lvue-YY/Lvue-YY | 630b1ea5d4db9b5d9373d4e9dbbfa9f8fc9baf2e | [
"Apache-2.0"
] | null | null | null | import httpx
import pathlib
import re
import datetime
from bs4 import BeautifulSoup
root = pathlib.Path(__file__).parent.resolve()
if __name__ == "__main__":
readme = root / "README.md"
readme_contents = readme.open().read()
events = get_events()
events_md = "\n".join(
["* {action} <a href={url} target='_blank'>{target}</a> - {time}".format(**item) for item in events]
)
rewritten = replace_chunk(readme_contents, "event", events_md)
entries = get_blogs()
blogs_md = "\n".join(
["* <a href={url} target='_blank'>{title}</a> - {date}".format(**entry) for entry in entries]
)
rewritten = replace_chunk(rewritten, "blog", blogs_md)
time = (datetime.datetime.now() + datetime.timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
time_md = "Automatically updated on " + time
rewritten = replace_chunk(rewritten, "time", time_md)
readme.open("w").write(rewritten) | 37.615385 | 110 | 0.57362 |
b12c849d2ef4e720802c1f093c8c0678dd35a0b0 | 1,061 | py | Python | app/models/news_article_test.py | engineer237/News-application | 66d7e8d70c5c023292dea4f5b87bd11ab5fb102e | [
"MIT"
] | null | null | null | app/models/news_article_test.py | engineer237/News-application | 66d7e8d70c5c023292dea4f5b87bd11ab5fb102e | [
"MIT"
] | null | null | null | app/models/news_article_test.py | engineer237/News-application | 66d7e8d70c5c023292dea4f5b87bd11ab5fb102e | [
"MIT"
] | null | null | null | import unittest # module for testing
from models import news_article
if __name__ == "__main__":
unittest.main() | 48.227273 | 227 | 0.697455 |
b12dc2d34aac9627697ee3968231db8487e21dff | 2,216 | py | Python | samples/at.bestsolution.framework.grid.personsample.model/utils/datafaker.py | BestSolution-at/framework-grid | cdab70e916e20a1ce6bc81fa69339edbb34a2731 | [
"Apache-2.0"
] | 4 | 2015-01-19T11:35:38.000Z | 2021-05-20T04:31:26.000Z | samples/at.bestsolution.framework.grid.personsample.model/utils/datafaker.py | BestSolution-at/framework-grid | cdab70e916e20a1ce6bc81fa69339edbb34a2731 | [
"Apache-2.0"
] | 3 | 2015-01-22T10:42:51.000Z | 2015-02-04T13:06:56.000Z | samples/at.bestsolution.framework.grid.personsample.model/utils/datafaker.py | BestSolution-at/framework-grid | cdab70e916e20a1ce6bc81fa69339edbb34a2731 | [
"Apache-2.0"
] | 3 | 2015-01-15T09:45:13.000Z | 2016-03-08T11:29:58.000Z | #! /usr/bin/env python3
import sys
import random
import os
from faker import Factory as FFactory
OUTFILE = "samples.xmi"
NUM_SAMPLES = 10
NUM_COUNTRIES = 4
TEMPLATE = """<?xml version="1.0" encoding="ASCII"?>
<person:Root
xmi:version="2.0"
xmlns:xmi="http://www.omg.org/XMI"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:person="http://www.bestsolution.at/framework/grid/personsample/1.0"
xsi:schemaLocation="http://www.bestsolution.at/framework/grid/personsample/1.0 ../model/Person.xcore#/EPackage">
{0}
</person:Root>
"""
TEMPLATE_COUNTRY = """<countries name="{0}"/>"""
TEMPLATE_PERSON = """<persons firstname="{0}"
lastname="{1}"
gender="{2}"
married="{3}"
birthdate="{4}">
<address
street="{5}"
number="{6}"
zipcode="{7}"
city="{8}"
country="//@countries.{9}"/>
</persons>
"""
COUNTRIES = []
PERSONS = []
if __name__ == "__main__":
if "-n" in sys.argv:
position_param = sys.argv.index("-n")
NUM_SAMPLES = int(sys.argv[position_param + 1])
sys.argv.pop(position_param)
sys.argv.pop(position_param)
if len(sys.argv) > 1:
OUTFILE = sys.argv.pop()
print("Writing samples to {0}.".format(OUTFILE))
fake_xmi()
| 24.622222 | 116 | 0.553249 |
b12e9ab06bf81720fa6f6bbe4f8fd67e00e19bb0 | 977 | py | Python | tests/test_utility.py | ericbdaniels/pygeostat | 94d9cba9265945268f08302f86ce5ba1848fd601 | [
"MIT"
] | null | null | null | tests/test_utility.py | ericbdaniels/pygeostat | 94d9cba9265945268f08302f86ce5ba1848fd601 | [
"MIT"
] | null | null | null | tests/test_utility.py | ericbdaniels/pygeostat | 94d9cba9265945268f08302f86ce5ba1848fd601 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__author__ = 'pygeostat development team'
__date__ = '2020-01-04'
__version__ = '1.0.0'
import os, sys
try:
import pygeostat as gs
except ImportError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), r'..')))
import pygeostat as gs
import unittest
import warnings
import subprocess
if __name__ == '__main__':
subprocess.call([sys.executable, '-m', 'unittest', str(__file__), '-v']) | 22.204545 | 86 | 0.684749 |
b12fefdc2ed55826f47db62ac7208620f95060a4 | 10,654 | py | Python | rockets/rocket.py | rsewell97/open-starship | ecb5f848b8ce2d7119defec0960b6ccdc176a9db | [
"Unlicense"
] | null | null | null | rockets/rocket.py | rsewell97/open-starship | ecb5f848b8ce2d7119defec0960b6ccdc176a9db | [
"Unlicense"
] | null | null | null | rockets/rocket.py | rsewell97/open-starship | ecb5f848b8ce2d7119defec0960b6ccdc176a9db | [
"Unlicense"
] | null | null | null | import time
import multiprocessing as mp
import numpy as np
from scipy.spatial.transform import Rotation
from world import Earth
| 33.71519 | 141 | 0.583255 |
b1311d08ef54f651d8ccb73e1a63e7ab49ee598f | 868 | py | Python | examples/complex/tcp_message.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 74 | 2016-03-20T17:39:26.000Z | 2020-05-12T13:53:23.000Z | examples/complex/tcp_message.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 7 | 2020-06-16T06:35:02.000Z | 2022-03-15T20:15:53.000Z | examples/complex/tcp_message.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 5 | 2016-12-14T14:56:57.000Z | 2020-03-08T20:58:31.000Z | """
tcp_message Inline Script Hook API Demonstration
------------------------------------------------
* modifies packets containing "foo" to "bar"
* prints various details for each packet.
example cmdline invocation:
mitmdump --rawtcp --tcp-host ".*" -s examples/complex/tcp_message.py
"""
from mitmproxy.utils import strutils
from mitmproxy import ctx
from mitmproxy import tcp
| 31 | 68 | 0.644009 |
b1318eb081bf81d3b2433e9aac0b4bedfc511b35 | 186 | py | Python | notes/notebook/apps.py | spam128/notes | 100008b7e0a2afa5677c15826588105027f52883 | [
"MIT"
] | null | null | null | notes/notebook/apps.py | spam128/notes | 100008b7e0a2afa5677c15826588105027f52883 | [
"MIT"
] | null | null | null | notes/notebook/apps.py | spam128/notes | 100008b7e0a2afa5677c15826588105027f52883 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
| 20.666667 | 54 | 0.763441 |
b1319080e17c411506273e715ba06f2cae72f330 | 409 | py | Python | tests/unit/test_app_init.py | isabella232/typeseam | 3e9d090ec84f2110ae69051364bb0905feb2f02c | [
"BSD-3-Clause"
] | 2 | 2016-02-02T01:14:33.000Z | 2016-04-22T03:45:50.000Z | tests/unit/test_app_init.py | codeforamerica/typeseam | 3e9d090ec84f2110ae69051364bb0905feb2f02c | [
"BSD-3-Clause"
] | 114 | 2015-12-21T23:57:01.000Z | 2016-08-18T01:47:31.000Z | tests/unit/test_app_init.py | isabella232/typeseam | 3e9d090ec84f2110ae69051364bb0905feb2f02c | [
"BSD-3-Clause"
] | 2 | 2016-01-21T09:22:02.000Z | 2021-04-16T09:49:56.000Z | from unittest import TestCase
from unittest.mock import Mock, patch
from typeseam.app import (
load_initial_data,
)
| 24.058824 | 46 | 0.662592 |
b1334d852e2065801f7e2f8ab3a80a2b0c5761be | 2,090 | py | Python | execution/execution.py | nafetsHN/environment | 46bf40e5b4bdf3259c5306497cc70c359ca197d2 | [
"MIT"
] | null | null | null | execution/execution.py | nafetsHN/environment | 46bf40e5b4bdf3259c5306497cc70c359ca197d2 | [
"MIT"
] | null | null | null | execution/execution.py | nafetsHN/environment | 46bf40e5b4bdf3259c5306497cc70c359ca197d2 | [
"MIT"
] | null | null | null | import sys
sys.path.append('../')
from abc import ABCMeta, abstractmethod
# https://www.python-course.eu/python3_abstract_classes.php
import logging
import oandapyV20
from oandapyV20 import API
import oandapyV20.endpoints.orders as orders
from oandapyV20.contrib.requests import MarketOrderRequest
| 29.43662 | 79 | 0.635407 |
b133b22a086276eadb705450f1bd4e54352efb5b | 3,360 | py | Python | conda/update_versions.py | PicoJr/StereoPipeline | 146110a4d43ce6cb5e950297b8dca3f3b5e3f3b4 | [
"Apache-2.0"
] | 323 | 2015-01-10T12:34:24.000Z | 2022-03-24T03:52:22.000Z | conda/update_versions.py | PicoJr/StereoPipeline | 146110a4d43ce6cb5e950297b8dca3f3b5e3f3b4 | [
"Apache-2.0"
] | 252 | 2015-07-27T16:36:31.000Z | 2022-03-31T02:34:28.000Z | conda/update_versions.py | PicoJr/StereoPipeline | 146110a4d43ce6cb5e950297b8dca3f3b5e3f3b4 | [
"Apache-2.0"
] | 105 | 2015-02-28T02:37:27.000Z | 2022-03-14T09:17:30.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
'''
Use dependency versions from a conda enviornment .yaml file to update
a recipe/meta.yaml file of a given package. Such an input file can
be created from the given environment with:
conda env export > myenv.yaml
'''
import sys, os, re
if len(sys.argv) < 3:
print("Usage: " + os.path.basename(sys.argv[0]) + " input.yaml mypackage-feedstock")
sys.exit(1)
inFile = sys.argv[1]
outDir = sys.argv[2]
outFile = outDir + "/recipe/meta.yaml"
if not os.path.exists(outFile):
print("Cannot open file: " + outFile)
sys.exit(1)
# parse the versions from the conda env
conda_env = {}
print("Reading: " + inFile)
inHandle = open(inFile, 'r')
lines = inHandle.readlines()
for line in lines:
# Wipe comments
m = re.match('^(.*?)\#', line)
if m:
line = m.group(1)
# Match the package
m = re.match('^\s*-\s*(.*?)\s*=+\s*(.*?)(=|\s|$)', line)
if not m:
continue
package = m.group(1)
version = m.group(2)
if re.match('^\s*$', package):
continue # ignore empty lines
conda_env[package] = version
#print("got ", package, version)
# Update the lines in the output ile
outHandle = open(outFile, 'r')
lines = outHandle.readlines()
for it in range(len(lines)):
line = lines[it]
# Ignore comments
m = re.match('^\#', line)
if m:
continue
# Match the package
m = re.match('^(\s+-[\t ]+)([^\s]+)(\s*)(.*?)$', line)
if not m:
continue
pre = m.group(1)
package = m.group(2)
spaces = m.group(3).rstrip("\n")
old_version = m.group(4).rstrip("\n")
if spaces == "":
# Ensure there's at least one space
spaces = " "
if old_version == "":
# If there was no version before, don't put one now
continue
if not package in conda_env:
continue
version = conda_env[package]
if old_version != version:
if ('[linux]' in old_version) or ('[osx]' in old_version):
# In this case the user better take a closer look
print("For package " + package + ", not replacing " +
old_version + " with " + version + ", a closer look is suggested.")
else:
print("For package " + package + ", replacing version "
+ old_version + " with " + version)
lines[it] = pre + package + spaces + version + ".\n"
# Save the updated lines to disk
print("Updating: " + outFile)
outHandle = open(outFile, "w")
outHandle.writelines(lines)
outHandle.close()
| 29.734513 | 88 | 0.622024 |
b133ecf4dd2609e5dbd8da4502d3368bb3abe2c9 | 172 | py | Python | test.py | uuidd/SimilarCharacter | 22e5f4b0b2798d903435aeb989ff2d0a4ad59d70 | [
"MIT"
] | 199 | 2019-09-09T08:44:19.000Z | 2022-03-24T12:42:04.000Z | test.py | uuidd/SimilarCharacter | 22e5f4b0b2798d903435aeb989ff2d0a4ad59d70 | [
"MIT"
] | 4 | 2020-08-06T08:03:28.000Z | 2022-01-06T15:14:36.000Z | test.py | uuidd/SimilarCharacter | 22e5f4b0b2798d903435aeb989ff2d0a4ad59d70 | [
"MIT"
] | 58 | 2019-10-10T06:56:43.000Z | 2022-03-21T02:58:01.000Z | import cv2
import ProcessWithCV2
img1 = cv2.imread("D:/py/chinese/7.png")
img2 = cv2.imread("D:/py/chinese/8.png")
a = ProcessWithCV2.dHash(img1, img2, 1)
print(a)
| 21.5 | 41 | 0.686047 |
b134a6803ce8be92cdcf0e2af682a4bd189585d7 | 3,782 | py | Python | scripts/common/alignments.py | SilasK/genome_sketch | 83366703669d749957e1935d6794b93023ed063d | [
"MIT"
] | 1 | 2021-03-26T11:41:55.000Z | 2021-03-26T11:41:55.000Z | scripts/common/alignments.py | SilasK/FastDrep | 83366703669d749957e1935d6794b93023ed063d | [
"MIT"
] | null | null | null | scripts/common/alignments.py | SilasK/FastDrep | 83366703669d749957e1935d6794b93023ed063d | [
"MIT"
] | null | null | null | import pandas as pd
import os
MINIMAP_HEADERS = [
"Contig2",
"Length2",
"Start2",
"End2",
"Strand",
"Contig1",
"Length1",
"Start1",
"End1",
"Nmatches",
"Allength",
"Quality",
]
MINIMAP_DATATYPES = [str, int, int, int, str, str, int, int, int, int, int, int]
assert len(MINIMAP_HEADERS) == len(MINIMAP_DATATYPES)
minimap_dtypes_map = {"i": int, "f": float}
def parse_minimap_line(line):
"""parses a minmap paf line, return a dict.
reads tags and converts datatyes"""
elements = line.strip().split()
out = {}
if not len(elements) == 0:
try:
for i, h in enumerate(MINIMAP_HEADERS):
dtype = MINIMAP_DATATYPES[i]
out[h] = dtype(elements[i])
for i in range(len(MINIMAP_HEADERS), len(elements)):
parse_minimap_tag(elements[i], out)
except Exception as e:
raise IOError(f"Error during parsing paf line : {elements}") from e
return out
| 26.447552 | 87 | 0.536489 |
b13523d49b7c54fc6f8d9d277610505b22619edf | 961 | py | Python | python/sprint1_nonfinals/l.py | tu2gin/algorithms-templates | 14267819a11d36ee9125009b05049334bfdcec2a | [
"MIT"
] | null | null | null | python/sprint1_nonfinals/l.py | tu2gin/algorithms-templates | 14267819a11d36ee9125009b05049334bfdcec2a | [
"MIT"
] | null | null | null | python/sprint1_nonfinals/l.py | tu2gin/algorithms-templates | 14267819a11d36ee9125009b05049334bfdcec2a | [
"MIT"
] | null | null | null | # L.
# , .
# 2 s t, .
# t s 1
# . .
#
# s t, .
# 1000 . .
#
# .
from typing import Tuple
shorter, longer = read_input()
print(get_excessive_letter(shorter, longer))
| 29.121212 | 69 | 0.707596 |
b1353e1a12ba28028561c94ebd3cbfad77dbf672 | 194 | py | Python | bentoml/lightgbm.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | 1 | 2021-06-12T17:04:07.000Z | 2021-06-12T17:04:07.000Z | bentoml/lightgbm.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | 4 | 2021-05-16T08:06:25.000Z | 2021-11-13T08:46:36.000Z | bentoml/lightgbm.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | null | null | null | from ._internal.frameworks.lightgbm import load
from ._internal.frameworks.lightgbm import save
from ._internal.frameworks.lightgbm import load_runner
__all__ = ["load", "load_runner", "save"]
| 32.333333 | 54 | 0.804124 |
b1355b614d3140ba034b33a7f3ee7859a1245971 | 723 | py | Python | flake8_strings/visitor.py | d1618033/flake8-strings | 2ad34a41eab65e2264da7aa91c54dbca701af1c5 | [
"MIT"
] | null | null | null | flake8_strings/visitor.py | d1618033/flake8-strings | 2ad34a41eab65e2264da7aa91c54dbca701af1c5 | [
"MIT"
] | 1 | 2021-02-19T13:50:29.000Z | 2021-02-19T13:50:29.000Z | flake8_strings/visitor.py | d1618033/flake8-strings | 2ad34a41eab65e2264da7aa91c54dbca701af1c5 | [
"MIT"
] | null | null | null | import ast
from typing import List
from flake8_plugin_utils import Visitor
from .errors import UnnecessaryBackslashEscapingError
| 27.807692 | 69 | 0.615491 |
b1355fb67bbb27f060266c03cc17b3aa9d3f3edd | 1,384 | py | Python | tests/test_asyncio_hn.py | MITBigDataGroup2/asyncio-hn | 7133530e8ffb56b7810bcd956241709fc2ae0f48 | [
"MIT"
] | 30 | 2017-02-12T21:58:10.000Z | 2021-11-04T00:11:49.000Z | tests/test_asyncio_hn.py | MITBigDataGroup2/asyncio-hn | 7133530e8ffb56b7810bcd956241709fc2ae0f48 | [
"MIT"
] | 4 | 2017-03-21T12:40:19.000Z | 2021-11-15T17:46:46.000Z | tests/test_asyncio_hn.py | MITBigDataGroup2/asyncio-hn | 7133530e8ffb56b7810bcd956241709fc2ae0f48 | [
"MIT"
] | 2 | 2017-12-18T09:11:45.000Z | 2022-02-09T16:45:49.000Z | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import pytest
from asyncio_hn import ClientHN
def validate_post(post, post_id, post_creator):
if post.get("id") == post_id:
assert post_creator == post.get("by")
| 28.244898 | 74 | 0.629335 |
b13626eb09eac5813e547227a9c0e21459be9cf0 | 5,649 | py | Python | src/data/make_dataset.py | Rajasvi/Adverse-Food-Events-Analysis | 8fb87cfaa4c55eaae56325e516623ad8661d7fb8 | [
"MIT"
] | 1 | 2021-12-16T02:40:31.000Z | 2021-12-16T02:40:31.000Z | src/data/make_dataset.py | AdityaR-Bits/adverse_food_events_analysis-1 | 8fb87cfaa4c55eaae56325e516623ad8661d7fb8 | [
"MIT"
] | 1 | 2021-12-04T00:58:50.000Z | 2021-12-04T00:58:50.000Z | src/data/make_dataset.py | AdityaR-Bits/adverse_food_events_analysis-1 | 8fb87cfaa4c55eaae56325e516623ad8661d7fb8 | [
"MIT"
] | 2 | 2021-12-04T02:11:26.000Z | 2021-12-04T06:32:19.000Z | # -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import pandas as pd
import re
import string
from nltk.corpus import stopwords
def brand_preprocess(row, trim_len=2):
""" This function creates a brand name column by parsing out the product column of data. It trims the words based on trim length param to choose appropriate brand name.
Args:
row ([pd.Series]): Dataframe row
trim_len (int, optional): Length by which product name has to be trimmed. Defaults to 2.
Returns:
[str]: brand name corresponding to a product.
"""
assert isinstance(
row, pd.Series
), "Check whether the function is called over Series"
if pd.isna(row["product"]) or pd.isna(row["product"]):
return pd.NA
# Remove punctuations from product name
regexPunctuation = re.compile("[%s]" % re.escape(string.punctuation))
cleanProduct = regexPunctuation.sub("", row["product"])
nameList = [
_.upper()
for _ in cleanProduct.lower().split(" ")
if _ not in stopwords.words("english")
]
if len(nameList) == 0:
return ""
# for certain categories use trim length to select brand name.
if row["category"] in [
"Nuts/Edible Seed",
"Vit/Min/Prot/Unconv Diet(Human/Animal)",
]:
return (
" ".join(nameList)
if len(nameList) < trim_len
else " ".join(nameList[:trim_len])
)
return nameList[0]
def age_preprocess(row):
"""This function converts age reports to a single unit : year(s)
since Data has age reported in multiple units like month(s),day(s)
Args:
row ([pd.Series]): A row of the entire Dataframe
Returns:
[float]: value of patient_age converted to years unit
"""
assert isinstance(
row, pd.Series
), "Check whether the function is called over Series"
age_conv = {
"month(s)": 1 / 12,
"year(s)": 1,
"day(s)": 1 / 365,
"Decade(s)": 10,
"week(s)": 1 / 52,
}
unit = row["age_units"]
age = row["patient_age"]
if pd.isna(age) or pd.isna(unit):
return -1
else:
return row["patient_age"] * round(age_conv[unit], 4)
if __name__ == "__main__":
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| 32.465517 | 172 | 0.657639 |
b1367245e5290f368fa75d027c1ba49c8fa30f4e | 5,061 | py | Python | src/compare_eval.py | gccrpm/cdmf | 5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc | [
"BSD-2-Clause"
] | 1 | 2020-04-16T05:06:39.000Z | 2020-04-16T05:06:39.000Z | src/compare_eval.py | gccrpm/cdmf | 5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc | [
"BSD-2-Clause"
] | null | null | null | src/compare_eval.py | gccrpm/cdmf | 5fca1393acbedbbd6ebc65bf2c9336645cc3e0fc | [
"BSD-2-Clause"
] | 1 | 2020-04-16T05:06:52.000Z | 2020-04-16T05:06:52.000Z | import os
import re
import hyperparams as hp
from data_load import DataLoad
from tqdm import tqdm
import numpy as np
import pandas as pd
import tensorflow as tf
if __name__ == '__main__':
data = DataLoad(data_path=hp.DATA_PATH,
fnames=hp.FNAMES,
forced_seq_len=hp.FORCED_SEQ_LEN,
vocab_size=hp.VOCAB_SIZE,
paly_times=hp.PLAY_TIMES,
num_main_actors=hp.NUM_MAIN_ACTORS,
batch_size=hp.BATCH_SIZE,
num_epochs=hp.NUM_EPOCHS,
noise_rate=hp.NOISE_RATE)
# CDMF
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
for fpath in load_ckpt_paths('cdmf'):
saver = tf.train.import_meta_graph(fpath+'.meta')
saver.restore(sess, fpath)
# Get the placeholders from the graph by name
m_oids = graph.get_tensor_by_name('movie_order_ids:0')
info = graph.get_tensor_by_name('info:0')
actors = graph.get_tensor_by_name('actors:0')
descriptions = graph.get_tensor_by_name('descriptions:0')
u_oids = graph.get_tensor_by_name('user_order_ids:0')
r_seq = graph.get_tensor_by_name('rating_sequence:0')
dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
# Tensors we want to evaluate
mse_op = graph.get_tensor_by_name('mse/mse_op:0')
# load evalset
eval_iter = data.load_data('eval')
mse, count = 0.0, 0
for (sub_X_user, sub_X_movie), sub_Y in tqdm(eval_iter):
# unpack
sub_u_oids, sub_bu_seq = sub_X_user
sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
sub_r_seq = sub_Y
dev_feed_dict = {
m_oids: sub_m_oids,
info: sub_info,
actors: sub_actors,
descriptions: sub_des,
u_oids: sub_u_oids,
r_seq: sub_r_seq,
dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
sub_mse = sess.run(mse_op, feed_dict=dev_feed_dict)
mse += sub_mse
count += 1
rmse = np.sqrt(mse / count)
print('cdmf | rmse:{}'.format(rmse))
# ConvMF
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
for fpath in load_ckpt_paths('convmf'):
saver = tf.train.import_meta_graph(fpath+'.meta')
saver.restore(sess, fpath)
# Get the placeholders from the graph by name
m_oids = graph.get_tensor_by_name('movie_order_ids:0')
descriptions = graph.get_tensor_by_name('descriptions:0')
u_oids = graph.get_tensor_by_name('user_order_ids:0')
r_seq = graph.get_tensor_by_name('rating_sequence:0')
dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
# Tensors we want to evaluate
mse_op = graph.get_tensor_by_name('mse/mse_op:0')
# load evalset
eval_iter = data.load_data('eval')
mse, count = 0.0, 0
for (sub_X_user, sub_X_movie), sub_Y in tqdm(eval_iter):
# unpack
sub_u_oids, sub_bu_seq = sub_X_user
sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
sub_r_seq = sub_Y
dev_feed_dict = {
m_oids: sub_m_oids,
descriptions: sub_des,
u_oids: sub_u_oids,
r_seq: sub_r_seq,
dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
sub_mse = sess.run(mse_op, feed_dict=dev_feed_dict)
mse += sub_mse
count += 1
rmse = np.sqrt(mse / count)
print('convmf | rmse:{}'.format(rmse)) | 40.814516 | 87 | 0.538234 |
b13b701d2eb809667c24251d55ce1c0bf248bc34 | 1,465 | py | Python | substitute_finder/migrations/0003_product.py | tohugaby/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | 1 | 2020-01-05T18:58:51.000Z | 2020-01-05T18:58:51.000Z | substitute_finder/migrations/0003_product.py | tohugaby/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | 3 | 2020-06-05T18:35:47.000Z | 2021-06-10T20:32:44.000Z | substitute_finder/migrations/0003_product.py | tomlemeuch/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2018-08-14 09:42
from django.conf import settings
from django.db import migrations, models
| 44.393939 | 133 | 0.624573 |
b13db7a0887619658384413e84415d13be784dc2 | 6,613 | py | Python | parameters/standard.py | David-Loibl/gistemp | 4b96696243cbbb425c7b27fed35398e0fef9968d | [
"BSD-3-Clause"
] | 1 | 2020-02-04T13:16:05.000Z | 2020-02-04T13:16:05.000Z | parameters/standard.py | David-Loibl/gistemp4.0 | 4b96696243cbbb425c7b27fed35398e0fef9968d | [
"BSD-3-Clause"
] | null | null | null | parameters/standard.py | David-Loibl/gistemp4.0 | 4b96696243cbbb425c7b27fed35398e0fef9968d | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
#
# parameters/standard.py
#
# Nick Barnes, Ravenbrook Limited, 2010-02-15
# Avi Persin, Revision 2016-01-06
"""Parameters controlling the standard GISTEMP algorithm.
Various parameters controlling each phase of the algorithm are
collected and documented here. They appear here in approximately the
order in which they are used in the algorithm.
Parameters controlling cccgistemp extensions to the standard GISTEMP
algorithm, or obsolete features of GISTEMP, are in other parameter
files.
"""
station_drop_minimum_months = 20
"""A station record must have at least one month of the year with at
least this many valid data values, otherwise it is dropped immediately
prior to the peri-urban adjustment step."""
rural_designator = "global_light <= 10"
"""Describes the test used to determine whether a station is rural or
not, in terms of the station metadata fields. Relevant fields are:
'global_light' (global satellite nighttime radiance value); 'popcls'
(GHCN population class flag; the value 'R' stands for rural);
'us_light' (class derived from satellite nighttime radiance covering
the US and some neighbouring stations), 'berkeley' (a field of unknown
provenance which seems to be related to the Berkeley Earth Surface
Temperature project).
The value of this parameter may be a comma separated sequence. Each
member in that sequence can either be a metadata field name, or a
numeric comparison on a metadata field name (e.g. "global_light <= 10",
the default). If a field name appears on its own, the meaning is
field-dependent.
The fields are consulted in the order specified until one is found
that is not blank, and that obeys the condition (the only field which
is likely to be blank is 'us_light': this sequential feature is
required to emulate a previous version of GISTEMP).
Previous versions of GISTEMP can be "emulated" as follows:
"popcls" GISTEMP 1999 to 2001
"us_light, popcls" GISTEMP 2001 to 2010
"global_light <= 10" GISTEMP 2010 onwards
"global_light <= 0" GISTEMP 2011 passing 2 as second arg to do_comb_step2.sh
"berkeley <= 0" GISTEMP 2011 passing 3 as second arg to do_comb_step2.sh
"""
urban_adjustment_min_years = 20
"""When trying to calculate an urban station adjustment, at least this
many years have to have sufficient rural stations (if there are
not enough qualifying years, we may try again at a larger radius)."""
urban_adjustment_proportion_good = 2.0 / 3.0
"""When trying to calculate an urban station adjustment, at least this
proportion of the years to which the fit applies have to have
sufficient rural stations (if there are insufficient stations, we may
try again at a larger radius)."""
urban_adjustment_min_rural_stations = 3
"""When trying to calculate an urban station adjustment, a year
without at least this number of valid readings from rural stations is
not used to calculate the fit."""
urban_adjustment_min_leg = 5
"""When finding a two-part adjustment, only consider knee years which
have at least this many data points (note: not years) on each side."""
urban_adjustment_short_leg = 7
"""When a two-part adjustment has been identified, if either leg is
shorter than this number of years, a one-part adjustment is applied
instead."""
urban_adjustment_steep_leg = 0.1
"""When a two-part adjustment has been identified, if the gradient of
either leg is steeper than this (in absolute degrees Celsius per
year), or if the difference between the leg gradients is greater than
this, a one-part adjustment is applied instead."""
urban_adjustment_leg_difference = 0.05
"""When a two-part adjustment has been identified, if the difference
in gradient between the two legs is greater than this (in absolute
degrees Celsius per year), it is counted separately for statistical
purposes."""
urban_adjustment_reverse_gradient = 0.02
"""When a two-part adjustment has been identified, if the two
gradients have opposite sign, and both gradients are steeper than this
(in absolute degrees Celsius per year), a one-part adjustment is
applied instead."""
urban_adjustment_full_radius = 1000.0
"""Range in kilometres within which a rural station will be considered
for adjusting an urban station record. Half of this radius will be
attempted first."""
rural_station_min_overlap = 20
"""When combining rural station annual anomaly records to calculate
urban adjustment parameters, do not combine a candidate rural record
if it has fewer than this number years of overlap."""
gridding_min_overlap = 20
"""When combining station records to give a grid record, do not
combine a candidate station record if it has fewer than this number of
years of overlap with the combined grid record."""
gridding_radius = 1200.0
"""The radius in kilometres used to find and weight station records to
give a grid record."""
gridding_reference_period = (1951, 1980)
"""When gridding, temperature series are turned into anomaly series by
subtracting monthly means computed over a reference period. This is
the first and last years of that reference period."""
sea_surface_cutoff_temp = -1.77
"""When incorporating monthly sea-surface datasets, treat any
temperature colder than this as missing data."""
subbox_min_valid = 240
"""When combining the sub-boxes into boxes, do not use any sub-box
record, either land or ocean, which has fewer than this number of
valid data."""
subbox_land_range = 100
"""If a subbox has both land data and ocean data, but the distance
from the subbox centre to the nearest station used in its record is
less than this, the land data is used in preference to the ocean data
when calculating the box series. Note: the distance used is actually a
great-circle chord length."""
subbox_reference_period = (1961, 1990)
"""When combining subbox records into box records, temperature series
are turned into anomaly series by subtracting monthly means computed
over a reference period. This is the first and last years of that
reference period."""
box_min_overlap = 20
"""When combining subbox records to make box records, do not combine a
calendar month from a candidate subbox record if it has fewer than
this number of years of overlap with the same calendar month in the
combined box record. Also used when combining boxes into zones."""
box_reference_period = (1951, 1980)
"""When combining box records into zone records, temperature series
are turned into anomaly series by subtracting monthly means computed
over a reference period. This is the first and last years of that
reference period."""
zone_annual_min_months = 6
"""When computing zone annual means, require at least this many valid
month data."""
| 42.121019 | 77 | 0.788145 |
b13ecc0cc389e823f57ccec244dcd3eab8ae5459 | 5,781 | py | Python | pypdevs/src/pypdevs/tracers/tracerCell.py | martinvy/sin-model-elevators | ebf6511d61326972b2e366c8975f76a944196a6f | [
"MIT"
] | 1 | 2018-09-19T14:42:28.000Z | 2018-09-19T14:42:28.000Z | pypdevs/src/pypdevs/tracers/tracerCell.py | martinvy/sin-model-elevators | ebf6511d61326972b2e366c8975f76a944196a6f | [
"MIT"
] | null | null | null | pypdevs/src/pypdevs/tracers/tracerCell.py | martinvy/sin-model-elevators | ebf6511d61326972b2e366c8975f76a944196a6f | [
"MIT"
] | 2 | 2020-05-29T10:12:37.000Z | 2021-05-19T21:32:35.000Z | # Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pypdevs.util import runTraceAtController, toStr
from pypdevs.activityVisualisation import visualizeMatrix
import sys
| 35.466258 | 113 | 0.523785 |
b13f03597d9a5e677488aa6621f7a6411da41c2d | 3,223 | py | Python | Estrangement/tests/test_utils.py | kawadia/estrangement | 612542bf4af64f248766ad28c18028ff4b2307b5 | [
"BSD-3-Clause"
] | 7 | 2015-02-17T14:04:25.000Z | 2020-02-16T08:59:00.000Z | tnetwork/DCD/externals/estrangement_master/Estrangement/tests/test_utils.py | Yquetzal/tnetwork | 43fb2f19aeed57a8a9d9af032ee80f1c9f58516d | [
"BSD-2-Clause"
] | 1 | 2019-07-13T16:16:28.000Z | 2019-07-15T09:34:33.000Z | Estrangement/tests/test_utils.py | kawadia/estrangement | 612542bf4af64f248766ad28c18028ff4b2307b5 | [
"BSD-3-Clause"
] | 4 | 2015-02-20T15:29:59.000Z | 2021-03-28T04:12:08.000Z | import networkx as nx
import sys
import os
import nose
sys.path.append(os.getcwd() + "/..")
import utils
| 48.104478 | 127 | 0.630779 |
b13f674704e7fed7b35db9e06e6e7c93a0224c41 | 2,184 | py | Python | src/train.py | stephenllh/bcs-unet | be534a25e28cbe3501278d0ee6e2417b2cd737d3 | [
"MIT"
] | 5 | 2021-05-04T12:46:32.000Z | 2022-03-17T09:33:39.000Z | src/train.py | stephenllh/bcs-unet | be534a25e28cbe3501278d0ee6e2417b2cd737d3 | [
"MIT"
] | null | null | null | src/train.py | stephenllh/bcs-unet | be534a25e28cbe3501278d0ee6e2417b2cd737d3 | [
"MIT"
] | null | null | null | import os
import argparse
import pytorch_lightning as pl
from pytorch_lightning.callbacks import (
ModelCheckpoint,
EarlyStopping,
LearningRateMonitor,
)
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.seed import seed_everything
from data.emnist import EMNISTDataModule
from data.svhn import SVHNDataModule
from data.stl10 import STL10DataModule
from engine.learner import BCSUNetLearner
from utils import load_config
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
parser = argparse.ArgumentParser()
parser.add_argument(
"-d", "--dataset", type=str, required=True, help="'EMNIST', 'SVHN', or 'STL10'"
)
parser.add_argument(
"-s",
"--sampling_ratio",
type=float,
required=True,
help="Sampling ratio in percentage",
)
args = parser.parse_args()
if __name__ == "__main__":
run()
| 29.12 | 104 | 0.688645 |
b1402f6a4aea579ed7251e589133544512e942f3 | 6,681 | py | Python | perturbation_classifiers/util/dataset.py | rjos/perturbation-classifiers | 5637b49c5c297e20b4ee6bcee25173d9d11d642f | [
"MIT"
] | null | null | null | perturbation_classifiers/util/dataset.py | rjos/perturbation-classifiers | 5637b49c5c297e20b4ee6bcee25173d9d11d642f | [
"MIT"
] | null | null | null | perturbation_classifiers/util/dataset.py | rjos/perturbation-classifiers | 5637b49c5c297e20b4ee6bcee25173d9d11d642f | [
"MIT"
] | null | null | null | # coding=utf-8
# Author: Rodolfo J. O. Soares <rodolfoj.soares@gmail.com>
import numpy as np
import re
def load_keel_file(path):
"""Load a keel dataset format.
Parameters
----------
path : str
The filepath of the keel dataset format.
Returns
-------
keel_dataset: KeelDataset
The keel dataset format loaded.
"""
handle = open(path)
try:
line = handle.readline().strip()
header_parts = line.split()
if header_parts[0] != "@relation" or len(header_parts) != 2:
raise SyntaxError("This is not a valid keel database.")
# Get database name
relation_name = header_parts[1]
# Get attributes
line = handle.readline().strip()
attrs = []
lkp = {}
while line.startswith("@attribute"):
# Get attribute name
attr_name = line.split(" ")[1]
# Get attribute type
match = re.findall(r"\s([a-z]+)\s{0,1}\[", line)
if len(match) > 0:
attr_type = match[0]
else:
attr_type = "nominal"
# Get values range
if attr_type != "nominal":
match = re.findall(r"\[(.*?)\]", line)
attr_builder = float if attr_type == "real" else int
attr_range = tuple(map(attr_builder, match[0].split(",")))
else:
match = re.findall(r"\{(.*?)\}", line)
attr_builder = str
attr_range = tuple(match[0].replace(" ", "").split(","))
keel_attribute = KeelAttribute(attr_name, attr_type, attr_range, attr_builder)
attrs.append(keel_attribute)
lkp[attr_name] = keel_attribute
line = handle.readline().strip()
# Get inputs
if not line.startswith("@input"):
raise SyntaxError("Expected @input or @inputs. " + line)
inputs_parts = line.split(maxsplit=1)
inputs_name = inputs_parts[1].replace(" ", "").split(",")
inputs = [lkp[name] for name in inputs_name]
# Get output
line = handle.readline().strip()
if not line.startswith("@output"):
raise SyntaxError("Expected @outputs or @outputs. " + line)
output_parts = line.split(maxsplit=1)
output_name = output_parts[1].replace(" ", "").split(",")
outputs = [lkp[name] for name in output_name]
# Get data
line = handle.readline().strip()
if line != "@data":
raise SyntaxError("Expected @data.")
data = [[] for _ in range(len(attrs))]
for data_line in handle:
if data_line:
data_values = data_line.strip().replace(" ", "").split(',')
for lst, value, attr in zip(data, data_values, attrs):
v = value
v = v if v == KeelDataSet.UNKNOWN else attr.builder(v)
lst.append(v)
return KeelDataSet(relation_name, attrs, data, inputs, outputs)
finally:
if path:
handle.close()
| 33.074257 | 163 | 0.553959 |
b14084e431f80764a4ba711f2600b59b246111f5 | 830 | py | Python | ex44e.py | liggettla/python | 4bdad72bc2143679be6d1f8722b83cc359753ca9 | [
"MIT"
] | null | null | null | ex44e.py | liggettla/python | 4bdad72bc2143679be6d1f8722b83cc359753ca9 | [
"MIT"
] | null | null | null | ex44e.py | liggettla/python | 4bdad72bc2143679be6d1f8722b83cc359753ca9 | [
"MIT"
] | null | null | null | #Rather than rely on inplicit inheritance from other classes, classes can just
#call the functions from a class; termed composition
son = Child()
son.implicit()
son.override()
son.altered()
| 21.842105 | 78 | 0.639759 |
b14119e47e0e47d908eda6baf79a8ccfb87c16a5 | 2,333 | py | Python | tools/create_doc.py | nbigaouette/gitlab-api-rs | e84c871ad6f852072a373cd950ede546525913eb | [
"Apache-2.0",
"MIT"
] | 11 | 2017-01-22T18:12:57.000Z | 2021-02-15T21:14:34.000Z | tools/create_doc.py | nbigaouette/gitlab-api-rs | e84c871ad6f852072a373cd950ede546525913eb | [
"Apache-2.0",
"MIT"
] | 16 | 2016-12-05T22:09:27.000Z | 2021-12-25T14:56:43.000Z | tools/create_doc.py | nbigaouette/gitlab-api-rs | e84c871ad6f852072a373cd950ede546525913eb | [
"Apache-2.0",
"MIT"
] | 3 | 2017-01-25T19:30:52.000Z | 2018-01-24T09:08:07.000Z | #!/usr/bin/env python3
import os
import re
import sys
import urllib.request
# api_filename = "projects.md"
api_filename = "groups.md"
url = "https://gitlab.com/gitlab-org/gitlab-ce/raw/master/doc/api/" + api_filename
doc_dir = "doc_tmp"
if not os.path.exists(doc_dir):
os.makedirs(doc_dir)
filename, headers = urllib.request.urlretrieve(url)
with open(filename, 'r') as f:
markdown = f.read()
# print("markdown:", markdown)
urllib.request.urlcleanup()
# Strip out all `json` code blocks included in the file.
p = re.compile("```json.*?```", re.MULTILINE | re.DOTALL)
markdown_wo_json = re.sub(p, "", markdown)
GET_block = "GET /"
p_GET_block = re.compile("```\n(%s.*?)\n```" % GET_block, re.MULTILINE | re.DOTALL)
p_GET_variable = re.compile("(:[^/]*)")
sectionsList = re.sub("[^#]#", "TOSPLIT#", markdown_wo_json).split("TOSPLIT")
for section in sectionsList:
if GET_block in section:
lines = section.splitlines()
title = lines[0].replace("#", "").strip()
# print("title:", title)
# section = re.sub(p_GET_block, "```\n```")
m = p_GET_block.search(section)
GET_command = m.group(1)
GET_variables = p_GET_variable.findall(GET_command)
# Sort the variables in decreasing order of _length_. The reason is that a replace of a shorter
# variable might catch a longer one and corrupt the final result.
GET_variables.sort(key = lambda s: -len(s))
# Replace occurrences of the found variables with upper case, removing the ":"
new_GET_command = GET_command
for GET_variable in GET_variables:
new_GET_command = new_GET_command.replace(GET_variable, GET_variable.replace(":", "").upper())
# section = section.replace(GET_command, new_GET_command)
lines = [line.replace(GET_command, new_GET_command) for line in lines]
# print("title:", title)
filename = api_filename.replace(".md", "") + "-GET-" + title.replace(" ", "-").lower() + ".md"
print("filename:", filename)
full_filename = os.path.join(doc_dir, filename)
with open(full_filename, "w") as f:
f.write("//! %s\n" % title)
f.write("//!\n")
f.write("//! # %s\n" % title)
for line in lines[1:]:
f.write("//! %s\n" % line)
| 33.328571 | 106 | 0.624946 |
b14315cacfc7adb3442f4613fdef5630de51a32c | 997 | py | Python | samples/butia/sumo_crono/push_mouse_event.py | RodPy/Turtlebots.activity | f885d7d2e5d710c01294ae60da995dfb0eb36b21 | [
"MIT"
] | null | null | null | samples/butia/sumo_crono/push_mouse_event.py | RodPy/Turtlebots.activity | f885d7d2e5d710c01294ae60da995dfb0eb36b21 | [
"MIT"
] | null | null | null | samples/butia/sumo_crono/push_mouse_event.py | RodPy/Turtlebots.activity | f885d7d2e5d710c01294ae60da995dfb0eb36b21 | [
"MIT"
] | 1 | 2020-06-17T15:44:16.000Z | 2020-06-17T15:44:16.000Z | #Copyright (c) 2009-11, Walter Bender, Tony Forster
# This procedure is invoked when the user-definable block on the
# "extras" palette is selected.
# Usage: Import this code into a Python (user-definable) block; when
# this code is run, the current mouse status will be pushed to the
# FILO heap. If a mouse button event occurs, a y, x, and 1 are pushed
# to the heap. If no button is pressed, 0 is pushed to the heap.
# To use these data, pop the heap in a compare block to determine if a
# button has been pushed. If a 1 was popped from the heap, pop the x
# and y coordinates.
def myblock(tw, x): # ignore second argument
''' Push mouse event to stack '''
if tw.mouse_flag == 1:
# push y first so x will be popped first
tw.lc.heap.append((tw.canvas.height / 2) - tw.mouse_y)
tw.lc.heap.append(tw.mouse_x - (tw.canvas.width / 2))
tw.lc.heap.append(1) # mouse event
tw.mouse_flag = 0
else:
tw.lc.heap.append(0) # no mouse event
| 36.925926 | 70 | 0.675025 |
b144174f87f4c7e89faeb2a0f3dc32dfe6c660fe | 2,593 | py | Python | espn_api/hockey/constant.py | samthom1/espn-api | 6f3f5915a65f1f7e17778d3a5d3f1121e8c7d5fe | [
"MIT"
] | null | null | null | espn_api/hockey/constant.py | samthom1/espn-api | 6f3f5915a65f1f7e17778d3a5d3f1121e8c7d5fe | [
"MIT"
] | null | null | null | espn_api/hockey/constant.py | samthom1/espn-api | 6f3f5915a65f1f7e17778d3a5d3f1121e8c7d5fe | [
"MIT"
] | null | null | null | #Constants
POSITION_MAP = {
# Remaining: F, IR, Util
0 : '0' # IR?
, 1 : 'Center'
, 2 : 'Left Wing'
, 3 : 'Right Wing'
, 4 : 'Defense'
, 5 : 'Goalie'
, 6 : '6' # Forward ?
, 7 : '7' # Goalie, F (Goalie Bench?)
, 8 : '8' # Goalie, F
, 'Center': 1
, 'Left Wing' : 2
, 'Right Wing' : 3
, 'Defense' : 4
, 'Goalie' : 5
}
STATS_IDENTIFIER = {
'00': 'Total',
'01': 'Last 7',
'02': 'Last 15',
'03': 'Last 30',
'10': 'Projected',
'20': '20'
}
PRO_TEAM_MAP = {
1: 'Boston Bruins'
, 2: 'Buffalo Sabres'
, 3: 'Calgary Flames'
, 4: 'Chicago Blackhawks'
, 5: 'Detroit Red Wings'
, 6: 'Edmonton Oilers'
, 7: 'Carolina Hurricanes'
, 8: 'Los Angeles Kings'
, 9: 'Dallas Stars'
, 10: 'Montral Canadiens'
, 11: 'New Jersey Devils'
, 12: 'New York Islanders'
, 13: 'New York Rangers'
, 14: 'Ottawa Senators'
, 15: 'Philadelphia Flyers'
, 16: 'Pittsburgh Penguins'
, 17: 'Colorado Avalanche'
, 18: 'San Jose Sharks'
, 19: 'St. Louis Blues'
, 20: 'Tampa Bay Lightning'
, 21: 'Toronto Maple Leafs'
, 22: 'Vancouver Canucks'
, 23: 'Washington Capitals'
, 24: 'Arizona Coyotes'
, 25: 'Anaheim Ducks'
, 26: 'Florida Panthers'
, 27: 'Nashville Predators'
, 28: 'Winnipeg Jets'
, 29: 'Columbus Blue Jackets'
, 30: 'Minnesota Wild'
, 37: 'Vegas Golden Knights'
, 124292: 'Seattle Krakens'
}
STATS_MAP = {
'0': 'GS',
'1': 'W',
'2': 'L',
'3': 'SA',
'4': 'GA',
'5': '5',
'6': 'SV',
'7': 'SO',
'8': 'MIN ?',
'9': 'OTL',
'10': 'GAA',
'11': 'SV%',
'12': '12',
'13': 'G',
'14': 'A',
'15': '+/-',
'16': '16',
'17': 'PIM',
'18': 'PPG',
'19': '19',
'20': 'SHG',
'21': 'SHA',
'22': 'GWG',
'23': 'FOW',
'24': 'FOL',
'25': '25',
'26': 'TTOI ?',
'27': 'ATOI',
'28': 'HAT',
'29': 'SOG',
'30': '30',
'31': 'HIT',
'32': 'BLK',
'33': 'DEF',
'34': 'GP',
'35': '35',
'36': '36',
'37': '37',
'38': 'PPP',
'39': 'SHP',
'40': '40',
'41': '41',
'42': '42',
'43': '43',
'44': '44',
'45': '45',
'99': '99'
}
ACTIVITY_MAP = {
178: 'FA ADDED',
180: 'WAIVER ADDED',
179: 'DROPPED',
181: 'DROPPED',
239: 'DROPPED',
244: 'TRADED',
'FA': 178,
'WAIVER': 180,
'TRADED': 244
}
| 20.744 | 42 | 0.415349 |
b1445f82594bc253e4a47533cb5834aed7b2e1e1 | 649 | py | Python | dataval/conftest.py | weishengtoh/machinelearning_assignment | 2099377faf0b1086cb3c496eecd3b0ae533a90f2 | [
"Apache-2.0"
] | null | null | null | dataval/conftest.py | weishengtoh/machinelearning_assignment | 2099377faf0b1086cb3c496eecd3b0ae533a90f2 | [
"Apache-2.0"
] | null | null | null | dataval/conftest.py | weishengtoh/machinelearning_assignment | 2099377faf0b1086cb3c496eecd3b0ae533a90f2 | [
"Apache-2.0"
] | null | null | null | import os
import pandas as pd
import pytest
import yaml
import wandb
run = wandb.init(project='RP_NVIDIA_Machine_Learning',
job_type='data_validation')
| 21.633333 | 77 | 0.694915 |
b1450ba4c392fda6a05914dd0e6efe6138ef8c05 | 8,049 | py | Python | src/abaqus/Odb/Odb.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Odb/Odb.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Odb/Odb.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from abaqusConstants import *
from .OdbPart import OdbPart
from .OdbStep import OdbStep
from .SectionCategory import SectionCategory
from ..Amplitude.AmplitudeOdb import AmplitudeOdb
from ..BeamSectionProfile.BeamSectionProfileOdb import BeamSectionProfileOdb
from ..Filter.FilterOdb import FilterOdb
from ..Material.MaterialOdb import MaterialOdb
| 37.966981 | 112 | 0.610262 |
b1483e23d7d2752b7248ed2d54d8ac8e55492604 | 241 | py | Python | popcorn_gallery/tutorials/urls.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | 15 | 2015-03-23T02:55:20.000Z | 2021-01-12T12:42:30.000Z | popcorn_gallery/tutorials/urls.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | null | null | null | popcorn_gallery/tutorials/urls.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | 16 | 2015-02-18T21:43:31.000Z | 2021-11-09T22:50:03.000Z | from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'popcorn_gallery.tutorials.views',
url(r'^(?P<slug>[\w-]+)/$', 'object_detail', name='object_detail'),
url(r'^$', 'object_list', name='object_list'),
)
| 30.125 | 71 | 0.66805 |
b1485dd7aa764623468a3437193c8ab420612082 | 3,738 | py | Python | tests/characterisation/test_kelvin_models.py | pauliacomi/adsutils | 062653b38924d419d1235edf7909078ff98a163f | [
"MIT"
] | 35 | 2018-01-24T14:59:08.000Z | 2022-03-10T02:47:58.000Z | tests/characterisation/test_kelvin_models.py | pauliacomi/adsutils | 062653b38924d419d1235edf7909078ff98a163f | [
"MIT"
] | 29 | 2018-01-06T12:08:08.000Z | 2022-03-11T20:26:53.000Z | tests/characterisation/test_kelvin_models.py | pauliacomi/adsutils | 062653b38924d419d1235edf7909078ff98a163f | [
"MIT"
] | 20 | 2019-06-12T19:20:29.000Z | 2022-03-02T09:57:02.000Z | """
This test module has tests relating to kelvin model validations.
All functions in /calculations/models_kelvin.py are tested here.
The purposes are:
- testing the meniscus shape determination function
- testing the output of the kelvin equations
- testing that the "function getter" is performing as expected.
The kelvin functions are tested against pre-calculated values
at several points.
"""
import numpy
import pytest
import pygaps.characterisation.models_kelvin as km
import pygaps.utilities.exceptions as pgEx
| 35.264151 | 80 | 0.607277 |
b1491744c42a7da1be2a17f6cb231604a6c7385b | 2,231 | py | Python | packages/jet_bridge/jet_bridge/__main__.py | bokal2/jet-bridge | dddc4f55c2d5a28c02ce9515dffc750e3887450f | [
"MIT"
] | 1 | 2020-02-06T01:07:44.000Z | 2020-02-06T01:07:44.000Z | packages/jet_bridge/jet_bridge/__main__.py | bokal2/jet-bridge | dddc4f55c2d5a28c02ce9515dffc750e3887450f | [
"MIT"
] | null | null | null | packages/jet_bridge/jet_bridge/__main__.py | bokal2/jet-bridge | dddc4f55c2d5a28c02ce9515dffc750e3887450f | [
"MIT"
] | null | null | null | import os
from datetime import datetime
import sys
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from jet_bridge_base import configuration
from jet_bridge.configuration import JetBridgeConfiguration
conf = JetBridgeConfiguration()
configuration.set_configuration(conf)
from jet_bridge_base.commands.check_token import check_token_command
from jet_bridge_base.db import database_connect
from jet_bridge_base.logger import logger
from jet_bridge import settings, VERSION
from jet_bridge.settings import missing_options, required_options_without_default
if __name__ == '__main__':
main()
| 30.561644 | 103 | 0.714926 |
b14a3e4e999395aab5aac5de3e1df984c03e66f4 | 690 | py | Python | casepro/translation.py | praekelt/helpdesk | 69a7242679c30d2f7cb30a433809e738b9756a3c | [
"BSD-3-Clause"
] | 5 | 2015-07-21T15:58:31.000Z | 2019-09-14T22:34:00.000Z | casepro/translation.py | praekelt/helpdesk | 69a7242679c30d2f7cb30a433809e738b9756a3c | [
"BSD-3-Clause"
] | 197 | 2015-03-24T15:26:04.000Z | 2017-11-28T19:24:37.000Z | casepro/translation.py | praekelt/helpdesk | 69a7242679c30d2f7cb30a433809e738b9756a3c | [
"BSD-3-Clause"
] | 10 | 2015-03-24T12:26:36.000Z | 2017-02-21T13:08:57.000Z | from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.utils.translation import get_language as _get_language
from modeltranslation.translator import translator, TranslationOptions
from modeltranslation import utils
from nsms.text.models import Text
translator.register(Text, TextTranslationOptions)
# need to translate something for django translations to kick in
_("Something to trigger localizations")
# monkey patch a version of get_language that isn't broken
utils.get_language = get_language
| 27.6 | 70 | 0.815942 |
b14a72da64d12a7c8066ba502beb5c9606168931 | 147 | py | Python | Booleans/4.2.4 If/4.2.5 Fix the problem.py | ferrerinicolas/python_samples | 107cead4fbee30b275a5e2be1257833129ce5e46 | [
"MIT"
] | null | null | null | Booleans/4.2.4 If/4.2.5 Fix the problem.py | ferrerinicolas/python_samples | 107cead4fbee30b275a5e2be1257833129ce5e46 | [
"MIT"
] | null | null | null | Booleans/4.2.4 If/4.2.5 Fix the problem.py | ferrerinicolas/python_samples | 107cead4fbee30b275a5e2be1257833129ce5e46 | [
"MIT"
] | null | null | null | can_juggle = True
# The code below has problems. See if
# you can fix them!
#if can_juggle print("I can juggle!")
#else
print("I can't juggle.")
| 16.333333 | 37 | 0.693878 |
b14c88c3a21671daaf4ca901cbbd386b9d8bf26a | 703 | py | Python | pytools/mpiwrap.py | nchristensen/pytools | 82da2e0aad6863763f1950318bcb933662020135 | [
"MIT"
] | 52 | 2015-06-23T10:30:24.000Z | 2021-07-28T20:50:31.000Z | pytools/mpiwrap.py | nchristensen/pytools | 82da2e0aad6863763f1950318bcb933662020135 | [
"MIT"
] | 72 | 2015-10-22T18:57:08.000Z | 2022-03-01T00:04:45.000Z | pytools/mpiwrap.py | nchristensen/pytools | 82da2e0aad6863763f1950318bcb933662020135 | [
"MIT"
] | 27 | 2015-09-14T07:24:04.000Z | 2021-12-17T14:31:33.000Z | """See pytools.prefork for this module's reason for being."""
import mpi4py.rc # pylint:disable=import-error
mpi4py.rc.initialize = False
from mpi4py.MPI import * # noqa pylint:disable=wildcard-import,wrong-import-position
import pytools.prefork # pylint:disable=wrong-import-position
pytools.prefork.enable_prefork()
if Is_initialized(): # noqa pylint:disable=undefined-variable
raise RuntimeError("MPI already initialized before MPI wrapper import")
| 33.47619 | 85 | 0.762447 |
b14cfa3a8ca9bb29e189356b82457936f9e99aff | 6,096 | py | Python | vlcp/service/connection/tcpserver.py | geek-plus/vlcp | e7936e00929fcef00c04d4da39b67d9679d5f083 | [
"Apache-2.0"
] | 1 | 2016-09-10T12:09:29.000Z | 2016-09-10T12:09:29.000Z | vlcp/service/connection/tcpserver.py | wan-qy/vlcp | e7936e00929fcef00c04d4da39b67d9679d5f083 | [
"Apache-2.0"
] | null | null | null | vlcp/service/connection/tcpserver.py | wan-qy/vlcp | e7936e00929fcef00c04d4da39b67d9679d5f083 | [
"Apache-2.0"
] | null | null | null | '''
Created on 2015/10/19
:author: hubo
'''
from vlcp.server.module import Module, api
from vlcp.event import TcpServer
from vlcp.event.runnable import RoutineContainer
from vlcp.event.connection import Client
| 43.234043 | 146 | 0.564304 |
b14d75f54839eba4678025c29ab6853f284addcb | 1,571 | py | Python | make/requirements.py | Fizzadar/Kanmail | 3915b1056949b50410478d1519b9276d64ef4f5d | [
"OpenSSL"
] | 12 | 2019-02-10T21:18:53.000Z | 2020-02-17T07:40:48.000Z | make/requirements.py | Fizzadar/Kanmail | 3915b1056949b50410478d1519b9276d64ef4f5d | [
"OpenSSL"
] | 71 | 2017-11-17T07:13:02.000Z | 2020-04-03T15:25:43.000Z | make/requirements.py | Fizzadar/Kanmail | 3915b1056949b50410478d1519b9276d64ef4f5d | [
"OpenSSL"
] | 1 | 2020-02-15T03:16:13.000Z | 2020-02-15T03:16:13.000Z | from distutils.spawn import find_executable
from os import path
import click
from .settings import (
BASE_DEVELOPMENT_REQUIREMENTS_FILENAME,
BASE_REQUIREMENTS_FILENAME,
DEVELOPMENT_REQUIREMENTS_FILENAME,
REQUIREMENTS_FILENAME,
)
from .util import print_and_run
if __name__ == '__main__':
cli()
| 21.819444 | 93 | 0.695099 |
b14f875123a59ce6fa0837c5ecb49e829cede9cf | 1,135 | py | Python | integration/python/src/helper/hosts.py | ArpitShukla007/planetmint | 4b1e215e0059e26c0cee6778c638306021b47bdd | [
"Apache-2.0"
] | 3 | 2022-01-19T13:39:52.000Z | 2022-01-28T05:57:08.000Z | integration/python/src/helper/hosts.py | ArpitShukla007/planetmint | 4b1e215e0059e26c0cee6778c638306021b47bdd | [
"Apache-2.0"
] | 67 | 2022-01-13T22:42:17.000Z | 2022-03-31T14:18:26.000Z | integration/python/src/helper/hosts.py | ArpitShukla007/planetmint | 4b1e215e0059e26c0cee6778c638306021b47bdd | [
"Apache-2.0"
] | 7 | 2022-01-13T16:20:54.000Z | 2022-02-07T11:42:05.000Z | # Copyright 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from typing import List
from planetmint_driver import Planetmint
| 30.675676 | 102 | 0.667841 |
b151396bf4b33731a5544d5a99c0e63a228fafd2 | 24,737 | py | Python | baiduspider/core/__init__.py | samuelmao415/BaiduSpider | c896201ced6714878ad13867f83d740f303df68b | [
"MIT"
] | 1 | 2020-09-19T03:17:08.000Z | 2020-09-19T03:17:08.000Z | baiduspider/core/__init__.py | samuelmao415/BaiduSpider | c896201ced6714878ad13867f83d740f303df68b | [
"MIT"
] | null | null | null | baiduspider/core/__init__.py | samuelmao415/BaiduSpider | c896201ced6714878ad13867f83d740f303df68b | [
"MIT"
] | null | null | null | """BaiduSpider
:Author: Sam Zhang
:Licence: MIT
:GitHub: https://github.com/samzhangjy
:GitLab: https://gitlab.com/samzhangjy
TODO:
TODO:
"""
import json
import os
import re
from html import unescape
from pprint import pprint
from urllib.parse import quote, urlparse
import requests
from bs4 import BeautifulSoup
from baiduspider.core._spider import BaseSpider
from baiduspider.core.parser import Parser
from baiduspider.errors import ParseError, UnknownError
__all__ = ['BaiduSpider']
| 33.701635 | 767 | 0.402353 |
b15153401c65e82722c6b9906d4e09d6524f4e20 | 1,200 | py | Python | HY_Plotter/windReader/reader/cfosat.py | BigShuiTai/HY-CFOSAT-ASCAT-Wind-Data-Plotter | 5be90e5d35151d4c056c77344bf5075e144c3113 | [
"MIT"
] | 1 | 2021-08-22T06:30:58.000Z | 2021-08-22T06:30:58.000Z | HY_Plotter/windReader/reader/cfosat.py | Dapiya/HY-CFOSAT-L2B-Wind-Data-Plotter | 5be90e5d35151d4c056c77344bf5075e144c3113 | [
"MIT"
] | 1 | 2021-10-30T07:25:17.000Z | 2021-10-30T16:22:17.000Z | HY_Plotter/windReader/reader/cfosat.py | Dapiya/HY-CFOSAT-L2B-Wind-Data-Plotter | 5be90e5d35151d4c056c77344bf5075e144c3113 | [
"MIT"
] | 1 | 2021-08-21T12:51:39.000Z | 2021-08-21T12:51:39.000Z | import netCDF4
import numpy as np | 41.37931 | 115 | 0.549167 |
b15405b5c4a9b35dd5bdc84b62d31229a91e7265 | 17,228 | py | Python | example_snippets.py | kimberscott/ffmpeg-stimuli-generation | 54bce134a3236d9e7d2fefe4538378d76f2db798 | [
"MIT"
] | null | null | null | example_snippets.py | kimberscott/ffmpeg-stimuli-generation | 54bce134a3236d9e7d2fefe4538378d76f2db798 | [
"MIT"
] | null | null | null | example_snippets.py | kimberscott/ffmpeg-stimuli-generation | 54bce134a3236d9e7d2fefe4538378d76f2db798 | [
"MIT"
] | 1 | 2020-08-14T17:15:29.000Z | 2020-08-14T17:15:29.000Z | """
Examples of using the functions in videotools.py to generate videos.
This file will not run as-is - it is just intended to provide reference commands you might copy and edit.
"""
import os
from videotools import *
this_path = os.path.dirname(os.path.abspath(__file__))
input_path = os.path.join(this_path, "example_input")
output_path = os.path.join(this_path, "example_output")
# Put two videos side-by-side
makeSideBySide(os.path.join(input_path, "cropped_book.mp4"), os.path.join(input_path, "cropped_box.mp4"), "right", os.path.join(output_path, "side_by_side.mp4"))
# Make a collage of the object-introduction videos
vids = [
"apple",
"cup",
"lotion",
"spray",
"whiteball",
"orangeball",
"train",
"toycar",
"sunglasses",
"marker",
"flashlight",
"block",
]
vids = ["cropped_" + v + ".mp4" for v in vids]
make_collage(input_path, vids, 4, os.path.join(output_path, "0_introsA"), True, 1920, vidHeight=640)
# Replace the audio in VIDEO_1 with a different mp3 file NEW_AUDIO
sp.call([
"ffmpeg",
"-i",
VIDEO_1,
"-i",
NEW_AUDIO,
"-map",
"0:v",
"-map",
"1:a",
"-shortest",
OUTPUT_VIDEO_NAME,
])
# Make a video where the input video plays backwards then forwards
sp.call(
[
"ffmpeg",
"-i",
INPUT_VIDEO,
"-i",
INPUT_VIDEO,
"-filter_complex",
"[1:v]reverse[secondhalf];[0:v][secondhalf]concat[out]",
"-map",
"""[out]""",
"-loglevel",
"error",
OUTPUT_VIDEO,
]
)
# The following are included for reference about potentially useful ffmpeg commands only - they are very specialized
# for particular stimuli!
def combineVideos(croppedVideoDir, sidebysideDir, regularOrderDict, whichVersions, minimal=False):
'''Generate all versions of side-by-side videos needed for Lookit physics study.
i.e. A / B, flippedA / B, A / flippedB, flippedA / flippedB.'''
make_sure_path_exists(sidebysideDir)
commands = ["""[0:v]setpts=PTS-STARTPTS,pad=iw*3:ih:color=white[a];[1:v]setpts=PTS-STARTPTS[z];[a][z]overlay=x=2*w:repeatlast=1:shortest=1:eof_action=repeat[out]""", \
"""[0:v]setpts=PTS-STARTPTS,hflip,pad=iw*3:ih:color=white[b];[1:v]setpts=PTS-STARTPTS[z];[b][z]overlay=x=2*w:repeatlast=1:shortest=1:eof_action=repeat[out]""", \
"""[0:v]setpts=PTS-STARTPTS,pad=iw*3:ih:color=white[b];[1:v]setpts=PTS-STARTPTS[z];[z]hflip[c];[b][c]overlay=x=2*w:repeatlast=1:shortest=1:eof_action=repeat[out]""", \
"""[0:v]setpts=PTS-STARTPTS,hflip,pad=iw*3:ih:color=white[b];[1:v]setpts=PTS-STARTPTS[z];[z]hflip[c];[b][c]overlay=x=2*w:repeatlast=1:shortest=1:eof_action=repeat[out]"""]
suffixes = ['NN', 'RN', 'NR', 'RR']
allfiles = os.listdir(croppedVideoDir)
for iVid1, video1 in enumerate(allfiles):
(shortname1, ext1) = os.path.splitext(video1)
if not(os.path.isdir(os.path.join(croppedVideoDir, video1))) and ext1 == VIDEXT:
for iVid2 in range(len(allfiles)):
if iVid2 == iVid1:
continue
if minimal and iVid2 <= iVid1:
continue
else:
video2 = allfiles[iVid2]
(shortname2, ext2) = os.path.splitext(video2)
if not(os.path.isdir(os.path.join(croppedVideoDir, video2))) and ext2 == VIDEXT:
labels = [parse_video_filename(v, regularOrderDict) for v in [video1, video2]]
if labels[0][0] == labels[1][0] and \
labels[0][2] == labels[1][2] and \
labels[0][3] == labels[1][3] and \
labels[0][4] == labels[1][4]:
outfilenameBase = 'sbs_' + labels[0][0] + '_' + labels[0][1] + '_' + labels[1][1] + '_' + \
labels[0][2] + '_' + labels[0][3] + '_' + labels[0][4] + '_'
for iVid in range(len(commands)):
if suffixes[iVid] in whichVersions:
sp.call(["ffmpeg", "-i", os.path.join(croppedVideoDir, video1), \
"-i", os.path.join(croppedVideoDir, video2), \
"-filter_complex", \
commands[iVid], \
"-map", """[out]""", "-loglevel", "error", \
os.path.join(sidebysideDir, outfilenameBase + suffixes[iVid] + '.mp4')])
### Crops and rescales 640px wide.
def cropVideos(
origVideoDir,
croppedVideoDir,
regularOrderDict,
originalSizes=[],
cropStrings=[],
which=[],
cropByName=[],
timecrop=[],
fadeParams=[],
doCrossFade=False,
):
"""TODO: docstring
timecrop: list of (ID, start, stop, padStart, padStop) tuples.
ID: dict containing any keys in ['object', 'event', 'outcome', 'camera', 'background'] and values.
This time cropping will be applied to any videos that match the values for all
the specified keys.
start, stop: start and stop times in s.
padStart, padStop: amount of time to extend first and last frames by, in s.
fadeParams: (fadeFrames, fadeColor)
"""
make_sure_path_exists(croppedVideoDir)
for f in os.listdir(origVideoDir):
if not (os.path.isdir(os.path.join(origVideoDir, f))):
(shortname, ext) = os.path.splitext(f)
if ext in ORIGEXT:
if regularOrderDict:
(event, outcome, object, camera, background) = parse_video_filename(
shortname, regularOrderDict
)
thisID = {
"event": event,
"outcome": outcome,
"object": object,
"camera": camera,
"background": background,
}
if len(which) == 2 and not (object, event) == which:
continue
if len(which) == 3 and not (object, event, outcome) == which:
continue
timecropCommand = []
doTimeCrop = False
if timecrop:
for (ID, s, e, pS, pE) in timecrop:
if all([thisID[key] == val for (key, val) in ID.items()]):
startTime = s
endTime = e
padStart = pS
padEnd = pE
doTimeCrop = True
if doTimeCrop:
if not startTime == -1:
timecropCommand = ["-ss", str(startTime)]
if not endTime == -1:
timecropCommand = timecropCommand + [
"-t",
str(endTime - startTime),
]
else:
warnings.warn("No time cropping for this video")
if cropByName:
for (vidNames, cropStrForNames) in cropByName:
if f in vidNames:
cropStr = cropStrForNames
else:
if originalSizes == "*":
cropStr = cropStrings[0]
else:
res = findVideoResolution(os.path.join(origVideoDir, f))
if res in originalSizes:
cropStr = cropStrings[originalSizes.index(res)]
else:
cropStr = """scale=640:-2"""
cropStr = cropStr + ",setpts=PTS-STARTPTS"
if doTimeCrop:
croppedVid = os.path.join(
croppedVideoDir, shortname + "_middle.mp4"
)
croppedVidFinal = os.path.join(croppedVideoDir, shortname + ".mp4")
else:
croppedVid = os.path.join(croppedVideoDir, shortname + ".mp4")
croppedVidFinal = croppedVid
command = (
["ffmpeg", "-i", os.path.join(origVideoDir, f), "-vf", cropStr]
+ timecropCommand
+ ["-loglevel", "error", croppedVid]
)
sp.call(command)
if doTimeCrop:
firstImg = os.path.join(croppedVideoDir, shortname + "_first.png")
lastImg = os.path.join(croppedVideoDir, shortname + "_last.png")
firstVid = os.path.join(croppedVideoDir, shortname + "_first.mp4")
lastVid = os.path.join(croppedVideoDir, shortname + "_last.mp4")
sp.call(
[
"ffmpeg",
"-i",
croppedVid,
"-vframes",
"1",
"-f",
"image2",
firstImg,
"-loglevel",
"error",
]
)
[nF, dur, x, y] = get_video_details(
croppedVid, ["nframes", "vidduration", "width", "height"]
)
sp.call(
[
"ffmpeg",
"-i",
croppedVid,
"-vf",
"select='eq(n,{})'".format(nF - 1),
"-vframes",
"1",
"-f",
"image2",
lastImg,
"-loglevel",
"error",
]
)
sp.call(
[
"ffmpeg",
"-loop",
"1",
"-i",
firstImg,
"-t",
str(padStart),
firstVid,
"-loglevel",
"error",
]
)
sp.call(
[
"ffmpeg",
"-loop",
"1",
"-i",
lastImg,
"-t",
str(padEnd),
lastVid,
"-loglevel",
"error",
]
)
if not doCrossFade:
concat_mp4s(croppedVidFinal, [firstVid, croppedVid, lastVid])
else:
unfaded = os.path.join(
croppedVideoDir, shortname + "_beforecrossfade.mp4"
)
concat_mp4s(unfaded, [croppedVid, lastVid])
# see crossfade advice at http://superuser.com/a/778967
sp.call(
[
"ffmpeg",
"-i",
unfaded,
"-i",
firstVid,
"-f",
"lavfi",
"-i",
"color=white:s={}x{}".format(int(x), int(y)),
"-filter_complex",
"[0:v]format=pix_fmts=yuva420p,fade=t=out:st={}:d={}:alpha=1,setpts=PTS-STARTPTS[va0];\
[1:v]format=pix_fmts=yuva420p,fade=t=in:st=0:d={}:alpha=1,setpts=PTS-STARTPTS+{}/TB[va1];\
[2:v]scale={}x{},trim=duration={}[over];\
[over][va0]overlay=format=yuv420[over1];\
[over1][va1]overlay=format=yuv420[outv]".format(
dur + padEnd,
padEnd,
padEnd,
dur,
int(x),
int(y),
dur + padStart + padEnd,
),
"-vcodec",
"libx264",
"-map",
"[outv]",
croppedVidFinal,
"-loglevel",
"error",
]
)
os.remove(unfaded)
os.remove(firstImg)
os.remove(lastImg)
os.remove(firstVid)
os.remove(lastVid)
os.remove(croppedVid)
if fadeParams:
(fadeFrames, fadeColor) = fadeParams
nF = get_video_details(croppedVidFinal, "nframes")
unfaded = os.path.join(croppedVideoDir, shortname + "_unfaded.mp4")
os.rename(croppedVidFinal, unfaded)
sp.call(
[
"ffmpeg",
"-i",
unfaded,
"-vf",
"""fade=type=in:start_frame=1:nb_frames={}:color={},fade=type=out:start_frame={}:color={}""".format(
fadeFrames, fadeColor, nF - fadeFrames, fadeColor
),
"-loglevel",
"error",
croppedVidFinal,
]
)
os.remove(unfaded) | 40.252336 | 187 | 0.390585 |
b1542cd589e62fb7173b027c1b40c713b7897ca2 | 615 | py | Python | sample_project/env/lib/python3.9/site-packages/qtpy/tests/test_qtprintsupport.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | 4 | 2021-11-19T03:25:13.000Z | 2022-02-24T15:32:30.000Z | sample_project/env/lib/python3.9/site-packages/qtpy/tests/test_qtprintsupport.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | null | null | null | sample_project/env/lib/python3.9/site-packages/qtpy/tests/test_qtprintsupport.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | 3 | 2020-08-04T02:48:32.000Z | 2020-08-17T01:20:09.000Z | from __future__ import absolute_import
import pytest
from qtpy import QtPrintSupport
def test_qtprintsupport():
"""Test the qtpy.QtPrintSupport namespace"""
assert QtPrintSupport.QAbstractPrintDialog is not None
assert QtPrintSupport.QPageSetupDialog is not None
assert QtPrintSupport.QPrintDialog is not None
assert QtPrintSupport.QPrintPreviewDialog is not None
assert QtPrintSupport.QPrintEngine is not None
assert QtPrintSupport.QPrinter is not None
assert QtPrintSupport.QPrinterInfo is not None
assert QtPrintSupport.QPrintPreviewWidget is not None
| 32.368421 | 59 | 0.782114 |
b155f55e9f976d163537ef6daaa4dfc7e72b3594 | 2,004 | py | Python | logbook/auth.py | nicola-zanardi/personal-logbook | d44989825ec82437ffd50572c23ef7c2ddf00e30 | [
"Unlicense"
] | null | null | null | logbook/auth.py | nicola-zanardi/personal-logbook | d44989825ec82437ffd50572c23ef7c2ddf00e30 | [
"Unlicense"
] | 7 | 2019-08-28T18:22:40.000Z | 2020-01-15T09:10:13.000Z | logbook/auth.py | nicola-zen/personal-logbook | d44989825ec82437ffd50572c23ef7c2ddf00e30 | [
"Unlicense"
] | null | null | null | from flask import Blueprint, flash, redirect, render_template, request, url_for
from werkzeug.security import check_password_hash, generate_password_hash
from flask_login import login_required, login_user, logout_user
from logbook.models import User, db
from peewee import fn
auth = Blueprint("auth", __name__)
| 30.363636 | 94 | 0.686627 |
b156849efe28743e1f59dbcfbfb3f32c4319b8b3 | 2,718 | py | Python | gecko/classes/api_handler.py | paulschick/Coingecko-Crypto-Price-API | c712856bf423a6d1d429a35c8a8e01bb983ec7ff | [
"MIT"
] | 2 | 2022-01-18T18:09:31.000Z | 2022-02-28T01:01:45.000Z | gecko/classes/api_handler.py | paulschick/Coingecko-Crypto-Price-API | c712856bf423a6d1d429a35c8a8e01bb983ec7ff | [
"MIT"
] | null | null | null | gecko/classes/api_handler.py | paulschick/Coingecko-Crypto-Price-API | c712856bf423a6d1d429a35c8a8e01bb983ec7ff | [
"MIT"
] | null | null | null | import aiohttp
from aiohttp import ClientConnectionError, ClientResponseError
from .models import CoinsResponse, SimplePriceResponse
from .configs import Config
from typing import List, Dict, Union
| 41.181818 | 102 | 0.472774 |
b156a941a513ed31187d8dbd1191f683290ef317 | 1,497 | py | Python | Hello-Cifar-10/keras.py | PyTorchLightning/grid-tutorials | a45ec1bed374660b5a423d096945e462b3241efc | [
"Apache-2.0"
] | null | null | null | Hello-Cifar-10/keras.py | PyTorchLightning/grid-tutorials | a45ec1bed374660b5a423d096945e462b3241efc | [
"Apache-2.0"
] | null | null | null | Hello-Cifar-10/keras.py | PyTorchLightning/grid-tutorials | a45ec1bed374660b5a423d096945e462b3241efc | [
"Apache-2.0"
] | null | null | null | from argparse import ArgumentParser
from pathlib import Path
from tensorflow import keras
# Define this script's flags
parser = ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=5)
parser.add_argument('--data_dir', type=str, default="./data/")
args = parser.parse_args()
# Make sure data_dir is absolute + create it if it doesn't exist
data_dir = Path(args.data_dir).absolute()
data_dir.mkdir(parents=True, exist_ok=True)
# Download and/or load data from disk
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data(data_dir / 'mnist.npz')
# Standardize X's to be between 0.0-1.0 instead of 0-255
x_train, x_test = x_train.astype("float32") / 255, x_test.astype("float32") / 255
# Build Model
model = keras.models.Sequential(
[
keras.layers.Flatten(input_shape=(28, 28, 1)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax'),
]
)
# Compile
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=args.lr),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
# Train
history = model.fit(
x_train,
y_train,
batch_size=args.batch_size,
epochs=args.max_epochs,
validation_split=0.1,
callbacks=[keras.callbacks.TensorBoard(log_dir='./lightning_logs/keras')],
)
# Evaluate
model.evaluate(x_test, y_test)
| 28.788462 | 93 | 0.725451 |
b15750ce5aef5b54cce96688ad262cadc96dc7f8 | 4,432 | py | Python | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | [
"Apache-2.0"
] | 2 | 2015-11-08T12:45:38.000Z | 2017-06-03T09:16:16.000Z | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | [
"Apache-2.0"
] | null | null | null | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | [
"Apache-2.0"
] | null | null | null | """
taskmaster.consumer
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import cPickle as pickle
import gevent
from gevent_zeromq import zmq
from gevent.queue import Queue
from taskmaster.util import import_target
| 25.181818 | 92 | 0.54287 |
b1587cfb5054c54695ad8b82700668819e284945 | 3,165 | py | Python | src/loop.py | migueldingli1997/PySnake | b9b7e98651b207f7bf846cd951b4bb4ee3bba426 | [
"Apache-2.0"
] | 2 | 2020-03-06T09:09:00.000Z | 2022-01-12T14:29:51.000Z | src/loop.py | migueldingli1997/PySnake | b9b7e98651b207f7bf846cd951b4bb4ee3bba426 | [
"Apache-2.0"
] | 20 | 2020-02-09T16:42:53.000Z | 2020-03-07T18:47:35.000Z | src/loop.py | migueldingli1997/PySnake | b9b7e98651b207f7bf846cd951b4bb4ee3bba426 | [
"Apache-2.0"
] | null | null | null | import pygame as pg
from pygame.time import Clock
from src.drawer import Drawer
from src.game import Game
from src.utils.config import Config
from src.utils.score import ScoresList
from src.utils.sfx import SfxHolder
from src.utils.text import Text
from src.utils.util import Util, user_quit
| 35.166667 | 78 | 0.529226 |
b15a0f38860998844631ced61f5490b9a9898c55 | 7,135 | py | Python | tests/test_detectCompileCommand.py | langrind/ccjtools | 6f92d8cadf24d6e1f26e984df3c11b4d58061053 | [
"MIT"
] | null | null | null | tests/test_detectCompileCommand.py | langrind/ccjtools | 6f92d8cadf24d6e1f26e984df3c11b4d58061053 | [
"MIT"
] | null | null | null | tests/test_detectCompileCommand.py | langrind/ccjtools | 6f92d8cadf24d6e1f26e984df3c11b4d58061053 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from ccjtools import ccj_make
def test_detectExactSpecifiedCompilerCommandWord():
"""Using -c option, check that the exact word is recognized"""
inputFileName = 'dummy'
parsedArgs = ccj_make.mkccj_parse_args(['progname', inputFileName, '-c', 'mastadon'])
if not parsedArgs:
assert False
# Note that we are basically testing "strcmp()" here. A different test is used
# to check a whole line of input
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "Mastadon"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "Mastadon"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon++"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "astadon"):
assert False
assert True
def test_detectCompilerWord():
"""Not using -c option, check that plausible compiler commands are recognized"""
inputFileName = 'dummy'
parsedArgs = ccj_make.mkccj_parse_args(['progname', inputFileName])
if not parsedArgs:
assert False
# Note that we are basically testing a regexp single-word match. A different test
# is used to check a whole line of input
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "gcc"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon-gcc"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "Mastadon-c++"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "gcc"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "c++"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "g++"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon++"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon"):
assert False
assert True
def test_detectExactSpecifiedCompilerCommand():
"""Using -c option, check that lines are recognized correctly"""
inputFileName = 'dummy'
parsedArgs = ccj_make.mkccj_parse_args(['progname', inputFileName, '-c', 'mastadon'])
if not parsedArgs:
assert False
if ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadons are not bluefish -Itheentireseas"):
assert False
if not ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadon are not bluefish -Itheentireseas"):
assert False
if ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadon-gcc mastadon.c -D_THIS_ -D_THAT_ -fno-dependent-clauses-or-santa-clauses-either"):
assert False
bigString = "/opt/gcc-arm-none-eabi-6-2017-q2-update/bin/arm-none-eabi-g++ -DCONFIG_ARCH_BOARD_PX4_FMU_V5 -D__CUSTOM_FILE_IO__ -D__DF_NUTTX -D__PX4_NUTTX -D__STDC_FORMAT_MACROS -isystem ../../platforms/nuttx/NuttX/include/cxx -isystem NuttX/nuttx/include/cxx -isystem NuttX/nuttx/include -I../../boards/px4/fmu-v5/src -I../../platforms/nuttx/src/px4/common/include -I. -Isrc -Isrc/lib -Isrc/modules -I../../platforms/nuttx/src/px4/stm/stm32f7/include -I../../platforms/common/include -I../../src -I../../src/include -I../../src/lib -I../../src/lib/DriverFramework/framework/include -I../../src/lib/matrix -I../../src/modules -I../../src/platforms -INuttX/nuttx/arch/arm/src/armv7-m -INuttX/nuttx/arch/arm/src/chip -INuttX/nuttx/arch/arm/src/common -INuttX/apps/include -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -Os -DNDEBUG -g -fdata-sections -ffunction-sections -fomit-frame-pointer -fmerge-all-constants -fno-signed-zeros -fno-trapping-math -freciprocal-math -fno-math-errno -fno-strict-aliasing -fvisibility=hidden -include visibility.h -Wall -Wextra -Werror -Warray-bounds -Wcast-align -Wdisabled-optimization -Wdouble-promotion -Wfatal-errors -Wfloat-equal -Wformat-security -Winit-self -Wlogical-op -Wpointer-arith -Wshadow -Wuninitialized -Wunknown-pragmas -Wunused-variable -Wno-missing-field-initializers -Wno-missing-include-dirs -Wno-unused-parameter -fdiagnostics-color=always -fno-builtin-printf -fno-strength-reduce -Wformat=1 -Wunused-but-set-variable -Wno-format-truncation -fcheck-new -fno-exceptions -fno-rtti -fno-threadsafe-statics -Wreorder -Wno-overloaded-virtual -nostdinc++ -std=gnu++11 -o msg/CMakeFiles/uorb_msgs.dir/topics_sources/uORBTopics.cpp.obj -c /home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"
if ccj_make.mkccj_process_line(parsedArgs, {}, [], bigString):
assert False
assert True
def test_detectCompilerCommandLine():
"""Not using -c option, check that plausible compiler command lines are recognized"""
inputFileName = 'dummy'
parsedArgs = ccj_make.mkccj_parse_args(['progname', inputFileName])
if not parsedArgs:
assert False
if ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadons are not bluefish -Itheentireseas"):
assert False
if not ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadon-gcc mastadon.c -D_THIS_ -D_THAT_ -fno-dependent-clauses-or-santa-clauses-either"):
assert False
bigString = "/opt/gcc-arm-none-eabi-6-2017-q2-update/bin/arm-none-eabi-g++ -DCONFIG_ARCH_BOARD_PX4_FMU_V5 -D__CUSTOM_FILE_IO__ -D__DF_NUTTX -D__PX4_NUTTX -D__STDC_FORMAT_MACROS -isystem ../../platforms/nuttx/NuttX/include/cxx -isystem NuttX/nuttx/include/cxx -isystem NuttX/nuttx/include -I../../boards/px4/fmu-v5/src -I../../platforms/nuttx/src/px4/common/include -I. -Isrc -Isrc/lib -Isrc/modules -I../../platforms/nuttx/src/px4/stm/stm32f7/include -I../../platforms/common/include -I../../src -I../../src/include -I../../src/lib -I../../src/lib/DriverFramework/framework/include -I../../src/lib/matrix -I../../src/modules -I../../src/platforms -INuttX/nuttx/arch/arm/src/armv7-m -INuttX/nuttx/arch/arm/src/chip -INuttX/nuttx/arch/arm/src/common -INuttX/apps/include -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -Os -DNDEBUG -g -fdata-sections -ffunction-sections -fomit-frame-pointer -fmerge-all-constants -fno-signed-zeros -fno-trapping-math -freciprocal-math -fno-math-errno -fno-strict-aliasing -fvisibility=hidden -include visibility.h -Wall -Wextra -Werror -Warray-bounds -Wcast-align -Wdisabled-optimization -Wdouble-promotion -Wfatal-errors -Wfloat-equal -Wformat-security -Winit-self -Wlogical-op -Wpointer-arith -Wshadow -Wuninitialized -Wunknown-pragmas -Wunused-variable -Wno-missing-field-initializers -Wno-missing-include-dirs -Wno-unused-parameter -fdiagnostics-color=always -fno-builtin-printf -fno-strength-reduce -Wformat=1 -Wunused-but-set-variable -Wno-format-truncation -fcheck-new -fno-exceptions -fno-rtti -fno-threadsafe-statics -Wreorder -Wno-overloaded-virtual -nostdinc++ -std=gnu++11 -o msg/CMakeFiles/uorb_msgs.dir/topics_sources/uORBTopics.cpp.obj -c /home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"
if not ccj_make.mkccj_process_line(parsedArgs, {}, [], bigString):
assert False
assert True
| 62.043478 | 1,789 | 0.737211 |
b15a35bd4f1abd5ba27c131e3166d2cc71012e7c | 748 | py | Python | Medium/valid-ip-addresses.py | SaumyaRai2010/algoexpert-data-structures-algorithms | bcafd8d7798661bf86c2d6234221d764c68fc19f | [
"MIT"
] | 152 | 2021-07-15T02:56:17.000Z | 2022-03-31T08:59:52.000Z | Medium/valid-ip-addresses.py | deepakgarg08/algoexpert-data-structures-algorithms | 2264802bce971e842c616b1eaf9238639d73915f | [
"MIT"
] | 2 | 2021-07-18T22:01:28.000Z | 2022-02-17T03:55:04.000Z | Medium/valid-ip-addresses.py | deepakgarg08/algoexpert-data-structures-algorithms | 2264802bce971e842c616b1eaf9238639d73915f | [
"MIT"
] | 74 | 2021-07-16T11:55:30.000Z | 2022-03-31T14:48:06.000Z |
# VALID IP ADDRESSES
# O(1) time and space
| 24.933333 | 95 | 0.620321 |
b15acd6c26c6ac380b78b3c4621e284328ee4d9a | 1,999 | py | Python | resnet152/configs.py | LiuHao-THU/frame2d | c2b923aa45bf2e523e281d1bc36c7f3e70f9fb2b | [
"Apache-2.0"
] | 1 | 2020-05-15T03:28:53.000Z | 2020-05-15T03:28:53.000Z | resnet152/configs.py | LiuHao-THU/frame2d | c2b923aa45bf2e523e281d1bc36c7f3e70f9fb2b | [
"Apache-2.0"
] | null | null | null | resnet152/configs.py | LiuHao-THU/frame2d | c2b923aa45bf2e523e281d1bc36c7f3e70f9fb2b | [
"Apache-2.0"
] | null | null | null | """
this .py file contains all the parameters
"""
import os
configs = {}
main_dir = 'frame_vessel/resnet152'
#****************************************read data parameters**************************************
configs['max_angle'] = 20
configs['root_dir'] = 'data'
configs['save_dir'] = 'saved_data'
configs['image_size'] = 224
configs['per'] = 0.9 #percentage splited from the raw data
configs['saved_npy'] = True
configs['imgs_train'] = 'imgs_train.npy'
configs['imgs_label'] = 'imgs_label.npy'
configs['imgs_train_test'] = 'imgs_train_test.npy'
configs['imgs_label_test'] = 'imgs_label_test.npy'
configs['model_path'] = 'frame_vessel/pretrain_model/resnet/resnet152.npy'
#**************************************argumentation parameters************************************
configs['raw_images'] = True
configs['horizontal_flip_num'] = False
configs['vertical_flip_num'] = False
configs['random_rotate_num'] = 1
configs['random_crop_num'] = 1
configs['center_crop_num'] = 0
configs['slide_crop_num'] = 0
configs['slide_crop_old_num'] = 0
#*************************************train parameters**********************************************
configs['image_size'] = 224
# configs['channel'] = 3
configs['channel'] = 3
configs["batch_size"] = 8
configs['epoch'] = 20
configs['final_layer_type'] = "softmax_sparse"
configs['learning_rate_orig'] = 1e-3
configs['checkpoint_dir'] = main_dir+ '/check_points'
configs['num_classes'] = 3
configs['VGG_MEAN'] = [1.030626238009759419e+02, 1.159028825738600261e+02, 1.231516308384586438e+02]
configs['_BATCH_NORM_DECAY'] = 0.997
configs['_BATCH_NORM_EPSILON'] = 1e-5
#************************************device parameters**********************************************
configs["num_gpus"] = 1
configs["dev"] = '/gpu:0' #'/cpu:0'
# configs["dev"] = '/cpu:0' #'/cpu:0'
configs['GPU'] = '0'
#************************************evaluate parameters********************************************
| 39.98 | 101 | 0.561281 |
b15c81d9f33f129ae3eb078cb489fe17c6a3fe71 | 2,707 | py | Python | src/packagedcode/windows.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1,511 | 2015-07-01T15:29:03.000Z | 2022-03-30T13:40:05.000Z | src/packagedcode/windows.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
] | 2,695 | 2015-07-01T16:01:35.000Z | 2022-03-31T19:17:44.000Z | src/packagedcode/windows.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | [
"Apache-2.0",
"CC-BY-4.0"
] | 540 | 2015-07-01T15:08:19.000Z | 2022-03-31T12:13:11.000Z | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import attr
import xmltodict
from packagedcode import models
from commoncode import filetype
# Tracing flags
TRACE = False
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
| 27.622449 | 90 | 0.615441 |
b15cd11eeded0e97332a28f0cc409f651b2843ff | 988 | py | Python | day-21/main.py | jmolinski/advent-of-code-2018 | 96bad97d6523bc99d63c86bbff6b13602952a91d | [
"MIT"
] | 2 | 2018-12-16T20:48:52.000Z | 2021-03-28T15:07:51.000Z | day-21/main.py | jmolinski/advent-of-code-2018 | 96bad97d6523bc99d63c86bbff6b13602952a91d | [
"MIT"
] | null | null | null | day-21/main.py | jmolinski/advent-of-code-2018 | 96bad97d6523bc99d63c86bbff6b13602952a91d | [
"MIT"
] | 1 | 2018-12-02T13:36:24.000Z | 2018-12-02T13:36:24.000Z | # decompiled-by-hand & optimized
# definitely not gonna refactor this one
# 0.18s on pypy3
ip_reg = 4
reg = [0, 0, 0, 0, 0, 0]
i = 0
seen = set()
lst = []
while True:
i += 1
break_true = False
while True:
if break_true:
if i == 1:
print("1)", reg[1])
if reg[1] in seen:
if len(lst) == 25000:
p2 = max(seen, key=lambda x: lst.index(x))
print("2)", p2)
exit()
seen.add(reg[1])
lst.append(reg[1])
break
reg[2] = reg[1] | 65536 # 6
reg[1] = 8725355 # 7
while True:
reg[5] = reg[2] & 255 # 8
reg[1] += reg[5] # 9
reg[1] &= 16777215 # 10
reg[1] *= 65899 # 11
reg[1] &= 16777215 # 12
reg[2] = reg[2] // 256
if reg[2] == 0:
break_true = True
break
break_true = False
| 22.976744 | 62 | 0.403846 |
b15ea12d5029680389c91718e2950c1e519b15d4 | 1,247 | py | Python | website/canvas/funnels.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 61 | 2015-11-10T17:13:46.000Z | 2021-08-06T17:58:30.000Z | website/canvas/funnels.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 13 | 2015-11-11T07:49:41.000Z | 2021-06-09T03:45:31.000Z | website/canvas/funnels.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 18 | 2015-11-11T04:50:04.000Z | 2021-08-20T00:57:11.000Z | from django.conf import settings
from canvas.metrics import Metrics
def _setup_funnels():
by_name = {}
for name, steps in Funnels.names:
funnel = Funnel(name, steps)
setattr(Funnels, name, funnel)
by_name[name] = funnel
Funnels.by_name = by_name
_setup_funnels()
| 25.979167 | 57 | 0.565357 |
b1612586b6458c702c53a9e35ab3d78b199a5137 | 3,948 | py | Python | hopfield.py | mstruijs/neural-demos | 2be157bbac4b42c008190745bb3ee75a278d7e34 | [
"MIT"
] | null | null | null | hopfield.py | mstruijs/neural-demos | 2be157bbac4b42c008190745bb3ee75a278d7e34 | [
"MIT"
] | null | null | null | hopfield.py | mstruijs/neural-demos | 2be157bbac4b42c008190745bb3ee75a278d7e34 | [
"MIT"
] | null | null | null | import numpy as np
from neupy import algorithms,plots
import matplotlib.pyplot as plt
from neupy.utils import format_data
from neupy.algorithms.memory.utils import bin2sign,step_function
import argparse
dhnet = algorithms.DiscreteHopfieldNetwork(mode='async', check_limit=False)
iteration = 0
output_data = None
n_features = 0
def ascii_visualise(bin_vector, m=10,n=10):
'''
Basic visualisation for debug purposes: print binary vector as m x n matrix
'''
for row in bin_vector.reshape((n,m)).tolist():
print(' '.join('.X'[val] for val in row))
def read_data(filename):
'''
Read the training/test data from file and return it in a list of matrices.
'''
res = [];
m = [];
rf = open(filename, 'r')
for line in rf.readlines():
if len(line) == 1:#empty line
res.append(np.matrix(m))
m = [];
continue
for char in line.strip():
m.append(1 if char=='X' else 0)
res.append(np.matrix(m))
rf.close()
return res
def train(data):
'''
Train the network with the supplied data
'''
dhnet.train(np.concatenate(data, axis = 0))
def run(input, iterations=None, show=False):
'''
Run the trained network with the given input, for the specified number of iterations.
Print the the result if `show`
'''
result = dhnet.predict(input, iterations)
if show:
ascii_visualise(result)
print()
return result
def show_weights():
'''
Plot the weight matrix in a Hinton diagram
'''
plt.figure(figsize=(14,12))
plt.title("Hinton diagram (weights)")
plots.hinton(dhnet.weight)
plt.show()
def initialise_run(input_data):
'''
Prepare a controlled iteration on a trained network for the given input
'''
global iteration,dhnet,output_data,n_features
iteration = 0
dhnet.discrete_validation(input_data)
input_data = format_data(bin2sign(input_data), is_feature1d=False)
_, n_features = input_data.shape
output_data = input_data
def step(step_size=1, show=False):
'''
Execute `step_size` asynchronous update steps on the initialised network.
Print the result if `show`.
'''
global iteration,dhnet,output_data,n_features
for _ in range(step_size):
iteration+=1
position = np.random.randint(0, n_features - 1)
raw_new_value = output_data.dot(dhnet.weight[:, position])
output_data[:, position] = np.sign(raw_new_value)
result = step_function(output_data).astype(int)
if show:
print("--Iteration " + str(iteration) + ":")
ascii_visualise(result)
return result
def is_stable():
'''
Return True iff the initialised network has reached a stable output
'''
global dhnet,output_data,n_features,iteration
for position in range(0,n_features-1):
raw_new_value = output_data.dot(dhnet.weight[:, position])
if np.sign(raw_new_value) != output_data[0][position]:
return False
return True
def run_to_convergence(input_data, show_list=[], show_all=True):
'''
Runs a trained network on `input_data` until it converges to a stable output.
Print the intermediate output at all positions in `show_list`.
'''
initialise_run(input_data)
i=0
result = None
while not(is_stable()):
i+=1
result=step(show=(i in show_list or show_all))
return result
if __name__ == "__main__":
args = get_args()
training_data = read_data(str(args['train']))
train(training_data)
test_data = read_data(str(args['test']))
print('--Start')
ascii_visualise(test_data[0])
step_run = False
if step_run:
initialise_run(test_data[0])
for i in range(1,300,5):
print("--Iteration " + str(i) + ":")
step(step_size=5,show=True)
if is_stable():
break
else:
res = run_to_convergence(test_data[0],[62,144,232,379])
print("--Iteration " + str(iteration) + ":")
ascii_visualise(res)
print('--End')
| 27.608392 | 87 | 0.719352 |
b161578913391598cf5bd530a5ec301a0546f6e8 | 686 | py | Python | client/fig_client.py | haihala/fig | 426e2ee218c8a55e6389ace497a7f365425daae1 | [
"MIT"
] | null | null | null | client/fig_client.py | haihala/fig | 426e2ee218c8a55e6389ace497a7f365425daae1 | [
"MIT"
] | null | null | null | client/fig_client.py | haihala/fig | 426e2ee218c8a55e6389ace497a7f365425daae1 | [
"MIT"
] | null | null | null | from init import conf_parse, socket_init
from sock_ops import pull_sync, push_sync
from fs_ops import construct_tree, differences
from debug import debug_print
import time
if __name__ == "__main__":
main() | 21.4375 | 46 | 0.740525 |
b161de6456d6f8b14c33e69247fe9c0fa8b2fa93 | 23,850 | py | Python | TicTacToe2.py | tlively/N-TicTacToe | db1143e2e94012451ba590952670452431814b7b | [
"MIT"
] | 6 | 2017-10-03T13:37:54.000Z | 2020-12-21T07:34:01.000Z | TicTacToe2.py | tlively/N-TicTacToe | db1143e2e94012451ba590952670452431814b7b | [
"MIT"
] | null | null | null | TicTacToe2.py | tlively/N-TicTacToe | db1143e2e94012451ba590952670452431814b7b | [
"MIT"
] | 4 | 2017-07-04T18:53:52.000Z | 2021-03-24T03:15:07.000Z | # N-Dimensional Tic-Tac-Toe by Thomas Lively
from __future__ import division
import curses, curses.ascii, sys
# logical representation of the n-dimensional board as a single list
# A view for the model. Other views might use Curses or a graphics library
# serves as a "Main" class and controls user interface with model and view
# run the game if run as a script
if __name__ == u"__main__":
#TextGameController()
args = [int(i) for i in sys.argv[1:]]
if args:
CursesController(Model(*args))
else:
CursesController(Model(4))
| 38.405797 | 94 | 0.512704 |
b161fd74a00848098e638db57b29a16c1340bf14 | 854 | py | Python | platform-tools/systrace/catapult/devil/devil/utils/run_tests_helper.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 1,894 | 2015-04-17T18:29:53.000Z | 2022-03-28T22:41:06.000Z | platform-tools/systrace/catapult/devil/devil/utils/run_tests_helper.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | platform-tools/systrace/catapult/devil/devil/utils/run_tests_helper.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions common to native, java and host-driven test runners."""
import collections
import logging
from devil.utils import logging_common
CustomFormatter = logging_common.CustomFormatter
_WrappedLoggingArgs = collections.namedtuple('_WrappedLoggingArgs',
['verbose', 'quiet'])
def SetLogLevel(verbose_count, add_handler=True):
"""Sets log level as |verbose_count|.
Args:
verbose_count: Verbosity level.
add_handler: If true, adds a handler with |CustomFormatter|.
"""
logging_common.InitializeLogging(
_WrappedLoggingArgs(verbose_count, 0),
handler=None if add_handler else logging.NullHandler())
| 31.62963 | 75 | 0.725995 |
b1623f67cebbb4df1eda133e8176caaaf6a0be46 | 4,819 | py | Python | src/classical_ml/pca.py | Jagriti-dixit/CS229_Project_Final | 16fdb55086411dee17153e88b2499c378cdfc096 | [
"MIT"
] | null | null | null | src/classical_ml/pca.py | Jagriti-dixit/CS229_Project_Final | 16fdb55086411dee17153e88b2499c378cdfc096 | [
"MIT"
] | null | null | null | src/classical_ml/pca.py | Jagriti-dixit/CS229_Project_Final | 16fdb55086411dee17153e88b2499c378cdfc096 | [
"MIT"
] | null | null | null | import sys
import time
from comet_ml import Experiment
import pydub
import numpy as np
from pydub import AudioSegment
import librosa
import librosa.display
import matplotlib.pyplot as plt
import sklearn
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
import pandas as pd
from pathlib import Path
import math,random
import zipfile as zf
import soundfile as sf
import pandas as pd
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
import json
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, classification_report, precision_score, recall_score
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, auc, log_loss
from sklearn.datasets import make_classification
from sklearn.metrics import plot_confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
import getSamples as gs
from sklearn.metrics import precision_score, \
recall_score, confusion_matrix, classification_report, \
accuracy_score, f1_score
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import seaborn as sns
train_file = sys.argv[1]
test_file = sys.argv[2]
print("Reading train and test dataset")
#train = pd.read_csv('train_data_noise_pad.csv')
train = pd.read_csv(train_file)
print("read train data")
#test = pd.read_csv('test_data_noise_pad.csv')
test = pd.read_csv(test_file)
print("read test data")
print("Read two big files ")
X_train = train.iloc[:,:2040]
y_train = train.iloc[:,2041]
X_test = test.iloc[:,:2040]
y_test = test.iloc[:,2041]
# X_train = train.iloc[:,:20]
# y_train = train.iloc[:,21]
# X_test = test.iloc[:,:20]
# y_test = test.iloc[:,21]
X_train = StandardScaler(with_mean=True).fit_transform(X_train)
X_test = StandardScaler(with_mean=True).fit_transform(X_test)
print("Mean of train data is ",np.mean(X_train),"Std deviation is",np.std(X_train))
pca = PCA(n_components = 'mle')
pca = PCA().fit(X_train)
print('Explained variation per principal component:{}'.format((pca.explained_variance_ratio_)))
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('Cumulative explained variance')
plt.savefig("cumulative_variance_plot.png")
time_start = time.time()
print("we want to see the accumulated variance of 700 features ")
pca = PCA(n_components = 700)
pca_result = pca.fit_transform(X_train)
pca_test = pca.transform(X_test)
X_train_pca = pca_result
X_test_pca = pca_test
out_train = "train_pca.csv"
pca_train = pd.DataFrame(data=X_train_pca)
pca_train['language'] = y_train
out_file_train = open(out_train,'wb')
pca_train.to_csv(out_file_train,index=False)
out_file_train.close()
out_test = "test_pca.csv"
pca_test = pd.DataFrame(data=X_test_pca)
pca_test['language'] = y_test
out_file_test = open(out_test,'wb')
pca_test.to_csv(out_file_test,index=False)
out_file_test.close()
time_start = time.time()
print("shapes are",X_train_pca.shape,y_train.shape)
print("X_train shape is ",X_train_pca.shape,"X_test shape is",X_test_pca.shape)
print("Total variation in these 1000 features is",np.sum(pca.explained_variance_ratio_))
print('PCA done! Time elapsed: {} seconds'.format(time.time()-time_start))
print("Now lets plot PCA for 2D visualisation")
##Taking only some of the total dataset randomly for plotting
np.random.seed(42)
rndperm = np.random.permutation(train.shape[0])
#2D plot(Having two components)
plt.figure(figsize=(16,10))
pca = PCA(n_components = 2)
pca_result = pca.fit_transform(X_train)
train['pca_one'] = pca_result[:,0]
train['pca_two'] = pca_result[:,1]
sns.scatterplot(
x="pca_one", y="pca_two",
hue="2041",
palette=sns.color_palette("hls", 3),
data=train.loc[rndperm,:],
legend="full",
alpha=0.3
)
plt.savefig("PCA_2d.png")
###PCA with 3 components
pca = PCA(n_components = 3)
pca_result = pca.fit_transform(X_train)
train['pca_one'] = pca_result[:,0]
train['pca_two'] = pca_result[:,1]
train['pca_three'] = pca_result[:,2]
print("Its processing 3d plot")
#3D plot(Having 3 components)
ax = plt.figure(figsize=(16,10)).gca(projection='3d')
ax.scatter(
xs=train.loc[rndperm,:]["pca_one"],
ys=train.loc[rndperm,:]["pca_two"],
zs=train.loc[rndperm,:]["pca_three"],
c=train.loc[rndperm,:]["2041"],
cmap='tab10'
)
ax.set_xlabel('pca_one')
ax.set_ylabel('pca_two')
ax.set_zlabel('pca_three')
plt.savefig("PCA_3d.png")
| 31.496732 | 97 | 0.775265 |
b16413494678ee579844f16a2bea9f231ef05803 | 1,601 | py | Python | API/application.py | XuhuaHuang/LearnPython | eb39f11147716193971dd5a8894e675daa1b9d01 | [
"MIT"
] | null | null | null | API/application.py | XuhuaHuang/LearnPython | eb39f11147716193971dd5a8894e675daa1b9d01 | [
"MIT"
] | null | null | null | API/application.py | XuhuaHuang/LearnPython | eb39f11147716193971dd5a8894e675daa1b9d01 | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
db = SQLAlchemy(app)
| 28.589286 | 85 | 0.66396 |
b165edb3b3722f2964b765aab8fe578c7cc4aee1 | 2,199 | py | Python | examples/mass-generation.py | Orange-OpenSource/tinypyki | 10c57fb3a4413f4c601baaf58e53d92fd4a09f49 | [
"BSD-3-Clause"
] | 1 | 2018-05-29T22:50:33.000Z | 2018-05-29T22:50:33.000Z | examples/mass-generation.py | Orange-OpenSource/tinypyki | 10c57fb3a4413f4c601baaf58e53d92fd4a09f49 | [
"BSD-3-Clause"
] | null | null | null | examples/mass-generation.py | Orange-OpenSource/tinypyki | 10c57fb3a4413f4c601baaf58e53d92fd4a09f49 | [
"BSD-3-Clause"
] | 2 | 2016-11-01T11:45:28.000Z | 2021-06-22T10:18:46.000Z | #!/usr/bin/env python
"""A third example to get started with tinypyki.
Toying with mass certificate generation.
"""
import os
import tinypyki as tiny
print("Creating a pki instance named \"mass-pki\"")
pki = tiny.PKI("mass-pki")
print("Create the \"root-ca\"")
root_ca = tiny.Node(nid = "root-ca", pathlen = 1, san="email=dev.null@hexample.com")
print("Create 10 sub nodes")
targets = [tiny.Node(nid = "target-{0}".format(i), issuer = "root-ca", ntype="u", san="ip=192.168.0.{0}, dns=hexample.com".format((175+i)%256)) for i in range(10)]
print("Insert the root-ca then all nodes in the pki")
tiny.do.insert(root_ca, pki)
for node in targets:
tiny.change.subj(node, cn=node.nid + "-dummy-hexample")
tiny.do.insert(node, pki)
print("Create everything, including p12 bundles")
tiny.do.everything(pki, pkcs12 = True)
print("Observe the pki changes")
tiny.show(pki)
# Uncomment this if you wish to see the contents of all the files
# print("Showing the contents of all files")
# for node in pki.nodes.values():
# tiny.show(node.key_path)
# tiny.show(node.csr_path)
# tiny.show(node.cert_path)
# tiny.show(node.crl_path)
print("Revoking every other certificate")
for node in pki.nodes.values():
if node.nid.startswith("target"):
if not int(node.nid.split("-")[-1])%2:
# Valid reasons: "unspecified", "keycompromise", "cacompromise", "affiliationchanged", "superseded", "cessationofoperation", "certificatehold", "removefromcrl"
tiny.do.revoke(node, reason="keycompromise")
print("Observe the crl changes of the root-ca")
tiny.show(pki.nodes["root-ca"].crl_path)
print("Create the verification environment")
tiny.do.verifyenv(pki, create=True)
print("Verify every file related to root-ca")
tiny.do.verify(pki.nodes["root-ca"])
# You can verify specific elements, by specifying "key", "csr", "cert", "crl" or "pkcs12"
# tiny.do.verify(pki.nodes["root-ca"], "key")
# You can verify the whole pki as follows
# tiny.do.verify_all(pki)
print("Destroy the verification environment")
tiny.do.verifyenv(pki, create=False)
# Uncomment this if you wish to delete the files
# print("Cleaning up the work direcotry")
# tiny.do.clean(pki)
| 32.820896 | 171 | 0.703502 |
b1667d176dd7399e7e7f6c6217ae79f8d38f3cee | 638 | py | Python | passive_capture/reporter/admin.py | Sillson/passive_capture_py | 167d08865400571c9eed60c0040cf67d27fa11b4 | [
"MIT"
] | null | null | null | passive_capture/reporter/admin.py | Sillson/passive_capture_py | 167d08865400571c9eed60c0040cf67d27fa11b4 | [
"MIT"
] | null | null | null | passive_capture/reporter/admin.py | Sillson/passive_capture_py | 167d08865400571c9eed60c0040cf67d27fa11b4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.gis import admin as geo_model_admin
from leaflet.admin import LeafletGeoAdmin
from .models import Forecasts, Dam, Species
# Forecast Model
admin.site.register(Forecasts, ForecastsAdmin)
# Species Model
admin.site.register(Species, SpeciesAdmin)
# Dam Model - requires GeoAdmin privelages
admin.site.register(Dam, DamAdmin)
| 26.583333 | 55 | 0.782132 |