Dataset schema (column, type, observed range):

  hexsha                                     string    length 40-40
  size                                       int64     5 to 2.06M
  ext                                        string    11 distinct values
  lang                                       string    1 distinct value
  max_stars_repo_path                        string    length 3-251
  max_stars_repo_name                        string    length 4-130
  max_stars_repo_head_hexsha                 string    length 40-78
  max_stars_repo_licenses                    list      length 1-10
  max_stars_count                            int64     1 to 191k
  max_stars_repo_stars_event_min_datetime    string    length 24-24
  max_stars_repo_stars_event_max_datetime    string    length 24-24
  max_issues_repo_path                       string    length 3-251
  max_issues_repo_name                       string    length 4-130
  max_issues_repo_head_hexsha                string    length 40-78
  max_issues_repo_licenses                   list      length 1-10
  max_issues_count                           int64     1 to 116k
  max_issues_repo_issues_event_min_datetime  string    length 24-24
  max_issues_repo_issues_event_max_datetime  string    length 24-24
  max_forks_repo_path                        string    length 3-251
  max_forks_repo_name                        string    length 4-130
  max_forks_repo_head_hexsha                 string    length 40-78
  max_forks_repo_licenses                    list      length 1-10
  max_forks_count                            int64     1 to 105k
  max_forks_repo_forks_event_min_datetime    string    length 24-24
  max_forks_repo_forks_event_max_datetime    string    length 24-24
  content                                    string    length 1 to 1.05M
  avg_line_length                            float64   1 to 1.02M
  max_line_length                            int64     3 to 1.04M
  alphanum_fraction                          float64   0 to 1
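Each row below follows this schema. As a minimal sketch of how a dump like this can be read with the Hugging Face datasets streaming API (the dataset path "bigcode/the-stack" and the data_dir are assumptions, not something this preview states; substitute the actual source):

from datasets import load_dataset

# Stream rows carrying exactly the columns listed above; the dataset path is an assumed example.
ds = load_dataset("bigcode/the-stack", data_dir="data/python", split="train", streaming=True)

for row in ds:
    # Keep small files from well-starred repos; count columns may be None (rendered as null below).
    if row["size"] < 1_000 and (row["max_stars_count"] or 0) > 100:
        print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])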
--------
hexsha: 4ad0334044a6b76510a6250d8488d1fea4817857
size: 326 | ext: py | lang: Python
max_stars:  lista01/rpc/ex01_cl.py @ SD-CC-UFG/leonardo.fleury (head 0a8dfc5752c739f5ff98890477355df8960ad730), licenses [MIT], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

import xmlrpc.client

if __name__ == '__main__':
    main()

stats: avg_line_length 20.375 | max_line_length 68 | alphanum_fraction 0.628834
--------
hexsha: 4ad04912e975ba67417ff28c203441d4697e2178
size: 846 | ext: py | lang: Python
max_stars:  autocomplete/migrations/0001_initial.py @ openshift-eng/art-dashboard-server (head af4e78b3d2213c30038cf69de646f25fd57c9e3c), licenses [Apache-2.0], count 1, events 2020-09-21T06:48:47.000Z to 2020-09-21T06:48:47.000Z
max_issues: same path @ adarshtri/build_interface_server (same head), licenses [Apache-2.0], count 5, events 2021-02-05T19:43:08.000Z to 2021-06-04T23:23:29.000Z
max_forks:  same as max_stars; count 6, events 2021-02-06T07:21:37.000Z to 2021-06-07T12:40:37.000Z
content:

# Generated by Django 3.0.7 on 2020-07-27 19:23

import build.models
from django.db import migrations, models

stats: avg_line_length 29.172414 | max_line_length 100 | alphanum_fraction 0.588652
--------
hexsha: 4ad19826ee08450eee4ee8d57542ce3dfd0b5399
size: 636 | ext: py | lang: Python
max_stars:  unet3d/config.py @ fcollman/pytorch-3dunet (head 303336bfdc0234f075c70e0c59759d09bc4081b8), licenses [MIT], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

import argparse
import os

import torch
import yaml

DEFAULT_DEVICE = 'cuda:0'

stats: avg_line_length 26.5 | max_line_length 97 | alphanum_fraction 0.720126
--------
hexsha: 4ad2b9e71e54721776c8640bd3dfe9980a8f4ea4
size: 654 | ext: py | lang: Python
max_stars:  src/graph_transpiler/webdnn/backend/webgl/optimize_rules/simplify_channel_mode_conversion/simplify_channel_mode_conversion.py @ gunpowder78/webdnn (head c659ea49007f91d178ce422a1eebe289516a71ee), licenses [MIT], count 1, events 2018-07-26T13:52:21.000Z to 2018-07-26T13:52:21.000Z
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_nonsense_channel_mode_conversion import \
    SimplifyNonsenseChannelModeConversion
from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \
    SimplifyRedundantChannelModeConversion
from webdnn.graph.optimize_rule import OptimizeRuleGroup

stats: avg_line_length 46.714286 | max_line_length 125 | alphanum_fraction 0.831804
--------
hexsha: 4ad2c65a15fe6f6a8837baee7e607c55330b95b9
size: 3,998 | ext: py | lang: Python
max_stars:  script.video.F4mProxy/lib/flvlib/constants.py @ akuala/REPO.KUALA (head ea9a157025530d2ce8fa0d88431c46c5352e89d4), licenses [Apache-2.0], count 105, events 2015-11-28T00:03:11.000Z to 2021-05-05T20:47:42.000Z
max_issues: same as max_stars; count 918, events 2015-11-28T14:12:40.000Z to 2022-03-23T20:24:49.000Z
max_forks:  same as max_stars; count 111, events 2015-12-01T14:06:10.000Z to 2020-08-01T10:44:39.000Z
content:

"""
The constants used in FLV files and their meanings.
"""

# Tag type
(TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18)

# Sound format
(SOUND_FORMAT_PCM_PLATFORM_ENDIAN,
 SOUND_FORMAT_ADPCM,
 SOUND_FORMAT_MP3,
 SOUND_FORMAT_PCM_LITTLE_ENDIAN,
 SOUND_FORMAT_NELLYMOSER_16KHZ,
 SOUND_FORMAT_NELLYMOSER_8KHZ,
 SOUND_FORMAT_NELLYMOSER,
 SOUND_FORMAT_G711_A_LAW,
 SOUND_FORMAT_G711_MU_LAW) = range(9)

(SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12)

(SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16)

sound_format_to_string = {
    SOUND_FORMAT_PCM_PLATFORM_ENDIAN: "Linear PCM, platform endian",
    SOUND_FORMAT_ADPCM: "ADPCM",
    SOUND_FORMAT_MP3: "MP3",
    SOUND_FORMAT_PCM_LITTLE_ENDIAN: "Linear PCM, little endian",
    SOUND_FORMAT_NELLYMOSER_16KHZ: "Nellymoser 16-kHz mono",
    SOUND_FORMAT_NELLYMOSER_8KHZ: "Nellymoser 8-kHz mono",
    SOUND_FORMAT_NELLYMOSER: "Nellymoser",
    SOUND_FORMAT_G711_A_LAW: "G.711 A-law logarithmic PCM",
    SOUND_FORMAT_G711_MU_LAW: "G.711 mu-law logarithmic PCM",
    SOUND_FORMAT_AAC: "AAC",
    SOUND_FORMAT_SPEEX: "Speex",
    SOUND_FORMAT_MP3_8KHZ: "MP3 8-kHz",
    SOUND_FORMAT_DEVICE_SPECIFIC: "Device-specific sound"
}

# Sound rate
(SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ,
 SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4)

sound_rate_to_string = {
    SOUND_RATE_5_5_KHZ: "5.5-kHz",
    SOUND_RATE_11_KHZ: "11-kHz",
    SOUND_RATE_22_KHZ: "22-kHz",
    SOUND_RATE_44_KHZ: "44-kHz"
}

# Sound size
(SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2)

sound_size_to_string = {
    SOUND_SIZE_8_BIT: "snd8Bit",
    SOUND_SIZE_16_BIT: "snd16Bit"
}

# Sound type
(SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2)

sound_type_to_string = {
    SOUND_TYPE_MONO: "sndMono",
    SOUND_TYPE_STEREO: "sndStereo"
}

# AAC packet type
(AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2)

aac_packet_type_to_string = {
    AAC_PACKET_TYPE_SEQUENCE_HEADER: "sequence header",
    AAC_PACKET_TYPE_RAW: "raw"
}

# Codec ID
(CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO,
 CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA,
 CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8)

codec_id_to_string = {
    CODEC_ID_JPEG: "JPEG",
    CODEC_ID_H263: "Sorenson H.263",
    CODEC_ID_SCREEN_VIDEO: "Screen video",
    CODEC_ID_VP6: "On2 VP6",
    CODEC_ID_VP6_WITH_ALPHA: "On2 VP6 with alpha channel",
    CODEC_ID_SCREEN_VIDEO_V2: "Screen video version 2",
    CODEC_ID_H264: "H.264"
}

# Frame type
(FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME,
 FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME,
 FRAME_TYPE_INFO_FRAME) = range(1, 6)

frame_type_to_string = {
    FRAME_TYPE_KEYFRAME: "keyframe",
    FRAME_TYPE_INTERFRAME: "interframe",
    FRAME_TYPE_DISPOSABLE_INTERFRAME: "disposable interframe",
    FRAME_TYPE_GENERATED_KEYFRAME: "generated keyframe",
    FRAME_TYPE_INFO_FRAME: "video info/command frame"
}

# H.264 packet type
(H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU,
 H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3)

h264_packet_type_to_string = {
    H264_PACKET_TYPE_SEQUENCE_HEADER: "sequence header",
    H264_PACKET_TYPE_NALU: "NAL unit",
    H264_PACKET_TYPE_END_OF_SEQUENCE: "sequence end"
}

# Value type
(VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING,
 VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL,
 VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE,
 VALUE_TYPE_ECMA_ARRAY) = range(9)

(VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE,
 VALUE_TYPE_LONGSTRING) = range(10, 13)

value_type_to_string = {
    VALUE_TYPE_NUMBER: 'Number',
    VALUE_TYPE_BOOLEAN: 'Boolean',
    VALUE_TYPE_STRING: 'String',
    VALUE_TYPE_OBJECT: 'Object',
    VALUE_TYPE_MOVIECLIP: 'MovieClip',
    VALUE_TYPE_NULL: 'Null',
    VALUE_TYPE_UNDEFINED: 'Undefined',
    VALUE_TYPE_REFERENCE: 'Reference',
    VALUE_TYPE_ECMA_ARRAY: 'ECMA Array',
    VALUE_TYPE_STRICT_ARRAY: 'Strict Array',
    VALUE_TYPE_DATE: 'Date',
    VALUE_TYPE_LONGSTRING: 'Longstring'
}

stats: avg_line_length 24.679012 | max_line_length 68 | alphanum_fraction 0.765883
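The record above is a plain lookup-table module; a minimal usage sketch against its own names (the import path flvlib.constants is inferred from the file path, and the session is illustrative only):

>>> from flvlib.constants import sound_format_to_string, SOUND_FORMAT_AAC
>>> sound_format_to_string[SOUND_FORMAT_AAC]
'AAC'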
--------
hexsha: 4ad2ee44fa3231c3be7b4de5ecea4010665c6467
size: 738 | ext: py | lang: Python
max_stars:  A2/semcor_chunk.py @ Rogerwlk/Natural-Language-Processing (head e1c0499180cec49ac0060aad7f0da00b61cfac94), licenses [MIT], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

from nltk.corpus import semcor

# if __name__ == "__main__":
#     s = semcor.tagged_sents(tag='sem')[0]
#     for chunk in s:
#         a = semcor_chunk(chunk)
#         print a.get_syn_set()
#     for chunk in s:
#         a = semcor_chunk(chunk)
#         print a.get_words()

stats: avg_line_length 19.945946 | max_line_length 58 | alphanum_fraction 0.682927
--------
hexsha: 4ad2ef7203bc120919170c5085d9fe1547885b6b
size: 8,318 | ext: py | lang: Python
max_stars:  gnn_model.py @ thoang3/graph_neural_network_benchmark (head 72dc031ed23c6684c43d6f2ace03425f9b69cee6), licenses [MIT], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

from load_cora import load_cora
from baseline_model import create_ffn
from utils import run_experiment
from utils import display_learning_curves

# Graph convolution layer

if __name__ == '__main__':
    papers, train_data, test_data, paper_idx, class_idx, citations, feature_names = load_cora(verbose=1)

    num_features = len(feature_names)
    num_classes = len(class_idx)

    hidden_units = [32, 32]
    learning_rate = 0.01
    dropout_rate = 0.5
    epochs = 300
    batch_size = 256

    # Create an edges array (sparse adjacency matrix) of shape [2, num_edges]
    edges = citations[["source", "target"]].to_numpy().T
    #print(edges)

    # Create an edge weights array of ones (default weights)
    edge_weights = tf.ones(shape=edges.shape[1])

    # Create a node features array of shape [num_nodes, num_features]
    node_features = tf.cast(
        papers.sort_values("paper_id")[feature_names].to_numpy(),
        dtype=tf.float32)

    # Create graph info tuple with node_features, edges, and edge_weights
    graph_info = (node_features, edges, edge_weights)

    print("Edges shape: ", edges.shape)
    print("Nodes shape: ", node_features.shape)

    gnn_model = GNNNodeClassifier(
        graph_info=graph_info,
        num_classes=num_classes,
        hidden_units=hidden_units,
        dropout_rate=dropout_rate,
        name="gnn_model"
    )

    print("GNN output shape: ", gnn_model([1, 10, 100]))
    gnn_model.summary()

    # Train the GNN model
    X_train = train_data.paper_id.to_numpy()
    y_train = train_data.subject
    history = run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate)

    # Plot the learning curves
    display_learning_curves(history, figure_name="gnn.png")

    # Evaluate on test data
    X_test = test_data.paper_id.to_numpy()
    y_test = test_data.subject
    _, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1)
    print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")

stats: avg_line_length 33.007937 | max_line_length 103 | alphanum_fraction 0.709425
--------
hexsha: 4ad31bb3fb3f281f7ca24b5d13a95985f1d2e610
size: 868 | ext: py | lang: Python
max_stars:  deps/lib/python3.5/site-packages/netdisco/discoverables/samsung_tv.py @ jfarmer08/hassio (head 792a6071a97bb33857c14c9937946233c620035c), licenses [MIT], count 78, events 2017-08-19T03:46:13.000Z to 2020-02-19T04:29:45.000Z
max_issues: same as max_stars; count 5, events 2017-08-21T16:33:08.000Z to 2018-06-21T18:37:18.000Z
max_forks:  same as max_stars; count 13, events 2017-08-19T16:46:08.000Z to 2018-11-05T23:11:34.000Z
content:

"""Discover Samsung Smart TV services."""
from . import SSDPDiscoverable
from ..const import ATTR_NAME

# For some models, Samsung forces a [TV] prefix to the user-specified name.
FORCED_NAME_PREFIX = '[TV]'

stats: avg_line_length 33.384615 | max_line_length 79 | alphanum_fraction 0.691244
--------
hexsha: 4ad35edb76ff8aacbd63002439bbf9d2f5995fd2
size: 59 | ext: py | lang: Python
max_stars:  pyecsca/sca/re/__init__.py @ scrambler-crypto/pyecsca (head 491abfb548455669abd470382a48dcd07b2eda87), licenses [MIT], count 24, events 2019-07-01T00:27:24.000Z to 2022-02-17T00:46:28.000Z
max_issues: same as max_stars; count 18, events 2020-12-10T15:08:56.000Z to 2022-03-01T11:44:37.000Z
max_forks:  same as max_stars; count 7, events 2020-02-20T18:44:29.000Z to 2021-11-30T21:16:44.000Z
content:

"""Package for reverse-engineering."""
from .rpa import *

stats: avg_line_length 14.75 | max_line_length 38 | alphanum_fraction 0.694915
--------
hexsha: 4ad38b4a5080c2f9ece1062934512164a3b8e38a
size: 324 | ext: py | lang: Python
max_stars:  sapmi/employees/migrations/0002_remove_employee_phone_alt.py @ Juhanostby/django-apotek-sapmi (head 972a05ca9d54eed62b640572fcf582cc8751d15a), licenses [MIT], count 1, events 2021-09-04T17:29:14.000Z to 2021-09-04T17:29:14.000Z
max_issues: same as max_stars; count 1, events 2021-07-19T15:54:27.000Z to 2021-07-20T23:01:57.000Z
max_forks:  same as max_stars; count null
content:

# Generated by Django 3.2.5 on 2021-12-21 19:42

from django.db import migrations

stats: avg_line_length 18 | max_line_length 47 | alphanum_fraction 0.58642
--------
hexsha: 4ad45250872794a6a29b08c6da2bcb27a740d5f5
size: 5,098 | ext: py | lang: Python
max_stars:  src/sim/basicExample/main.py @ andremtsilva/dissertacao (head 7c039ffe871468be0215c482adb42830fff586aa), licenses [MIT], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

"""
This is the most simple scenario with a basic topology, some users and a set of apps with only one service.

@author: Isaac Lera
"""
import os
import time
import json
import random
import logging.config

import networkx as nx
import numpy as np
from pathlib import Path

from yafs.core import Sim
from yafs.application import create_applications_from_json
from yafs.topology import Topology
from yafs.placement import JSONPlacement
from yafs.path_routing import DeviceSpeedAwareRouting
from yafs.distribution import deterministic_distribution
from yafs.stats import Stats

RANDOM_SEED = 1

if __name__ == '__main__':
    logging.config.fileConfig(os.getcwd() + '/logging.ini')

    nIterations = 1  # iteration for each experiment
    simulationDuration = 1000

    # Iteration for each experiment changing the seed of randoms
    for iteration in range(nIterations):
        random.seed(iteration)
        logging.info("Running experiment it: - %i" % iteration)

        start_time = time.time()
        main(stop_time=simulationDuration, it=iteration)
        print("\n--- %s seconds ---" % (time.time() - start_time))

    print("Simulation Done!")

    m = Stats(defaultPath="results/sim_trace")
    # print("\tNetwork bytes transmitted:")
    # print(f"\t\t{m.bytes_transmitted():.1f}")
    # m.df_link.head(15)  # from Stats class

    time_loops = [["M.USER.APP.0", "M.USER.APP.1", "M.USER.APP.2", "M.USER.APP.3"]]
    m.showResults2(10000, time_loops=time_loops)
    m.compute_times_df()

    print("\t- Network saturation -")
    print()
    print("\t\tAverage waiting messages : "
          f"{m.average_messages_not_transmitted()}")
    print()
    print("\t\tPeak of waiting messages :"
          f"{m.peak_messages_not_transmitted()}")
    print()
    print(f"\t\tShow Loops: {m.showLoops(time_loops)}")
    print()
    print(f"\t\tTOTAL messages not transmitted:"
          f" {m.messages_not_transmitted()}")
    print()
    #print(m.df.head())
    #print(m.df['time_latency'])
    #print(m.df_link.head())
    print(m.get_df_modules())

stats: avg_line_length 27.857923 | max_line_length 156 | alphanum_fraction 0.652217
--------
hexsha: 4ad523fc14942dd490ad41c526c6171f60967ac3
size: 476 | ext: py | lang: Python
max_stars:  Backend/models/risklayerPrognosis.py @ dbvis-ukon/coronavis (head f00374ac655c9d68541183d28ede6fe5536581dc), licenses [Apache-2.0], count 15, events 2020-04-24T20:18:11.000Z to 2022-01-31T21:05:05.000Z
max_issues: same as max_stars; count 2, events 2021-05-19T07:15:09.000Z to 2022-03-07T08:29:34.000Z
max_forks:  same as max_stars; count 4, events 2020-04-27T16:20:13.000Z to 2021-02-23T10:39:42.000Z
content:

from db import db

# class RisklayerPrognosisSchema(SQLAlchemyAutoSchema):
#     class Meta:
#         strict = True
#         model = RisklayerPrognosis
#
#     timestamp = fields.Timestamp(data_key="datenbestand")
#     prognosis = fields.Number(data_key="prognosis")

stats: avg_line_length 28 | max_line_length 76 | alphanum_fraction 0.72479
--------
hexsha: 4ad5abaadbbca74176e6ec4d71b60fea9789204e
size: 2,520 | ext: py | lang: Python
max_stars:  tests.py @ smartfile/django-secureform (head 3b7a8b90550327f370ea02c6886220b2db0517b5), licenses [MIT], count 12, events 2015-02-23T19:45:45.000Z to 2021-05-05T20:35:26.000Z
max_issues: same as max_stars; count 3, events 2015-08-09T18:14:16.000Z to 2018-10-23T03:16:38.000Z
max_forks:  same as max_stars; count 6, events 2015-05-09T07:46:00.000Z to 2019-11-27T09:54:57.000Z
content:

import os
import unittest

os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

import django
if django.VERSION >= (1, 7):
    django.setup()

from django import forms
from django.db import models
from django.forms.forms import NON_FIELD_ERRORS

from django_secureform.forms import SecureForm

if __name__ == '__main__':
    unittest.main()

stats: avg_line_length 28.965517 | max_line_length 94 | alphanum_fraction 0.660714
--------
hexsha: 4ad5badf5fa7e630a25fb87b42b8e063138bfecd
size: 495 | ext: py | lang: Python
max_stars:  opencv/resizing.py @ hackerman-101/Hacktoberfest-2022 (head 839f28293930987da55f8a2414efaa1cf9676cc9), licenses [MIT], count 1, events 2022-02-22T17:13:54.000Z to 2022-02-22T17:13:54.000Z
max_issues: same as max_stars; count 11, events 2022-01-24T20:42:11.000Z to 2022-02-27T23:58:24.000Z
max_forks:  same as max_stars; count null
content:

import cv2 as cv
import numpy as np

cap = cv.VideoCapture(1)

print(cap.get(cv.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

cap.set(3, 3000)
cap.set(4, 3000)

print(cap.get(cv.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

while (cap.isOpened()):
    ret, frame = cap.read()
    if (ret == True):
        cv.imshow("camVid", frame)
        if cv.waitKey(25) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
cv.destroyAllWindows()

stats: avg_line_length 18.333333 | max_line_length 45 | alphanum_fraction 0.656566
--------
hexsha: 4ad5ce0b4290abab4891890ac501c3156152672b
size: 13,902 | ext: py | lang: Python
max_stars:  minibenchmarks/go.py @ kevinxucs/pyston (head bdb87c1706ac74a0d15d9bc2bae53798678a5f14), licenses [Apache-2.0], count 1, events 2020-02-06T14:28:45.000Z to 2020-02-06T14:28:45.000Z
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count 1, events 2020-02-06T14:29:00.000Z to 2020-02-06T14:29:00.000Z
content:

# from pypy-benchmarks/own/chaos.py, with some minor modifications
# (more output, took out the benchmark harness)
#

import random, math, sys, time

SIZE = 9
GAMES = 200
KOMI = 7.5
EMPTY, WHITE, BLACK = 0, 1, 2
SHOW = {EMPTY: '.', WHITE: 'o', BLACK: 'x'}
PASS = -1
MAXMOVES = SIZE*SIZE*3
TIMESTAMP = 0
MOVES = 0

def user_move(board):
    while True:
        text = raw_input('?').strip()
        if text == 'p':
            return PASS
        if text == 'q':
            raise EOFError
        try:
            x, y = [int(i) for i in text.split()]
        except ValueError:
            continue
        if not (0 <= x < SIZE and 0 <= y < SIZE):
            continue
        pos = to_pos(x, y)
        if board.useful(pos):
            return pos

if __name__ == "__main__":
    main(100)

stats: avg_line_length 31.310811 | max_line_length 105 | alphanum_fraction 0.534743
--------
hexsha: 4ad6cfb56509f081f06c889b6fbe45a5dd8ec0f3
size: 24,265 | ext: py | lang: Python
max_stars:  tools/gen_usb_descriptor.py @ BrianPugh/circuitpython (head f0bb9635bf311013e7b1ff69d1a0542575cf9d0a), licenses [MIT, Unlicense, MIT-0, BSD-3-Clause], count 1, events 2020-08-29T12:06:14.000Z to 2020-08-29T12:06:14.000Z
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count 1, events 2021-01-18T00:52:39.000Z to 2021-01-18T00:52:39.000Z
content:

# SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors)
#
# SPDX-License-Identifier: MIT

import argparse
import os
import sys

sys.path.append("../../tools/usb_descriptor")

from adafruit_usb_descriptor import audio, audio10, cdc, hid, midi, msc, standard, util
import hid_report_descriptors

DEFAULT_INTERFACE_NAME = 'CircuitPython'
ALL_DEVICES = 'CDC,MSC,AUDIO,HID'
ALL_DEVICES_SET = frozenset(ALL_DEVICES.split(','))
DEFAULT_DEVICES = 'CDC,MSC,AUDIO,HID'

ALL_HID_DEVICES = 'KEYBOARD,MOUSE,CONSUMER,SYS_CONTROL,GAMEPAD,DIGITIZER,XAC_COMPATIBLE_GAMEPAD,RAW'
ALL_HID_DEVICES_SET = frozenset(ALL_HID_DEVICES.split(','))
# Digitizer works on Linux but conflicts with mouse, so omit it.
DEFAULT_HID_DEVICES = 'KEYBOARD,MOUSE,CONSUMER,GAMEPAD'

parser = argparse.ArgumentParser(description='Generate USB descriptors.')
parser.add_argument('--highspeed', default=False, action='store_true',
                    help='descriptor for highspeed device')
parser.add_argument('--manufacturer', type=str,
                    help='manufacturer of the device')
parser.add_argument('--product', type=str,
                    help='product name of the device')
parser.add_argument('--vid', type=lambda x: int(x, 16),
                    help='vendor id')
parser.add_argument('--pid', type=lambda x: int(x, 16),
                    help='product id')
parser.add_argument('--serial_number_length', type=int, default=32,
                    help='length needed for the serial number in digits')
parser.add_argument('--devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_DEVICES,
                    help='devices to include in descriptor (AUDIO includes MIDI support)')
parser.add_argument('--hid_devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_HID_DEVICES,
                    help='HID devices to include in HID report descriptor')
parser.add_argument('--interface_name', type=str,
                    help='The name/prefix to use in the interface descriptions',
                    default=DEFAULT_INTERFACE_NAME)
parser.add_argument('--no-renumber_endpoints', dest='renumber_endpoints', action='store_false',
                    help='use to not renumber endpoint')
parser.add_argument('--cdc_ep_num_notification', type=int, default=0,
                    help='endpoint number of CDC NOTIFICATION')
parser.add_argument('--cdc_ep_num_data_out', type=int, default=0,
                    help='endpoint number of CDC DATA OUT')
parser.add_argument('--cdc_ep_num_data_in', type=int, default=0,
                    help='endpoint number of CDC DATA IN')
parser.add_argument('--msc_ep_num_out', type=int, default=0,
                    help='endpoint number of MSC OUT')
parser.add_argument('--msc_ep_num_in', type=int, default=0,
                    help='endpoint number of MSC IN')
parser.add_argument('--hid_ep_num_out', type=int, default=0,
                    help='endpoint number of HID OUT')
parser.add_argument('--hid_ep_num_in', type=int, default=0,
                    help='endpoint number of HID IN')
parser.add_argument('--midi_ep_num_out', type=int, default=0,
                    help='endpoint number of MIDI OUT')
parser.add_argument('--midi_ep_num_in', type=int, default=0,
                    help='endpoint number of MIDI IN')
parser.add_argument('--output_c_file', type=argparse.FileType('w', encoding='UTF-8'), required=True)
parser.add_argument('--output_h_file', type=argparse.FileType('w', encoding='UTF-8'), required=True)

args = parser.parse_args()

unknown_devices = list(frozenset(args.devices) - ALL_DEVICES_SET)
if unknown_devices:
    raise ValueError("Unknown device(s)", unknown_devices)

unknown_hid_devices = list(frozenset(args.hid_devices) - ALL_HID_DEVICES_SET)
if unknown_hid_devices:
    raise ValueError("Unknown HID devices(s)", unknown_hid_devices)

if not args.renumber_endpoints:
    if 'CDC' in args.devices:
        if args.cdc_ep_num_notification == 0:
            raise ValueError("CDC notification endpoint number must not be 0")
        elif args.cdc_ep_num_data_out == 0:
            raise ValueError("CDC data OUT endpoint number must not be 0")
        elif args.cdc_ep_num_data_in == 0:
            raise ValueError("CDC data IN endpoint number must not be 0")

    if 'MSC' in args.devices:
        if args.msc_ep_num_out == 0:
            raise ValueError("MSC endpoint OUT number must not be 0")
        elif args.msc_ep_num_in == 0:
            raise ValueError("MSC endpoint IN number must not be 0")

    if 'HID' in args.devices:
        if args.hid_ep_num_out == 0:
            raise ValueError("HID endpoint OUT number must not be 0")
        elif args.hid_ep_num_in == 0:
            raise ValueError("HID endpoint IN number must not be 0")

    if 'AUDIO' in args.devices:
        if args.midi_ep_num_out == 0:
            raise ValueError("MIDI endpoint OUT number must not be 0")
        elif args.midi_ep_num_in == 0:
            raise ValueError("MIDI endpoint IN number must not be 0")

# langid must be the 0th string descriptor
LANGID_INDEX = StringIndex.index("\u0409", variable_name="language_id")
assert LANGID_INDEX == 0
SERIAL_NUMBER_INDEX = StringIndex.index("S" * args.serial_number_length, variable_name="usb_serial_number")

device = standard.DeviceDescriptor(
    description="top",
    idVendor=args.vid,
    idProduct=args.pid,
    iManufacturer=StringIndex.index(args.manufacturer),
    iProduct=StringIndex.index(args.product),
    iSerialNumber=SERIAL_NUMBER_INDEX)

# Interface numbers are interface-set local and endpoints are interface local
# until util.join_interfaces renumbers them.

cdc_union = cdc.Union(
    description="CDC comm",
    bMasterInterface=0x00,        # Adjust this after interfaces are renumbered.
    bSlaveInterface_list=[0x01])  # Adjust this after interfaces are renumbered.

cdc_call_management = cdc.CallManagement(
    description="CDC comm",
    bmCapabilities=0x01,
    bDataInterface=0x01)          # Adjust this after interfaces are renumbered.

cdc_comm_interface = standard.InterfaceDescriptor(
    description="CDC comm",
    bInterfaceClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE,
    iInterface=StringIndex.index("{} CDC control".format(args.interface_name)),
    subdescriptors=[
        cdc.Header(
            description="CDC comm",
            bcdCDC=0x0110),
        cdc_call_management,
        cdc.AbstractControlManagement(
            description="CDC comm",
            bmCapabilities=0x02),
        cdc_union,
        standard.EndpointDescriptor(
            description="CDC comm in",
            bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
            wMaxPacketSize=0x0040,
            bInterval=0x10)
    ])

cdc_data_interface = standard.InterfaceDescriptor(
    description="CDC data",
    bInterfaceClass=cdc.CDC_CLASS_DATA,
    iInterface=StringIndex.index("{} CDC data".format(args.interface_name)),
    subdescriptors=[
        standard.EndpointDescriptor(
            description="CDC data out",
            bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        standard.EndpointDescriptor(
            description="CDC data in",
            bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
    ])

cdc_interfaces = [cdc_comm_interface, cdc_data_interface]

msc_interfaces = [
    standard.InterfaceDescriptor(
        description="MSC",
        bInterfaceClass=msc.MSC_CLASS,
        bInterfaceSubClass=msc.MSC_SUBCLASS_TRANSPARENT,
        bInterfaceProtocol=msc.MSC_PROTOCOL_BULK,
        iInterface=StringIndex.index("{} Mass Storage".format(args.interface_name)),
        subdescriptors=[
            standard.EndpointDescriptor(
                description="MSC in",
                bEndpointAddress=args.msc_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
            standard.EndpointDescriptor(
                description="MSC out",
                bEndpointAddress=(args.msc_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT),
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
        ]
    )
]

# When there's only one hid_device, it shouldn't have a report id.
# Otherwise, report ids are assigned sequentially:
# args.hid_devices[0] has report_id 1
# args.hid_devices[1] has report_id 2
# etc.

report_ids = {}

if len(args.hid_devices) == 1:
    name = args.hid_devices[0]
    combined_hid_report_descriptor = hid.ReportDescriptor(
        description=name,
        report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0)))
    report_ids[name] = 0
else:
    report_id = 1
    concatenated_descriptors = bytearray()
    for name in args.hid_devices:
        concatenated_descriptors.extend(
            bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id)))
        report_ids[name] = report_id
        report_id += 1
    combined_hid_report_descriptor = hid.ReportDescriptor(
        description="MULTIDEVICE",
        report_descriptor=bytes(concatenated_descriptors))

# ASF4 expects keyboard and generic devices to have both in and out endpoints,
# and will fail (possibly silently) if both are not supplied.
hid_endpoint_in_descriptor = standard.EndpointDescriptor(
    description="HID in",
    bEndpointAddress=args.hid_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
    bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
    bInterval=8)

hid_endpoint_out_descriptor = standard.EndpointDescriptor(
    description="HID out",
    bEndpointAddress=args.hid_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
    bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
    bInterval=8)

hid_interfaces = [
    standard.InterfaceDescriptor(
        description="HID Multiple Devices",
        bInterfaceClass=hid.HID_CLASS,
        bInterfaceSubClass=hid.HID_SUBCLASS_NOBOOT,
        bInterfaceProtocol=hid.HID_PROTOCOL_NONE,
        iInterface=StringIndex.index("{} HID".format(args.interface_name)),
        subdescriptors=[
            hid.HIDDescriptor(
                description="HID",
                wDescriptorLength=len(bytes(combined_hid_report_descriptor))),
            hid_endpoint_in_descriptor,
            hid_endpoint_out_descriptor,
        ]
    ),
]

# Audio!
# In and out here are relative to CircuitPython

# USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython
midi_in_jack_emb = midi.InJackDescriptor(
    description="MIDI PC -> {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name)))
midi_out_jack_ext = midi.OutJackDescriptor(
    description="MIDI data out to user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    input_pins=[(midi_in_jack_emb, 1)],
    iJack=0)

# USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython
midi_in_jack_ext = midi.InJackDescriptor(
    description="MIDI data in from user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    iJack=0)
midi_out_jack_emb = midi.OutJackDescriptor(
    description="MIDI PC <- {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    input_pins=[(midi_in_jack_ext, 1)],
    iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name)))

audio_midi_interface = standard.InterfaceDescriptor(
    description="Midi goodness",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} MIDI".format(args.interface_name)),
    subdescriptors=[
        midi.Header(
            jacks_and_elements=[
                midi_in_jack_emb,
                midi_in_jack_ext,
                midi_out_jack_emb,
                midi_out_jack_ext
            ],
        ),
        standard.EndpointDescriptor(
            description="MIDI data out to {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]),
        standard.EndpointDescriptor(
            description="MIDI data in from {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0x0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]),
    ])

cs_ac_interface = audio10.AudioControlInterface(
    description="Empty audio control",
    audio_streaming_interfaces=[],
    midi_streaming_interfaces=[audio_midi_interface]
)

audio_control_interface = standard.InterfaceDescriptor(
    description="All the audio",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} Audio".format(args.interface_name)),
    subdescriptors=[
        cs_ac_interface,
    ])

# Audio streaming interfaces must occur before MIDI ones.
audio_interfaces = [audio_control_interface] + cs_ac_interface.audio_streaming_interfaces + cs_ac_interface.midi_streaming_interfaces

interfaces_to_join = []

if 'CDC' in args.devices:
    interfaces_to_join.append(cdc_interfaces)

if 'MSC' in args.devices:
    interfaces_to_join.append(msc_interfaces)

if 'HID' in args.devices:
    interfaces_to_join.append(hid_interfaces)

if 'AUDIO' in args.devices:
    interfaces_to_join.append(audio_interfaces)

# util.join_interfaces() will renumber the endpoints to make them unique across descriptors,
# and renumber the interfaces in order. But we still need to fix up certain
# interface cross-references.
interfaces = util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints)

# Now adjust the CDC interface cross-references.
cdc_union.bMasterInterface = cdc_comm_interface.bInterfaceNumber
cdc_union.bSlaveInterface_list = [cdc_data_interface.bInterfaceNumber]
cdc_call_management.bDataInterface = cdc_data_interface.bInterfaceNumber

cdc_iad = standard.InterfaceAssociationDescriptor(
    description="CDC IAD",
    bFirstInterface=cdc_comm_interface.bInterfaceNumber,
    bInterfaceCount=len(cdc_interfaces),
    bFunctionClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bFunctionSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bFunctionProtocol=cdc.CDC_PROTOCOL_NONE)

descriptor_list = []

if 'CDC' in args.devices:
    # Put the CDC IAD just before the CDC interfaces.
    # There appears to be a bug in the Windows composite USB driver that requests the
    # HID report descriptor with the wrong interface number if the HID interface is not given
    # first. However, it still fetches the descriptor anyway. We could reorder the interfaces but
    # the Windows 7 Adafruit_usbser.inf file thinks CDC is at Interface 0, so we'll leave it
    # there for backwards compatibility.
    descriptor_list.append(cdc_iad)
    descriptor_list.extend(cdc_interfaces)

if 'MSC' in args.devices:
    descriptor_list.extend(msc_interfaces)

if 'HID' in args.devices:
    descriptor_list.extend(hid_interfaces)

if 'AUDIO' in args.devices:
    # Only add the control interface because other audio interfaces are managed by it to ensure the
    # correct ordering.
    descriptor_list.append(audio_control_interface)

# Finally, build the composite descriptor.
configuration = standard.ConfigurationDescriptor(
    description="Composite configuration",
    wTotalLength=(standard.ConfigurationDescriptor.bLength +
                  sum([len(bytes(x)) for x in descriptor_list])),
    bNumInterfaces=len(interfaces))
descriptor_list.insert(0, configuration)

string_descriptors = [standard.StringDescriptor(string) for string in StringIndex.strings_in_order()]
serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX]

c_file = args.output_c_file
h_file = args.output_h_file

c_file.write("""\
#include <stdint.h>

#include "py/objtuple.h"
#include "shared-bindings/usb_hid/Device.h"

#include "{H_FILE_NAME}"

""".format(H_FILE_NAME=h_file.name))

c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=device.description, CLASS=device.__class__))

c_file.write("""\
const uint8_t usb_desc_dev[] = {
""")
for b in bytes(device):
    c_file.write("0x{:02x}, ".format(b))

c_file.write("""\
};
""")

c_file.write("""\
const uint8_t usb_desc_cfg[] = {
""")

# Write out all the regular descriptors as one long array (that's how ASF4 does it).
descriptor_length = 0
for descriptor in descriptor_list:
    c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__))

    b = bytes(descriptor)
    notes = descriptor.notes()
    i = 0

    # This prints each subdescriptor on a separate line.
    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length):
            c_file.write("0x{:02x}, ".format(b[i + j]))
        c_file.write("// " + notes[n])
        n += 1
        c_file.write("\n")
        i += length
    descriptor_length += len(b)

c_file.write("""\
};
""")

pointers_to_strings = []

for idx, descriptor in enumerate(string_descriptors):
    c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__))

    b = bytes(descriptor)
    notes = descriptor.notes()
    i = 0

    # This prints each subdescriptor on a separate line.
    variable_name = StringIndex.index_to_variable[idx]
    if not variable_name:
        variable_name = "string_descriptor{}".format(idx)

    const = "const "
    if variable_name == "usb_serial_number":
        const = ""
    c_file.write("""\
{const}uint16_t {NAME}[] = {{
""".format(const=const, NAME=variable_name))
    pointers_to_strings.append("{name}".format(name=variable_name))
    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length // 2):
            c_file.write("0x{:04x}, ".format(b[i + 2*j + 1] << 8 | b[i + 2*j]))
        n += 1
        c_file.write("\n")
        i += length
    c_file.write("""\
};
""")

c_file.write("""\
// array of pointer to string descriptors
uint16_t const * const string_desc_arr [] = {
""")
c_file.write(""",\
""".join(pointers_to_strings))
c_file.write("""
};
""")

c_file.write("\n")

hid_descriptor_length = len(bytes(combined_hid_report_descriptor))

# Now the values we need for the .h file.
h_file.write("""\
#ifndef MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
#define MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H

#include <stdint.h>

extern const uint8_t usb_desc_dev[{device_length}];
extern const uint8_t usb_desc_cfg[{configuration_length}];
extern uint16_t usb_serial_number[{serial_number_length}];
extern uint16_t const * const string_desc_arr [{string_descriptor_length}];

extern const uint8_t hid_report_descriptor[{hid_report_descriptor_length}];

#define CFG_TUSB_RHPORT0_MODE ({rhport0_mode})

#define USB_HID_NUM_DEVICES {hid_num_devices}

// Vendor name included in Inquiry response, max 8 bytes
#define CFG_TUD_MSC_VENDOR "{msc_vendor}"

// Product name included in Inquiry response, max 16 bytes
#define CFG_TUD_MSC_PRODUCT "{msc_product}"
""".format(serial_number_length=len(bytes(serial_number_descriptor)) // 2,
           device_length=len(bytes(device)),
           configuration_length=descriptor_length,
           max_configuration_length=max(hid_descriptor_length, descriptor_length),
           string_descriptor_length=len(pointers_to_strings),
           hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)),
           rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE',
           hid_num_devices=len(args.hid_devices),
           msc_vendor=args.manufacturer[:8],
           msc_product=args.product[:16]))

# Write out the report descriptor and info
c_file.write("""\
const uint8_t hid_report_descriptor[{HID_DESCRIPTOR_LENGTH}] = {{
""".format(HID_DESCRIPTOR_LENGTH=hid_descriptor_length))

for b in bytes(combined_hid_report_descriptor):
    c_file.write("0x{:02x}, ".format(b))

c_file.write("""\
};
""")

# Write out USB HID report buffer definitions.
for name in args.hid_devices:
    c_file.write("""\
static uint8_t {name}_report_buffer[{report_length}];
""".format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].report_length))

    if hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length > 0:
        c_file.write("""\
static uint8_t {name}_out_report_buffer[{report_length}];
""".format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length))

# Write out table of device objects.
c_file.write("""
usb_hid_device_obj_t usb_hid_devices[] = {
""")
for name in args.hid_devices:
    device_data = hid_report_descriptors.HID_DEVICE_DATA[name]
    out_report_buffer = '{}_out_report_buffer'.format(name.lower()) if device_data.out_report_length > 0 else 'NULL'
    c_file.write("""\
    {{
        .base = {{ .type = &usb_hid_device_type }},
        .report_buffer = {name}_report_buffer,
        .report_id = {report_id},
        .report_length = {report_length},
        .usage_page = {usage_page:#04x},
        .usage = {usage:#04x},
        .out_report_buffer = {out_report_buffer},
        .out_report_length = {out_report_length},
    }},
""".format(name=name.lower(), report_id=report_ids[name],
           report_length=device_data.report_length,
           usage_page=device_data.usage_page,
           usage=device_data.usage,
           out_report_buffer=out_report_buffer,
           out_report_length=device_data.out_report_length))

c_file.write("""\
};
""")

# Write out tuple of device objects.
c_file.write("""
mp_obj_tuple_t common_hal_usb_hid_devices = {{
    .base = {{
        .type = &mp_type_tuple,
    }},
    .len = {num_devices},
    .items = {{
""".format(num_devices=len(args.hid_devices)))

for idx in range(len(args.hid_devices)):
    c_file.write("""\
        (mp_obj_t) &usb_hid_devices[{idx}],
""".format(idx=idx))

c_file.write("""\
    },
};
""")

h_file.write("""\
#endif // MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
""")

stats: avg_line_length 37.330769 | max_line_length 135 | alphanum_fraction 0.697465
4ad7684b6c380ab5df46b6e04110892e72e1a9ab
7,500
py
Python
bclstm/train_meld.py
Columbine21/THUIAR-ERC
90e928e1ce777152e459dbc487acf04c32cbc645
[ "MIT" ]
1
2021-01-28T13:43:32.000Z
2021-01-28T13:43:32.000Z
bclstm/train_meld.py
Columbine21/THUIAR-ERC
90e928e1ce777152e459dbc487acf04c32cbc645
[ "MIT" ]
null
null
null
bclstm/train_meld.py
Columbine21/THUIAR-ERC
90e928e1ce777152e459dbc487acf04c32cbc645
[ "MIT" ]
null
null
null
from tqdm import tqdm import pandas as pd import numpy as np, argparse, time, pickle, random, os, datetime import torch import torch.optim as optim from model import MaskedNLLLoss, BC_LSTM from dataloader import MELDDataLoader from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report def setup_seed(seed): """ Manually Fix the random seed to get deterministic results. """ torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.benchmark = False torch.backends.cudnn.deterministic = True if __name__ == '__main__': args = parse_args() args.cuda = torch.cuda.is_available() if args.cuda: print('Running on GPU') else: print('Running on CPU') for seed in [1, 11, 111, 1111, 11111]: setup_seed(seed) args.seed = seed print(args) model = BC_LSTM(args) print('MELD BC_LSTM MODULE ...') if args.cuda: model.cuda() loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2) lf = open('logs/cnn_meld_logs.txt', 'a') dataloader = MELDDataLoader(args) valid_losses, valid_fscores = [], [] test_fscores, test_accuracys, test_losses = [], [], [] best_loss, best_label, best_pred, best_mask = None, None, None, None for e in range(args.epochs): start_time = time.time() train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model, loss_function, dataloader['train'], e, optimizer, mode='train') valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid') test_loss, test_acc, test_label, test_pred, test_mask, test_fscore = train_or_eval_model(model, loss_function, dataloader['test'], e, mode='test') valid_losses.append(valid_loss) valid_fscores.append(valid_fscore) test_losses.append(test_loss) test_accuracys.append(test_acc) test_fscores.append(test_fscore) x = 'epoch: {}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {}, acc: {}, fscore: {}, test_loss: {}, acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2)) print (x) lf.write(x + '\n') valid_fscores = np.array(valid_fscores).transpose() test_fscores = np.array(test_fscores).transpose() # [1, epoches] test_accuracys = np.array(test_accuracys).transpose() # [epoches] f1_score1 = test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)] f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])] acc_score2 = test_accuracys[np.argmax(valid_fscores[0])] scores = [acc_score1, f1_score1, acc_score2, f1_score2] scores = [str(item) for item in scores] print ('Test Scores: Weighted F1') print('@Best Valid Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1)) print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2)) rf = open('results/cnn_meld_results.txt', 'a') rf.write('\t'.join(scores) + '\t' + str(args) + '\n') rf.close()
41.666667
305
0.625867
--------
hexsha: 4ad876898c7dcfaaa80fe53e3fa05c848775c82a
size: 1,828 | ext: py | lang: Python
max_stars:  bin/p3starcoordcheck.py @ emkailu/PAT3DEM (head 74e7a0f30179e49ea5c7da1bea893e21a3ed601a), licenses [MIT], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

#!/usr/bin/env python

import os
import sys
import argparse
import pat3dem.star as p3s
import math

if __name__ == '__main__':
    main()

stats: avg_line_length 30.466667 | max_line_length 157 | alphanum_fraction 0.639497
--------
hexsha: 4ad89a5bebd4952730caed6adc03938d82e1dcd1
size: 4,251 | ext: py | lang: Python
max_stars:  src/review_scraper.py @ ryankirkland/voice-of-the-customer (head 0214af45cc6aa76bfce64065f07c3f4781ee045e), licenses [MIT], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

from bs4 import BeautifulSoup
import pandas as pd
import requests
import time
import sys

def reviews_scraper(asin_list, filename):
    '''
    Takes a list of asins, retrieves html for reviews page, and parses out key data points

    Parameters
    ----------
    List of ASINs (list of strings)

    Returns:
    -------
    review information (list), reviews_df (Pandas DataFrame)
    '''
    asin_list = [asin_list]
    print(asin_list)
    reviews = []
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0",
               "Accept-Encoding": "gzip, deflate",
               "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
               "DNT": "1", "Connection": "close",
               "Upgrade-Insecure-Requests": "1"}

    for asin in asin_list:
        print(f'Collecting reviews for {asin}')
        passed_last_page = None
        counter = 1
        while (passed_last_page == None) and (counter <= 10):
            print(len(reviews))
            reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}'
            print(reviews_url)
            rev = requests.get(reviews_url, headers=headers)
            print(rev.status_code)
            reviews_page_content = rev.content
            review_soup = BeautifulSoup(reviews_page_content, features='lxml')
            print(review_soup)
            passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'})
            if passed_last_page == None:
                for d in review_soup.findAll('div', attrs={'data-hook': 'review'}):
                    # print(d)
                    try:
                        date = d.find('span', attrs={'data-hook': 'review-date'})
                        date = date.text.split(' ')[-3:]
                        date = ' '.join(date)
                    except:
                        date = 'null'
                    try:
                        title = d.find('a', attrs={'data-hook': 'review-title'})
                    except:
                        title = 'null'
                    try:
                        product = d.find('a', attrs={'data-hook': 'format-strip'})
                        product = product.text
                    except:
                        product = 'null'
                    try:
                        review_asin = product['href'].split('/')[3]
                    except:
                        review_asin = asin
                    try:
                        verified = d.find('span', attrs={'data-hook': 'avp-badge'})
                        if verified == None:
                            verified = 'Not Verified'
                        else:
                            verified = verified.text
                    except:
                        verified = 'null'
                    try:
                        description = d.find('span', attrs={'data-hook': 'review-body'})
                    except:
                        description = 'null'
                    try:
                        reviewer_name = d.find('span', attrs={'class': 'a-profile-name'})
                    except:
                        reviewer_name = 'null'
                    try:
                        stars = d.find('span', attrs={'class': 'a-icon-alt'})
                    except:
                        stars = 'null'

                    reviews.append([review_asin, product, date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])])
            else:
                pass
            counter += 1
            time.sleep(15)

    reviews_df = pd.DataFrame(reviews, columns=['asin', 'product', 'date', 'verified', 'title', 'desc', 'reviewer_name', 'rating'])
    reviews_df.to_csv(f'data/reviews/{filename}')
    print(f'{len(reviews)} reviews for {len(asin_list)} asins stored successfully in {filename}')

    return reviews, reviews_df

if __name__ == '__main__':
    reviews_scraper(*sys.argv[1:])

stats: avg_line_length 42.089109 | max_line_length 285 | alphanum_fraction 0.482945
--------
hexsha: 4adace3be34277664a2e8a315913402feb463667
size: 3,788 | ext: py | lang: Python
max_stars:  lumberdata/metadata.py @ cglumberjack/lumber_metadata (head aebca5dbecb8d7684b1b169bf2961e4ab0daca2b), licenses [MIT], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

# noinspection PyUnresolvedReferences
import os
import re

# TODO I'm going to need to make a dictionary for my big list of stuff i care about and what's needed for
#  every file type....

RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate',
       'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make',
       'EXIF:Model', 'EXIF:LensMake']
MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate',
       'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make',
       'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration']
R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO',
       'Date', 'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber',
       'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength',
       'Shutter(deg)', 'SensorID', 'SensorName', 'Take']


def check_exiftool():
    """
    checks if exiftool is installed.
    :return:
    """
    pass


def check_redline():
    """
    checks if redline is installed
    :return:
    """
    pass


def check_ffprobe():
    """
    checks if ffprobe is installed
    :return:
    """
    pass


def get(filein, tool='exiftool', print_output=False):
    """
    Due to issues with the exiftool module this is provided as a way to parse output directly from
    exiftool through the system commands and cglexecute. For the moment it's only designed to get
    the lumberdata for a single file.
    :param filein:
    :return: dictionary containing lumberdata from exiftool
    """
    ext = os.path.splitext(filein)[-1]
    d = {}
    if tool == 'exiftool':
        command = r'exiftool %s' % filein
        output = cgl_execute(command=command, verbose=False, print_output=print_output)
        for each in output['printout']:
            key, value = re.split("\s+:\s+", each)
            d[key] = value
        return d
    elif tool == 'ffprobe':
        command = r'%s %s' % ('ffprobe', filein)
        output = cgl_execute(command=command)
        for each in output['printout']:
            try:
                values = re.split(":\s+", each)
                key = values[0]
                values.pop(0)
                if 'Stream' in key:
                    split_v = values[1].split(',')
                    d['Image Size'] = split_v[2].split()[0]
                    d['Source Image Width'], d['Source Image Height'] = d['Image Size'].split('x')
                    d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace(' ', '')
                if 'Duration' in key:
                    d['Track Duration'] = '%s s' % values[0].split(',')[0]
                value = ' '.join(values)
                d[key] = value
            except ValueError:
                print('skipping %s' % each)
        return d


def get_red_data(filein):
    """
    method for pulling lumberdata from r3d files. REDLINE is a command line interface from RED
    that is required for this
    https://www.red.com/downloads/options?itemInternalId=16144
    :param filein:
    :return:
    """
    file_, ext_ = os.path.splitext(filein)
    if ext_.upper() == '.R3D':
        command = r'REDLINE --i %s --printMeta 1' % filein
        d = {}
        for line in os.popen(command).readlines():
            line = line.strip('\n')
            line = line.replace('\t', '')
            line = line.replace(' ', '')
            try:
                key_, value = line.split(':', 1)
                if key_ != 'None':
                    d[key_] = value
            except ValueError:
                pass
        return d

stats: avg_line_length 35.735849 | max_line_length 120 | alphanum_fraction 0.573654
--------
hexsha: 4add579fe7516845335bc7bc7e7d3e61d0a5f88e
size: 27,214 | ext: py | lang: Python
max_stars:  rlbench/task_environment.py @ robfiras/RLBench (head 97ab9526b6efb718f2b5aae40897ccd75aeff11e), licenses [BSD-3-Clause], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

import logging
from typing import List, Callable

import numpy as np
from pyquaternion import Quaternion

from pyrep import PyRep
from pyrep.errors import IKError
from pyrep.objects import Dummy, Object

from rlbench import utils
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.backend.exceptions import BoundaryError, WaypointError
from rlbench.backend.observation import Observation
from rlbench.backend.robot import Robot
from rlbench.backend.scene import Scene
from rlbench.backend.task import Task
from rlbench.demo import Demo
from rlbench.observation_config import ObservationConfig

_TORQUE_MAX_VEL = 9999
_DT = 0.05
_MAX_RESET_ATTEMPTS = 40
_MAX_DEMO_ATTEMPTS = 10

stats: avg_line_length 44.833608 | max_line_length 124 | alphanum_fraction 0.596017
--------
hexsha: 4add672a5d82fff4c573be986ee4381ccf2640c3
size: 11,795 | ext: py | lang: Python
max_stars:  tests/generic_relations/test_forms.py @ Yoann-Vie/esgi-hearthstone (head 115d03426c7e8e80d89883b78ac72114c29bed12), licenses [PSF-2.0, BSD-3-Clause], count null
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django.test.utils import isolate_apps

from .models import (
    Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem,
)

stats: avg_line_length 49.145833 | max_line_length 115 | alphanum_fraction 0.667147
--------
hexsha: 4add77a89d96d39ac35506a52c38ceda993b7f43
size: 3,192 | ext: py | lang: Python
max_stars:  src/sage/rings/polynomial/pbori/fglm.py @ tamnguyen135/sage (head 2c87dc16f26604033bb1b2d1dc6796d279c88b16), licenses [BSL-1.0], count 1, events 2020-11-12T04:06:19.000Z to 2020-11-12T04:06:19.000Z
max_issues: same as max_stars; count null
max_forks:  same as max_stars; count null
content:

from .PyPolyBoRi import (BooleSet, Polynomial, BoolePolynomialVector,
                         FGLMStrategy)


def _fglm(I, from_ring, to_ring):
    r"""
    Unchecked variant of fglm
    """
    vec = BoolePolynomialVector(I)
    return FGLMStrategy(from_ring, to_ring, vec).main()


def fglm(I, from_ring, to_ring):
    r"""
    Convert a *reduced* Groebner basis in from_ring to a Groebner basis in to_ring.

    It acts independently of the global ring, which is restored at the end of
    the computation.

    TESTS::

        sage: from sage.rings.polynomial.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: r=declare_ring(['x','y','z'],dict())
        sage: old_ring = r
        sage: new_ring = old_ring.clone(ordering=dp_asc)
        sage: (x,y,z) = [old_ring.variable(i) for i in range(3)]
        sage: ideal=[x+z, y+z]# lp Groebner basis
        sage: from sage.rings.polynomial.pbori.fglm import fglm
        sage: list(fglm(ideal, old_ring, new_ring))
        [y + x, z + x]
    """
    for poly in I:
        if poly.ring().id() != from_ring.id():
            raise ValueError("Ideal I must be from the first ring argument")
    return _fglm(I, from_ring, to_ring)


def vars_real_divisors(monomial, monomial_set):
    r"""
    Return all elements of monomial_set which, multiplied by a variable,
    result in monomial.

    TESTS::

        sage: from sage.rings.polynomial.pbori.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring
        sage: r=Ring(1000)
        sage: x = r.variable
        sage: b=BooleSet([x(1)*x(2),x(2)])
        sage: from sage.rings.polynomial.pbori.fglm import vars_real_divisors
        sage: vars_real_divisors(x(1)*x(2)*x(3),b)
        {{x(1),x(2)}}
    """
    return BooleSet(Polynomial(monomial_set.divisors_of(monomial)). \
        graded_part(monomial.deg() - 1))


def m_k_plus_one(completed_elements, variables):
    r"""
    Calculates $m_{k+1}$ from the FGLM algorithm as described in Wichmann's
    diploma thesis. It would be nice to be able to efficiently extract the
    smallest term of a polynomial.

    TESTS::

        sage: from sage.rings.polynomial.pbori.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring
        sage: r=Ring(1000)
        sage: x = r.variable
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Monomial
        sage: s=BooleSet([x(1)*x(2),x(1),x(2),Monomial(r),x(3)])
        sage: from sage.rings.polynomial.pbori.fglm import m_k_plus_one
        sage: variables=BooleSet([x(1),x(2),x(3)])
        sage: m_k_plus_one(s,variables)
        x(2)*x(3)
        sage: r2 = r.clone(ordering=dp_asc)
        sage: m_k_plus_one(r2(s).set(),r2(variables).set())
        x(1)*x(3)
    """
    return sorted(completed_elements.cartesian_product(variables).diff(
        completed_elements))[0]

stats: avg_line_length 37.116279 | max_line_length 95 | alphanum_fraction 0.643797
4add8cc9f3e45d7c32a6f558ec3d3dca3bae287a
797
py
Python
ferry/embed/umap_reduce.py
coursetable/ferry
f369b9588557c359af8589f2575a03493d6b08b6
[ "MIT" ]
4
2020-11-12T19:37:06.000Z
2021-12-14T01:38:39.000Z
ferry/embed/umap_reduce.py
coursetable/ferry
f369b9588557c359af8589f2575a03493d6b08b6
[ "MIT" ]
96
2020-09-08T05:17:17.000Z
2022-03-31T23:12:51.000Z
ferry/embed/umap_reduce.py
coursetable/ferry
f369b9588557c359af8589f2575a03493d6b08b6
[ "MIT" ]
2
2021-03-03T23:02:40.000Z
2021-06-17T23:33:05.000Z
""" Uses UMAP (https://umap-learn.readthedocs.io/en/latest/index.html) to reduce course embeddings to two dimensions for visualization. """ import pandas as pd import umap from sklearn.preprocessing import StandardScaler from ferry import config courses = pd.read_csv( config.DATA_DIR / "course_embeddings/courses_deduplicated.csv", index_col=0, ) # mypy: ignore-errors embeddings = pd.read_hdf( config.DATA_DIR / "course_embeddings/fasttext_embeddings.h5", key="embeddings", ) embeddings = StandardScaler().fit_transform(embeddings) reducer = umap.UMAP() umap_embeddings = reducer.fit_transform(embeddings) courses["umap1"] = umap_embeddings[:, 0] courses["umap2"] = umap_embeddings[:, 1] courses.to_csv(config.DATA_DIR / "course_embeddings/courses_deduplicated_umap.csv")
25.709677
83
0.771644
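The script above is a straight pipeline: standardize the embeddings, then project them to 2-D with UMAP. A minimal self-contained sketch of the same idea on synthetic data (array shapes and parameters here are illustrative assumptions, not taken from the repository):

import numpy as np
import umap
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
vectors = rng.normal(size=(500, 100))       # stand-in for course embeddings

scaled = StandardScaler().fit_transform(vectors)
coords = umap.UMAP(random_state=0).fit_transform(scaled)  # shape (500, 2)
print(coords[:3])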
4ade3cbddad00f03add91a88139ed29e5accd6ee
1,359
py
Python
flora_fauna.py
zhumakova/ClassProject
b869258706dae7c8e8ab723c61a45fd78e26494f
[ "MIT" ]
null
null
null
flora_fauna.py
zhumakova/ClassProject
b869258706dae7c8e8ab723c61a45fd78e26494f
[ "MIT" ]
null
null
null
flora_fauna.py
zhumakova/ClassProject
b869258706dae7c8e8ab723c61a45fd78e26494f
[ "MIT" ]
null
null
null
import inheritance
# Predator and Mammal are used unqualified below, so they are assumed to be
# defined in the local inheritance module and imported here explicitly.
from inheritance import Predator, Mammal

shark = Predator('baby shark', 'sea', 'all', 20)
giraffe = Mammal('malwan', 'earth', 20)
giraffe.check_planet(inheritance.friendly)
marti = Mammal('marti', 'earth', 20)
marti.check_planet(inheritance.friendly)
print(inheritance.friendly.__dict__)
print(inheritance.Planet.__dict__)
23.033898
81
0.659308
4adecc45d925a985d290d61ac2e4d5096ee82755
3,057
py
Python
jug/subcommands/demo.py
rdenham/jug
40925445a5f96f9eec237de37e46e6fabcce6526
[ "MIT" ]
309
2015-02-09T09:33:52.000Z
2022-03-26T22:30:18.000Z
jug/subcommands/demo.py
zhaoxiugao/jug
9c5e3930777658699bc9579c872a010a7c3bffe3
[ "MIT" ]
61
2015-01-25T18:11:14.000Z
2020-10-15T06:52:13.000Z
jug/subcommands/demo.py
zhaoxiugao/jug
9c5e3930777658699bc9579c872a010a7c3bffe3
[ "MIT" ]
51
2015-01-25T17:40:31.000Z
2022-02-28T20:42:42.000Z
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from . import SubCommand

__all__ = ['DemoCommand']


demo = DemoCommand()
26.815789
80
0.700687
4ae1184aa79f99e44e7d8332e7ab1d618e3d5b6f
16,307
py
Python
search/controllers/simple/tests.py
ID2797370/arxiv-search
889402e8eef9a2faaa8e900978cd27ff2784ce33
[ "MIT" ]
35
2018-12-18T02:51:09.000Z
2022-03-30T04:43:20.000Z
search/controllers/simple/tests.py
ID2797370/arxiv-search
889402e8eef9a2faaa8e900978cd27ff2784ce33
[ "MIT" ]
172
2018-02-02T14:35:11.000Z
2018-12-04T15:35:30.000Z
search/controllers/simple/tests.py
ID2797370/arxiv-search
889402e8eef9a2faaa8e900978cd27ff2784ce33
[ "MIT" ]
13
2019-01-10T22:01:48.000Z
2021-11-05T12:25:08.000Z
"""Tests for simple search controller, :mod:`search.controllers.simple`.""" from http import HTTPStatus from unittest import TestCase, mock from werkzeug.datastructures import MultiDict from werkzeug.exceptions import InternalServerError, NotFound, BadRequest from search.domain import SimpleQuery from search.controllers import simple from search.controllers.simple.forms import SimpleSearchForm from search.services.index import ( IndexConnectionError, QueryError, DocumentNotFound, )
37.145786
79
0.612682
4ae16756e558b0122e3a75646fd26aece7eef166
19,270
py
Python
kuri_wandering_robot/scripts/kuri_wandering_robot_executive_node.py
hcrlab/kuri_wandering_robot
9c747bfe27e3c3450fd4717e26b866af2ef70149
[ "BSD-3-Clause" ]
null
null
null
kuri_wandering_robot/scripts/kuri_wandering_robot_executive_node.py
hcrlab/kuri_wandering_robot
9c747bfe27e3c3450fd4717e26b866af2ef70149
[ "BSD-3-Clause" ]
null
null
null
kuri_wandering_robot/scripts/kuri_wandering_robot_executive_node.py
hcrlab/kuri_wandering_robot
9c747bfe27e3c3450fd4717e26b866af2ef70149
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python

# ROS Libraries
import actionlib
from actionlib_msgs.msg import GoalStatus
from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from kuri_wandering_robot.msg import Power
from wandering_behavior.msg import WanderAction, WanderGoal
import rospy
from sensor_msgs.msg import CompressedImage
from std_msgs.msg import Empty
from trajectory_msgs.msg import JointTrajectoryPoint

# Python Default Libraries
import base64
import csv
from enum import Enum
import os
import requests
import threading
import time
import traceback

# Custom Libraries
from sent_messages_database import SentMessagesDatabase

if __name__ == "__main__":
    rospy.init_node("kuri_wandering_robot")
    kuri_wandering_robot = KuriWanderingRobot()
    rospy.spin()
47.69802
195
0.631915
4ae20be2afc3642f06f66e4a1b7fcf4056c7970b
11,636
py
Python
src/python/nimbusml/internal/entrypoints/trainers_lightgbmbinaryclassifier.py
montehoover/NimbusML
f6be39ce9359786976429bab0ccd837e849b4ba5
[ "MIT" ]
134
2018-11-01T22:15:24.000Z
2019-05-04T11:30:08.000Z
src/python/nimbusml/internal/entrypoints/trainers_lightgbmbinaryclassifier.py
montehoover/NimbusML
f6be39ce9359786976429bab0ccd837e849b4ba5
[ "MIT" ]
226
2019-05-07T19:00:44.000Z
2021-01-06T07:59:48.000Z
src/python/nimbusml/internal/entrypoints/trainers_lightgbmbinaryclassifier.py
montehoover/NimbusML
f6be39ce9359786976429bab0ccd837e849b4ba5
[ "MIT" ]
43
2019-05-15T20:19:42.000Z
2022-03-30T10:26:07.000Z
# - Generated by tools/entrypoint_compiler.py: do not edit by hand

"""
Trainers.LightGbmBinaryClassifier
"""

import numbers

from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist


def trainers_lightgbmbinaryclassifier(
        training_data,
        predictor_model=None,
        number_of_iterations=100,
        learning_rate=None,
        number_of_leaves=None,
        minimum_example_count_per_leaf=None,
        feature_column_name='Features',
        booster=None,
        label_column_name='Label',
        example_weight_column_name=None,
        row_group_column_name=None,
        normalize_features='Auto',
        caching='Auto',
        unbalanced_sets=False,
        weight_of_positive_examples=1.0,
        sigmoid=0.5,
        evaluation_metric='Logloss',
        maximum_bin_count_per_feature=255,
        verbose=False,
        silent=True,
        number_of_threads=None,
        early_stopping_round=0,
        batch_size=1048576,
        use_categorical_split=None,
        handle_missing_value=True,
        use_zero_as_missing_value=False,
        minimum_example_count_per_group=100,
        maximum_categorical_split_point_count=32,
        categorical_smoothing=10.0,
        l2_categorical_regularization=10.0,
        seed=None,
        parallel_trainer=None,
        **params):
    """
    **Description**
        Train a LightGBM binary classification model.

    :param number_of_iterations: Number of iterations. (inputs).
    :param training_data: The data to be used for training (inputs).
    :param learning_rate: Shrinkage rate for trees, used to prevent
        over-fitting. Range: (0,1]. (inputs).
    :param number_of_leaves: Maximum leaves for trees. (inputs).
    :param minimum_example_count_per_leaf: Minimum number of instances
        needed in a child. (inputs).
    :param feature_column_name: Column to use for features (inputs).
    :param booster: Which booster to use, can be gbtree, gblinear or
        dart. gbtree and dart use tree based model while gblinear uses
        linear function. (inputs).
    :param label_column_name: Column to use for labels (inputs).
    :param example_weight_column_name: Column to use for example weight
        (inputs).
    :param row_group_column_name: Column to use for example groupId
        (inputs).
    :param normalize_features: Normalize option for the feature column
        (inputs).
    :param caching: Whether trainer should cache input training data
        (inputs).
    :param unbalanced_sets: Use for binary classification when training
        data is not balanced. (inputs).
    :param weight_of_positive_examples: Control the balance of positive
        and negative weights, useful for unbalanced classes. A typical
        value to consider: sum(negative cases) / sum(positive cases).
        (inputs).
    :param sigmoid: Parameter for the sigmoid function. (inputs).
    :param evaluation_metric: Evaluation metrics. (inputs).
    :param maximum_bin_count_per_feature: Maximum number of bucket bin
        for features. (inputs).
    :param verbose: Verbose (inputs).
    :param silent: Printing running messages. (inputs).
    :param number_of_threads: Number of parallel threads used to run
        LightGBM. (inputs).
    :param early_stopping_round: Rounds of early stopping, 0 will
        disable it. (inputs).
    :param batch_size: Number of entries in a batch when loading data.
        (inputs).
    :param use_categorical_split: Enable categorical split or not.
        (inputs).
    :param handle_missing_value: Enable special handling of missing
        value or not. (inputs).
    :param use_zero_as_missing_value: Enable usage of zero (0) as
        missing value. (inputs).
    :param minimum_example_count_per_group: Minimum number of instances
        per categorical group. (inputs).
    :param maximum_categorical_split_point_count: Max number of
        categorical thresholds. (inputs).
    :param categorical_smoothing: Laplace smoothing term in categorical
        feature split. Avoid the bias of small categories. (inputs).
    :param l2_categorical_regularization: L2 Regularization for
        categorical split. (inputs).
    :param seed: Sets the random seed for LightGBM to use. (inputs).
    :param parallel_trainer: Parallel LightGBM Learning Algorithm
        (inputs).
    :param predictor_model: The trained model (outputs).
    """

    entrypoint_name = 'Trainers.LightGbmBinaryClassifier'
    inputs = {}
    outputs = {}

    if number_of_iterations is not None:
        inputs['NumberOfIterations'] = try_set(
            obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real)
    if training_data is not None:
        inputs['TrainingData'] = try_set(
            obj=training_data, none_acceptable=False, is_of_type=str)
    if learning_rate is not None:
        inputs['LearningRate'] = try_set(
            obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real)
    if number_of_leaves is not None:
        inputs['NumberOfLeaves'] = try_set(
            obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real)
    if minimum_example_count_per_leaf is not None:
        inputs['MinimumExampleCountPerLeaf'] = try_set(
            obj=minimum_example_count_per_leaf,
            none_acceptable=True, is_of_type=numbers.Real)
    if feature_column_name is not None:
        inputs['FeatureColumnName'] = try_set(
            obj=feature_column_name, none_acceptable=True,
            is_of_type=str, is_column=True)
    if booster is not None:
        inputs['Booster'] = try_set(
            obj=booster, none_acceptable=True, is_of_type=dict)
    if label_column_name is not None:
        inputs['LabelColumnName'] = try_set(
            obj=label_column_name, none_acceptable=True,
            is_of_type=str, is_column=True)
    if example_weight_column_name is not None:
        inputs['ExampleWeightColumnName'] = try_set(
            obj=example_weight_column_name, none_acceptable=True,
            is_of_type=str, is_column=True)
    if row_group_column_name is not None:
        inputs['RowGroupColumnName'] = try_set(
            obj=row_group_column_name, none_acceptable=True,
            is_of_type=str, is_column=True)
    if normalize_features is not None:
        inputs['NormalizeFeatures'] = try_set(
            obj=normalize_features, none_acceptable=True, is_of_type=str,
            values=['No', 'Warn', 'Auto', 'Yes'])
    if caching is not None:
        inputs['Caching'] = try_set(
            obj=caching, none_acceptable=True, is_of_type=str,
            values=['Auto', 'Memory', 'None'])
    if unbalanced_sets is not None:
        inputs['UnbalancedSets'] = try_set(
            obj=unbalanced_sets, none_acceptable=True, is_of_type=bool)
    if weight_of_positive_examples is not None:
        inputs['WeightOfPositiveExamples'] = try_set(
            obj=weight_of_positive_examples,
            none_acceptable=True, is_of_type=numbers.Real)
    if sigmoid is not None:
        inputs['Sigmoid'] = try_set(
            obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real)
    if evaluation_metric is not None:
        inputs['EvaluationMetric'] = try_set(
            obj=evaluation_metric, none_acceptable=True, is_of_type=str,
            values=['None', 'Default', 'Logloss', 'Error', 'AreaUnderCurve'])
    if maximum_bin_count_per_feature is not None:
        inputs['MaximumBinCountPerFeature'] = try_set(
            obj=maximum_bin_count_per_feature,
            none_acceptable=True, is_of_type=numbers.Real)
    if verbose is not None:
        inputs['Verbose'] = try_set(
            obj=verbose, none_acceptable=True, is_of_type=bool)
    if silent is not None:
        inputs['Silent'] = try_set(
            obj=silent, none_acceptable=True, is_of_type=bool)
    if number_of_threads is not None:
        inputs['NumberOfThreads'] = try_set(
            obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real)
    if early_stopping_round is not None:
        inputs['EarlyStoppingRound'] = try_set(
            obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real)
    if batch_size is not None:
        inputs['BatchSize'] = try_set(
            obj=batch_size, none_acceptable=True, is_of_type=numbers.Real)
    if use_categorical_split is not None:
        inputs['UseCategoricalSplit'] = try_set(
            obj=use_categorical_split, none_acceptable=True, is_of_type=bool)
    if handle_missing_value is not None:
        inputs['HandleMissingValue'] = try_set(
            obj=handle_missing_value, none_acceptable=True, is_of_type=bool)
    if use_zero_as_missing_value is not None:
        inputs['UseZeroAsMissingValue'] = try_set(
            obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool)
    if minimum_example_count_per_group is not None:
        inputs['MinimumExampleCountPerGroup'] = try_set(
            obj=minimum_example_count_per_group,
            none_acceptable=True, is_of_type=numbers.Real,
            valid_range={'Inf': 0, 'Max': 2147483647})
    if maximum_categorical_split_point_count is not None:
        inputs['MaximumCategoricalSplitPointCount'] = try_set(
            obj=maximum_categorical_split_point_count,
            none_acceptable=True, is_of_type=numbers.Real,
            valid_range={'Inf': 0, 'Max': 2147483647})
    if categorical_smoothing is not None:
        inputs['CategoricalSmoothing'] = try_set(
            obj=categorical_smoothing,
            none_acceptable=True, is_of_type=numbers.Real,
            valid_range={'Min': 0.0})
    if l2_categorical_regularization is not None:
        inputs['L2CategoricalRegularization'] = try_set(
            obj=l2_categorical_regularization,
            none_acceptable=True, is_of_type=numbers.Real,
            valid_range={'Min': 0.0})
    if seed is not None:
        inputs['Seed'] = try_set(
            obj=seed, none_acceptable=True, is_of_type=numbers.Real)
    if parallel_trainer is not None:
        inputs['ParallelTrainer'] = try_set(
            obj=parallel_trainer, none_acceptable=True, is_of_type=dict)
    if predictor_model is not None:
        outputs['PredictorModel'] = try_set(
            obj=predictor_model, none_acceptable=False, is_of_type=str)

    input_variables = {
        x for x in unlist(inputs.values())
        if isinstance(x, str) and x.startswith("$")}
    output_variables = {
        x for x in unlist(outputs.values())
        if isinstance(x, str) and x.startswith("$")}

    entrypoint = EntryPoint(
        name=entrypoint_name, inputs=inputs, outputs=outputs,
        input_variables=input_variables,
        output_variables=output_variables)
    return entrypoint
38.026144
77
0.633293
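Since the docstring above covers a large parameter surface, a short sketch of how such an entrypoint builder is typically invoked may help. The '$'-prefixed strings are graph-variable references, which is exactly what the input_variables/output_variables collection at the end of the function picks up; the variable names themselves are made up for illustration:

ep = trainers_lightgbmbinaryclassifier(
    training_data='$training_data',   # graph variable, collected as an input
    predictor_model='$model',         # graph variable, collected as an output
    number_of_iterations=200,
    learning_rate=0.1,
    unbalanced_sets=True,
)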
4ae27b557f549eb57426e50a39da725dc0fc0caa
2,353
py
Python
Job Portal with Automated Resume Screening/gensim-4.1.2/gensim/test/test_rpmodel.py
Candida18/Job-Portal-with-Automated-Resume-Screening
19d19464ad3d1714da856656753a4afdfe257b31
[ "MIT" ]
3
2021-03-29T19:21:08.000Z
2021-12-31T09:30:11.000Z
Job Portal with Automated Resume Screening/gensim-4.1.2/gensim/test/test_rpmodel.py
Candida18/Job-Portal-with-Automated-Resume-Screening
19d19464ad3d1714da856656753a4afdfe257b31
[ "MIT" ]
1
2021-08-30T08:53:09.000Z
2021-08-30T08:53:09.000Z
venv/Lib/site-packages/gensim/test/test_rpmodel.py
saritmaitra/nlp_ner_topic_modeling
70914b4ae4cd7d3b9cb10776161132216394883c
[ "MIT" ]
2
2022-01-15T05:36:58.000Z
2022-02-08T15:25:50.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html

"""
Automated tests for checking transformation algorithms (the models package).
"""

import logging
import unittest

import numpy as np

from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim import matutils
from gensim.test.utils import datapath, get_tmpfile

if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
36.2
118
0.694008
4ae2c9c85b28962ffc9f80c3635fc6bd15adc317
3,306
py
Python
playground/tianhaoz95/gan_getting_started/cgan_model.py
tianhaoz95/mangekyo
fd2b151538d0c15cca60e05a844baffcbe08e68c
[ "MIT" ]
null
null
null
playground/tianhaoz95/gan_getting_started/cgan_model.py
tianhaoz95/mangekyo
fd2b151538d0c15cca60e05a844baffcbe08e68c
[ "MIT" ]
5
2020-09-25T00:43:18.000Z
2020-10-10T03:59:39.000Z
playground/tianhaoz95/gan_getting_started/cgan_model.py
tianhaoz95/mangekyo
fd2b151538d0c15cca60e05a844baffcbe08e68c
[ "MIT" ]
null
null
null
import tensorflow as tf
from tensorflow import keras
38.44186
75
0.594374
4ae3be8ccc9773f8672701a5f6e37ff13253c5e3
13,115
py
Python
ahd2fhir/utils/resource_handler.py
miracum/ahd2fhir
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
[ "Apache-2.0" ]
3
2021-11-23T16:24:21.000Z
2022-03-30T07:59:03.000Z
ahd2fhir/utils/resource_handler.py
miracum/ahd2fhir
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
[ "Apache-2.0" ]
40
2021-05-27T14:26:33.000Z
2022-03-29T14:29:33.000Z
ahd2fhir/utils/resource_handler.py
miracum/ahd2fhir
0c1bf3e0d86278145f9f1fa5c99a121f8e961d5f
[ "Apache-2.0" ]
1
2021-06-30T11:11:01.000Z
2021-06-30T11:11:01.000Z
import base64
import datetime
import logging
import os
import time
from typing import List, Tuple

import structlog
import tenacity
from averbis import Pipeline
from fhir.resources.bundle import Bundle
from fhir.resources.codeableconcept import CodeableConcept
from fhir.resources.composition import Composition, CompositionSection
from fhir.resources.documentreference import DocumentReference
from fhir.resources.fhirtypes import DateTime
from fhir.resources.identifier import Identifier
from fhir.resources.reference import Reference
from fhir.resources.resource import Resource
from prometheus_client import Counter, Histogram, Summary
from tenacity.after import after_log

from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement
from ahd2fhir.utils.bundle_builder import BundleBuilder
from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions
from ahd2fhir.utils.device_builder import build_device
from ahd2fhir.utils.fhir_utils import sha256_of_identifier

MAPPING_FAILURES_COUNTER = Counter("mapping_failures", "Exceptions during mapping")

MAPPING_DURATION_SUMMARY = Histogram(
    "map_duration_seconds",
    "Time spent mapping",
    buckets=(
        0.05,
        0.1,
        0.5,
        1.0,
        2.0,
        3.0,
        5.0,
        8.0,
        13.0,
        21.0,
        34.0,
        55.0,
        "inf",
    ),
)
EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary(
    "extracted_resources", "Number of extracted resources for each processed document"
)
DOCUMENT_LENGTH_SUMMARY = Summary(
    "document_length",
    "Length of each processed document's text in characters",
)

DISCHARGE_SUMMARY_CONCEPT_TEXT = (
    "Clinical document Kind of document from LOINC Document Ontology"
)
DISCHARGE_SUMMARY_CONCEPT = CodeableConcept(
    **{
        "coding": [
            {
                "system": "http://loinc.org",
                "code": "74477-1",
                "display": DISCHARGE_SUMMARY_CONCEPT_TEXT,
            },
        ],
        "text": DISCHARGE_SUMMARY_CONCEPT_TEXT,
    }
)

AHD_TYPE_DOCUMENT_ANNOTATION = "de.averbis.types.health.DocumentAnnotation"
AHD_TYPE_MEDICATION = "de.averbis.types.health.Medication"
AHD_TYPE_DIAGNOSIS = "de.averbis.types.health.Diagnosis"

log = structlog.get_logger()
34.24282
88
0.619596
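The Prometheus objects declared above follow the usual client-library pattern; for instance, a Histogram such as MAPPING_DURATION_SUMMARY is normally used as a context manager (or decorator) around the code being timed. A generic sketch, not code from this project, with run_mapping being a hypothetical call:

with MAPPING_DURATION_SUMMARY.time():           # observes elapsed seconds
    result = run_mapping(document)              # hypothetical mapping call

MAPPING_FAILURES_COUNTER.inc()                  # incremented when a mapping fails
EXTRACTED_RESOURCES_COUNT_SUMMARY.observe(len(result))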
4ae54219ff08b6f61e95d73dac2e96ebfcf30193
12,521
py
Python
maestros/lookups.py
Infinityloopsistemas/SIVA
92b6c82f018d39ef405989639974d1f2757476ed
[ "BSD-3-Clause" ]
null
null
null
maestros/lookups.py
Infinityloopsistemas/SIVA
92b6c82f018d39ef405989639974d1f2757476ed
[ "BSD-3-Clause" ]
1
2018-09-27T12:07:19.000Z
2018-10-08T15:56:30.000Z
maestros/lookups.py
Infinityloopsistemas/SIVA
92b6c82f018d39ef405989639974d1f2757476ed
[ "BSD-3-Clause" ]
1
2018-10-12T13:41:20.000Z
2018-10-12T13:41:20.000Z
# -*- coding: utf-8 -*-
from selectable.decorators import login_required
from maestros.models import (TiposMedidasActuacion, TiposLimitesCriticos,
                             TiposMedidasVigilancia, TiposTemperaturas,
                             TiposFrecuencias, Zonas, Terceros,
                             CatalogoEquipos, Personal, Consumibles,
                             ParametrosAnalisis, Actividades, Etapas,
                             Peligros, TiposCursos, TiposLegislacion,
                             Unidades, Firmas, HorarioTurnos)
from selectable.base import ModelLookup
from selectable.registry import registry
from maestros_generales.models import Empresas
from siva import settings

__author__ = 'julian'


registry.register(TPActuacionPrevLookup)
registry.register(TPActuacionCorrLookup)
registry.register(TPLimitesCritLookup)
registry.register(ActividadesLookup)
registry.register(TipoMedidasVigilanciaLookup)
registry.register(TiposTemperaturasLookup)
registry.register(TiposFrecuenciasLookup)
registry.register(ZonasLookup)
registry.register(TercerosLookup)
registry.register(TercerosTiposLookup)
registry.register(CatalogoEquiposLookup)
registry.register(PersonalLookup)
registry.register(TiposCursosLookup)
registry.register(TiposLegislacionLookup)
registry.register(ConsumiblesLookup)
registry.register(ParametrosAnalisisLookup)
registry.register(EtapasLookup)
registry.register(PeligrosLookup)
registry.register(UnidadesLookup)
registry.register(FirmasLookup)
registry.register(HorarioTurnoLookup)
31.459799
304
0.72614
4ae60da63587ab2aea48c92c16464b071dd138fd
828
py
Python
julynter/oldcmd.py
dew-uff/julynter
f4657aba4fa3e17af2cd241f0c3170b76df7c57c
[ "BSD-3-Clause" ]
9
2020-07-13T23:56:04.000Z
2021-11-02T18:42:07.000Z
julynter/oldcmd.py
dew-uff/julynter
f4657aba4fa3e17af2cd241f0c3170b76df7c57c
[ "BSD-3-Clause" ]
8
2021-07-14T15:33:57.000Z
2022-02-27T06:45:57.000Z
julynter/oldcmd.py
dew-uff/julynter
f4657aba4fa3e17af2cd241f0c3170b76df7c57c
[ "BSD-3-Clause" ]
null
null
null
"""Define commands for Python 2.7""" import argparse import traceback from . import util from .cmd import run from .cmd import extractpipenv def main(): """Main function""" print("This version is not supported! It has limitted analysis features") parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args() try: if not getattr(args, 'func', None): parser.print_help() else: args.func(args, rest) if not util.EXITED: util.do_exit(0) except: # pylint: disable=bare-except if not util.EXITED: traceback.print_exc() util.do_exit(1)
28.551724
77
0.657005
4ae8e1876538896679e757644a54528296f6f24d
62,352
py
Python
gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py
nurikk/gpdb
04fe0202c59721826d1eda2b19d73e5572893fcb
[ "PostgreSQL", "Apache-2.0" ]
null
null
null
gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py
nurikk/gpdb
04fe0202c59721826d1eda2b19d73e5572893fcb
[ "PostgreSQL", "Apache-2.0" ]
null
null
null
gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py
nurikk/gpdb
04fe0202c59721826d1eda2b19d73e5572893fcb
[ "PostgreSQL", "Apache-2.0" ]
null
null
null
#!/usr/bin/env python

import os
import imp

gpcrondump_path = os.path.abspath('gpcrondump')
gpcrondump = imp.load_source('gpcrondump', gpcrondump_path)

import unittest2 as unittest
from datetime import datetime
from gppylib import gplog
from gpcrondump import GpCronDump
from gppylib.operations.utils import DEFAULT_NUM_WORKERS
from mock import patch, Mock
from gppylib.operations.dump import MailDumpEvent
from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file
import mock

logger = gplog.get_unittest_logger()

#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
    unittest.main()
52.264878
175
0.697203
4ae9ad24978103134e04134f0b180d82ec622bb0
5,118
py
Python
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
vsosrc/ambari
e3cc898672707bedf7597f2e16d684c8a00bba3b
[ "Apache-2.0" ]
null
null
null
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
vsosrc/ambari
e3cc898672707bedf7597f2e16d684c8a00bba3b
[ "Apache-2.0" ]
null
null
null
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
vsosrc/ambari
e3cc898672707bedf7597f2e16d684c8a00bba3b
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python

'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import datetime, sys, socket
import resource_management.libraries.functions
47.831776
194
0.557835
4aea193e4b6512fd0f264e141522245728635ebf
1,273
py
Python
test/linux/gyptest-ldflags-from-environment.py
chlorm-forks/gyp
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
[ "BSD-3-Clause" ]
77
2018-07-01T15:55:34.000Z
2022-03-30T09:16:54.000Z
test/linux/gyptest-ldflags-from-environment.py
chlorm-forks/gyp
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
[ "BSD-3-Clause" ]
116
2021-05-29T16:32:51.000Z
2021-08-13T16:05:29.000Z
test/linux/gyptest-ldflags-from-environment.py
chlorm-forks/gyp
a8921fcaab1a18c8cf7e4ab09ceb940e336918ec
[ "BSD-3-Clause" ]
53
2018-04-13T12:06:06.000Z
2022-03-25T13:54:38.000Z
#!/usr/bin/env python

# Copyright (c) 2017 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies the use of linker flags in environment variables.

In this test, gyp and build both run in same local environment.
"""

import TestGyp

import re
import subprocess
import sys

FORMATS = ('make', 'ninja')

if sys.platform.startswith('linux'):
  test = TestGyp.TestGyp(formats=FORMATS)

  CHDIR = 'ldflags-from-environment'
  with TestGyp.LocalEnv({'LDFLAGS': '-Wl,--dynamic-linker=/target',
                         'LDFLAGS_host': '-Wl,--dynamic-linker=/host',
                         'GYP_CROSSCOMPILE': '1'}):
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', chdir=CHDIR)

  def GetDynamicLinker(target):
    # Reconstructed helper (the original body is missing). Assumption: it
    # reads the ELF .interp section of the built target binary with readelf
    # and returns the dynamic linker path, or '' if none is found.
    path = test.built_file_path(target, chdir=CHDIR)
    out = subprocess.check_output(['readelf', '-p', '.interp', path]).decode()
    match = re.search(r'\[\s*0\]\s+(\S+)', out)
    return match.group(1) if match else ''

  if GetDynamicLinker('ldflags') != '/target':
    test.fail_test()

  if GetDynamicLinker('ldflags_host') != '/host':
    test.fail_test()

  test.pass_test()
27.673913
73
0.660644
4aeb5cb919a70c0ac2be053ebf69b329fe3c2ae2
109
py
Python
tests/test_advanced.py
dhaitz/python-package-template
b4c636e48ae192e5efe30fe71af37be6f8273d29
[ "BSD-2-Clause" ]
null
null
null
tests/test_advanced.py
dhaitz/python-package-template
b4c636e48ae192e5efe30fe71af37be6f8273d29
[ "BSD-2-Clause" ]
null
null
null
tests/test_advanced.py
dhaitz/python-package-template
b4c636e48ae192e5efe30fe71af37be6f8273d29
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*-

from .context import sample
13.625
32
0.642202
4aeb6ef2b04d214ccf1780ce3742b6d40d27fe53
2,572
py
Python
binary_tree/m_post_order_traversal.py
dhrubach/python-code-recipes
14356c6adb1946417482eaaf6f42dde4b8351d2f
[ "MIT" ]
null
null
null
binary_tree/m_post_order_traversal.py
dhrubach/python-code-recipes
14356c6adb1946417482eaaf6f42dde4b8351d2f
[ "MIT" ]
null
null
null
binary_tree/m_post_order_traversal.py
dhrubach/python-code-recipes
14356c6adb1946417482eaaf6f42dde4b8351d2f
[ "MIT" ]
null
null
null
######################################################################
# LeetCode Problem Number : 145
# Difficulty Level : Medium
# URL : https://leetcode.com/problems/binary-tree-postorder-traversal/
######################################################################
from binary_search_tree.tree_node import TreeNode
28.898876
70
0.47395
4aecb09acc6ad3252011c93a09793cb698638ff1
18,290
py
Python
dokuwiki.py
luminisward/python-dokuwiki
329862e6c91a79b2ad9f0b7616f7591459f2d4fd
[ "MIT" ]
null
null
null
dokuwiki.py
luminisward/python-dokuwiki
329862e6c91a79b2ad9f0b7616f7591459f2d4fd
[ "MIT" ]
null
null
null
dokuwiki.py
luminisward/python-dokuwiki
329862e6c91a79b2ad9f0b7616f7591459f2d4fd
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

"""This python module aims to manage
`DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the
provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_. It is
compatible with python2.7 and python3+.

Installation
------------
It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use
the ``pip`` command to install it::

    pip install dokuwiki

Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_
"""

import re
import sys
import base64
import weakref
from xml.parsers.expat import ExpatError

if sys.version_info[0] == 3:
    from xmlrpc.client import ServerProxy, Binary, Fault, Transport
    from urllib.parse import urlencode
else:
    from xmlrpclib import ServerProxy, Binary, Fault, Transport
    from urllib import urlencode

from datetime import datetime, timedelta

ERR = 'XML or text declaration not at start of entity: line 2, column 0'

_URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?')


def date(date):
    """DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime``
    type and the format changes between DokuWiki versions ... This function
    converts *date* to a `datetime` object.
    """
    date = date.value
    return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')
            if len(date) == 24
            else datetime.strptime(date, '%Y%m%dT%H:%M:%S'))


def utc2local(date):
    """DokuWiki returns date with a +0000 timezone. This function converts
    *date* to the local time.
    """
    date_offset = (datetime.now() - datetime.utcnow())
    # Python < 2.7 doesn't have the 'total_seconds' method so calculate it by hand!
    date_offset = (date_offset.microseconds +
                   (date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6
    date_offset = int(round(date_offset / 60 / 60))
    return date + timedelta(hours=date_offset)


def add_acl(self, scope, user, permission):
    """Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts
    the page/namespace *scope* to *user* (use *@group* syntax for groups)
    with *permission* level. It returns a boolean that indicates if the
    rule was correctly added.
    """
    return self.send('plugin.acl.addAcl', scope, user, permission)


def del_acl(self, scope, user):
    """Delete any ACL matching the given *scope* and *user* (or group if
    *@group* syntax is used). It returns a boolean that indicates if the
    rule was correctly removed.
    """
    return self.send('plugin.acl.delAcl', scope, user)


class _Pages(object):
    """This object groups methods for managing pages of a DokuWiki. This
    object is accessible from the ``pages`` property of a `DokuWiki`
    instance::

        wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
        wiki.pages.list()
    """

    def list(self, namespace='/', **options):
        """List all pages of the given *namespace*.

        Valid *options* are:

            * *depth*: (int) recursion level, 0 for all
            * *hash*: (bool) do an md5 sum of content
            * *skipacl*: (bool) list everything regardless of ACL
        """
        return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)

    def changes(self, timestamp):
        """Returns a list of changes since given *timestamp*.

        For example, for returning all changes since *2016-01-01*::

            from datetime import datetime
            wiki.pages.changes(datetime(2016, 1, 1).timestamp())
        """
        return self._dokuwiki.send('wiki.getRecentChanges', timestamp)

    def search(self, string):
        """Performs a fulltext search on *string* and returns the first 15
        results.
        """
        return self._dokuwiki.send('dokuwiki.search', string)

    def versions(self, page, offset=0):
        """Returns the available versions of *page*. *offset* can be used to
        list earlier versions in the history.
        """
        return self._dokuwiki.send('wiki.getPageVersions', page, offset)

    def info(self, page, version=None):
        """Returns information about *page*. Information about the last
        version is returned if *version* is not set.
        """
        return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
                if version is not None
                else self._dokuwiki.send('wiki.getPageInfo', page))

    def get(self, page, version=None):
        """Returns the content of *page*. The content of the last version is
        returned if *version* is not set.
        """
        return (self._dokuwiki.send('wiki.getPageVersion', page, version)
                if version is not None
                else self._dokuwiki.send('wiki.getPage', page))

    def append(self, page, content, **options):
        """Appends *content* text to *page*.

        Valid *options* are:

            * *sum*: (str) change summary
            * *minor*: (bool) whether this is a minor change
        """
        return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)

    def html(self, page, version=None):
        """Returns HTML content of *page*. The HTML content of the last
        version of the page is returned if *version* is not set.
        """
        return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
                if version is not None
                else self._dokuwiki.send('wiki.getPageHTML', page))

    def set(self, page, content, **options):
        """Set/replace the *content* of *page*.

        Valid *options* are:

            * *sum*: (str) change summary
            * *minor*: (bool) whether this is a minor change
        """
        try:
            return self._dokuwiki.send('wiki.putPage', page, content, options)
        except ExpatError as err:
            # Sometimes the first line of the XML response is blank, which
            # raises the 'ExpatError' exception although the change has been
            # done. This allows the error to be ignored.
            if str(err) != ERR:
                raise DokuWikiError(err)

    def delete(self, page):
        """Delete *page* by setting an empty content."""
        return self.set(page, '')

    def lock(self, page):
        """Locks *page*."""
        result = self._dokuwiki.send('dokuwiki.setLocks',
                                     lock=[page], unlock=[])
        if result['lockfail']:
            raise DokuWikiError('unable to lock page')

    def unlock(self, page):
        """Unlocks *page*."""
        result = self._dokuwiki.send('dokuwiki.setLocks',
                                     lock=[], unlock=[page])
        if result['unlockfail']:
            raise DokuWikiError('unable to unlock page')

    def permission(self, page):
        """Returns the permission level of *page*."""
        return self._dokuwiki.send('wiki.aclCheck', page)

    def links(self, page):
        """Returns a list of all links contained in *page*."""
        return self._dokuwiki.send('wiki.listLinks', page)

    def backlinks(self, page):
        """Returns a list of all links referencing *page*."""
        return self._dokuwiki.send('wiki.getBackLinks', page)


class _Medias(object):
    """This object groups methods for managing the media files of a
    DokuWiki. This object is accessible from the ``medias`` property of a
    `DokuWiki` instance::

        wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
        wiki.medias.list()
    """

    def list(self, namespace='/', **options):
        """Returns all medias of the given *namespace*.

        Valid *options* are:

            * *depth*: (int) recursion level, 0 for all
            * *skipacl*: (bool) skip acl checking
            * *pattern*: (str) check given pattern
            * *hash*: (bool) add hashes to result list
        """
        return self._dokuwiki.send('wiki.getAttachments', namespace, options)

    def changes(self, timestamp):
        """Returns the list of medias changed since given *timestamp*.

        For example, for returning all changes since *2016-01-01*::

            from datetime import datetime
            wiki.medias.changes(datetime(2016, 1, 1).timestamp())
        """
        return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp)

    def get(self, media, dirpath=None, filename=None, overwrite=False,
            b64decode=False):
        """Returns the binary data of *media* or save it to a file. If
        *dirpath* is not set the binary data is returned, otherwise the data
        is saved to a file. By default, the filename is the name of the media
        but it can be changed with the *filename* parameter. The *overwrite*
        parameter allows overwriting the file if it already exists locally.
        """
        import os
        data = self._dokuwiki.send('wiki.getAttachment', media)
        data = base64.b64decode(data) if b64decode else data.data
        if dirpath is None:
            return data
        if filename is None:
            filename = media.replace('/', ':').split(':')[-1]
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
        filepath = os.path.join(dirpath, filename)
        if os.path.exists(filepath) and not overwrite:
            raise FileExistsError("[Errno 17] File exists: '%s'" % filepath)
        with open(filepath, 'wb') as fhandler:
            fhandler.write(data)

    def info(self, media):
        """Returns information about *media*."""
        return self._dokuwiki.send('wiki.getAttachmentInfo', media)

    def add(self, media, filepath, overwrite=True):
        """Set *media* from local file *filepath*. The *overwrite* parameter
        specifies whether the media must be overwritten if it exists remotely.
        """
        with open(filepath, 'rb') as fhandler:
            self._dokuwiki.send('wiki.putAttachment', media,
                                Binary(fhandler.read()), ow=overwrite)

    def set(self, media, _bytes, overwrite=True, b64encode=False):
        """Set *media* from *_bytes*. The *overwrite* parameter specifies
        whether the media must be overwritten if it exists remotely.
        """
        data = base64.b64encode(_bytes) if b64encode else Binary(_bytes)
        self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite)

    def delete(self, media):
        """Delete *media*."""
        return self._dokuwiki.send('wiki.deleteAttachment', media)


class Dataentry(object):
    """Object that manages
    `data entries <https://www.dokuwiki.org/plugin:data>`_."""
37.326531
89
0.601203
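Tying the docstrings above together, typical client usage looks like the following; the URL, credentials, and page names are placeholders:

import dokuwiki

wiki = dokuwiki.DokuWiki('https://wiki.example.org', 'user', 'password')
print(wiki.pages.list('playground'))                  # pages in a namespace
wiki.pages.set('playground:demo', '====== Demo ======', sum='created demo')
print(wiki.pages.get('playground:demo'))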
4aed13aa20c6ab391e3ffb7e313d6df343ae7084
1,449
py
Python
setup.py
lvgig/test-aide
60a9420062dd778ce9dad43993dd8ab4f300ac4e
[ "BSD-3-Clause" ]
2
2021-11-08T08:41:08.000Z
2021-11-08T09:11:24.000Z
setup.py
lvgig/test-aide
60a9420062dd778ce9dad43993dd8ab4f300ac4e
[ "BSD-3-Clause" ]
null
null
null
setup.py
lvgig/test-aide
60a9420062dd778ce9dad43993dd8ab4f300ac4e
[ "BSD-3-Clause" ]
null
null
null
import setuptools
import re

with open("README.md", "r") as fh:
    long_description = fh.read()

# get version from _version.py file, from below
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
VERSION_FILE = "test_aide/_version.py"
version_file_str = open(VERSION_FILE, "rt").read()
VERSION_STR_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VERSION_STR_RE, version_file_str, re.M)
if mo:
    version = mo.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSION_FILE,))


def list_reqs(fname="requirements.txt"):
    # Reconstructed helper (the original definition is missing): it is
    # assumed to read the run-time requirements from requirements.txt.
    with open(fname) as fd:
        return fd.read().splitlines()


setuptools.setup(
    name="test-aide",
    version=version,
    author="LV GI Data Science Team",
    author_email="#DataSciencePackages@lv.co.uk",
    description="Package of helper functions to be used for unit testing",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    install_requires=list_reqs(),
    python_requires=">=3.6",
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: BSD License",
    ],
)
32.2
94
0.673568
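The version-extraction idiom used above is worth seeing in isolation: the regular expression simply pulls the quoted string out of a `__version__ = '...'` assignment. Sample text invented for the demonstration:

import re

sample = "__version__ = '1.2.3'\n"
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", sample, re.M)
print(match.group(1))   # -> 1.2.3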
4aed5c4d1497088a992494a4109f38cb6b27e78e
510
py
Python
examples/pylab_examples/matshow.py
jbbrokaw/matplotlib
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
[ "MIT", "BSD-3-Clause" ]
16
2016-06-14T19:45:35.000Z
2020-11-30T19:02:58.000Z
lib/mpl_examples/pylab_examples/matshow.py
yingkailiang/matplotlib
255a79b106c98c1904489afe6a754e4d943179d6
[ "MIT", "BSD-3-Clause" ]
7
2015-05-08T19:36:25.000Z
2015-06-30T15:32:17.000Z
lib/mpl_examples/pylab_examples/matshow.py
yingkailiang/matplotlib
255a79b106c98c1904489afe6a754e4d943179d6
[ "MIT", "BSD-3-Clause" ]
14
2015-10-05T04:15:46.000Z
2020-06-11T18:06:02.000Z
"""Simple matshow() example.""" from matplotlib.pylab import * def samplemat(dims): """Make a matrix with all zeros and increasing elements on the diagonal""" aa = zeros(dims) for i in range(min(dims)): aa[i, i] = i return aa # Display 2 matrices of different sizes dimlist = [(12, 12), (15, 35)] for d in dimlist: matshow(samplemat(d)) # Display a random matrix with a specified figure number and a grayscale # colormap matshow(rand(64, 64), fignum=100, cmap=cm.gray) show()
22.173913
78
0.670588
4aee10083d95f61f20711f9c9064a70b35ea7926
4,445
py
Python
setup.py
HeyLifeHD/rp-bp
9c59b1bc0267400747477467c45f96364d5528e1
[ "MIT" ]
6
2016-05-16T18:52:41.000Z
2021-12-31T06:27:29.000Z
setup.py
HeyLifeHD/rp-bp
9c59b1bc0267400747477467c45f96364d5528e1
[ "MIT" ]
110
2016-06-22T13:24:39.000Z
2022-02-07T09:29:14.000Z
setup.py
HeyLifeHD/rp-bp
9c59b1bc0267400747477467c45f96364d5528e1
[ "MIT" ]
5
2017-05-22T12:21:51.000Z
2022-02-06T10:32:56.000Z
#! /usr/bin/env python3

import importlib
import logging
import os
import subprocess

from setuptools import setup
from setuptools.command.install import install as install
from setuptools.command.develop import develop as develop

logger = logging.getLogger(__name__)

stan_model_files = [
    os.path.join("nonperiodic", "no-periodicity.stan"),
    os.path.join("nonperiodic", "start-high-high-low.stan"),
    os.path.join("nonperiodic", "start-high-low-high.stan"),
    os.path.join("periodic", "start-high-low-low.stan"),
    os.path.join("untranslated", "gaussian-naive-bayes.stan"),
    os.path.join("translated", "periodic-gaussian-mixture.stan")
]

stan_pickle_files = [
    os.path.join("nonperiodic", "no-periodicity.pkl"),
    os.path.join("nonperiodic", "start-high-high-low.pkl"),
    os.path.join("nonperiodic", "start-high-low-high.pkl"),
    os.path.join("periodic", "start-high-low-low.pkl"),
    os.path.join("untranslated", "gaussian-naive-bayes.pkl"),
    os.path.join("translated", "periodic-gaussian-mixture.pkl")
]


class SetupInstall(install):
    # Minimal placeholder: the body of this command class is missing here.
    # Judging by the Stan file lists above, the original presumably also
    # compiled the *.stan models into the *.pkl files after installing.
    def run(self):
        install.run(self)


class SetupDevelop(develop):
    # Minimal placeholder; see the note on SetupInstall.
    def run(self):
        develop.run(self)


setup(
    cmdclass={
        'install': SetupInstall,
        'develop': SetupDevelop
    }
)
29.633333
80
0.64027
4aee208ed627e09e244d2f3b0703567eec906294
9,909
py
Python
utils/data_utils.py
BorisMansencal/quickNAT_pytorch
1853afbe409f2fec6db298c70a3dd0ae088091f0
[ "MIT" ]
null
null
null
utils/data_utils.py
BorisMansencal/quickNAT_pytorch
1853afbe409f2fec6db298c70a3dd0ae088091f0
[ "MIT" ]
null
null
null
utils/data_utils.py
BorisMansencal/quickNAT_pytorch
1853afbe409f2fec6db298c70a3dd0ae088091f0
[ "MIT" ]
null
null
null
import os

import h5py
import nibabel as nb
import numpy as np
import torch
import torch.utils.data as data
from torchvision import transforms

import utils.preprocessor as preprocessor

# transform_train = transforms.Compose([
#     transforms.RandomCrop(200, padding=56),
#     transforms.ToTensor(),
# ])


# def load_file_paths(data_dir, label_dir, volumes_txt_file=None):
#     """
#     This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label.
#     It should be modified to suit the need of the project
#     :param data_dir: Directory which contains the data files
#     :param label_dir: Directory which contains the label files
#     :param volumes_txt_file: (Optional) Path to the a csv file, when provided only these data points will be read
#     :return: list of file paths as string
#     """
#
#     volume_exclude_list = ['IXI290', 'IXI423']
#     if volumes_txt_file:
#         with open(volumes_txt_file) as file_handle:
#             volumes_to_use = file_handle.read().splitlines()
#     else:
#         volumes_to_use = [name for name in os.listdir(data_dir) if
#                           name.startswith('IXI') and name not in volume_exclude_list]
#
#     file_paths = [
#         [os.path.join(data_dir, vol, 'mri/orig.mgz'),
#          os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')]
#         for vol in volumes_to_use]
#     return file_paths


def load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None):
    """
    This function returns the file paths combined as a list where each element
    is a 2 element pair, the 0th entry being the data file path and the 1st
    being the label file path.
    It should be modified to suit the need of the project.
    :param data_dir: Directory which contains the data files
    :param label_dir: Directory which contains the label files
    :param data_id: A flag that indicates the name of the dataset for proper file reading
    :param volumes_txt_file: (Optional) Path to a csv file; when provided, only these data points will be read
    :return: list of file path pairs
    """
    if volumes_txt_file:
        with open(volumes_txt_file) as file_handle:
            volumes_to_use = file_handle.read().splitlines()
    else:
        volumes_to_use = [name for name in os.listdir(data_dir)]

    if data_id == "MALC":
        file_paths = [
            [os.path.join(data_dir, vol, 'mri/orig.mgz'),
             os.path.join(label_dir, vol + '_glm.mgz')]
            for vol in volumes_to_use]
    elif data_id == "ADNI":
        file_paths = [
            [os.path.join(data_dir, vol, 'orig.mgz'),
             os.path.join(label_dir, vol, 'Lab_con.mgz')]
            for vol in volumes_to_use]
    elif data_id == "CANDI":
        file_paths = [
            [os.path.join(data_dir, vol + '/' + vol + '_1.mgz'),
             os.path.join(label_dir, vol + '/' + vol + '_1_seg.mgz')]
            for vol in volumes_to_use]
    elif data_id == "IBSR":
        file_paths = [
            [os.path.join(data_dir, vol, 'mri/orig.mgz'),
             os.path.join(label_dir, vol + '_map.nii.gz')]
            for vol in volumes_to_use]
    elif data_id == "BORIS":  # BORIS
        file_paths = [
            [os.path.join(data_dir, vol),
             os.path.join(label_dir, vol.replace('.nii', '_seg.nii'))]
            for vol in volumes_to_use]
    else:
        raise ValueError("Invalid entry, valid options are MALC, ADNI, CANDI and IBSR")

    return file_paths


def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct):
    """
    This function returns the data file paths as a list where each element is
    a 1 element list holding the data file path (no label paths are read here).
    It should be modified to suit the need of the project.
    :param data_dir: Directory which contains the data files
    :param volumes_txt_file: Path to a csv file, when provided only these data points will be read
    :param dir_struct: If the id_list is in FreeSurfer style or normal
    :return: list of file paths as string
    """
    with open(volumes_txt_file) as file_handle:
        volumes_to_use = file_handle.read().splitlines()
    if dir_struct == "FS":
        file_paths = [
            [os.path.join(data_dir, vol, 'mri/orig.mgz')]
            for vol in volumes_to_use]
    elif dir_struct == "Linear":
        file_paths = [
            [os.path.join(data_dir, vol)]
            for vol in volumes_to_use]
    elif dir_struct == "part_FS":
        file_paths = [
            [os.path.join(data_dir, vol, 'orig.mgz')]
            for vol in volumes_to_use]
    else:
        raise ValueError("Invalid entry, valid options are FS and Linear")
    return file_paths
41.460251
138
0.631749
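As a quick illustration of the API above, a caller selects the dataset flavour with data_id and gets back [data, label] path pairs; the directories and file name below are placeholders:

pairs = load_file_paths('/data/MALC/images', '/data/MALC/labels', 'MALC',
                        volumes_txt_file='/data/MALC/train_volumes.txt')
data_path, label_path = pairs[0]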
4aef5ae78252c6d6372a39f824e9c27cc8e4b322
2,764
py
Python
lib/common/app.py
auho/python-ETL
761589814b04e076ba6fa1c0e64b83ce62ce8556
[ "Apache-2.0" ]
null
null
null
lib/common/app.py
auho/python-ETL
761589814b04e076ba6fa1c0e64b83ce62ce8556
[ "Apache-2.0" ]
null
null
null
lib/common/app.py
auho/python-ETL
761589814b04e076ba6fa1c0e64b83ce62ce8556
[ "Apache-2.0" ]
null
null
null
import argparse
import yaml
import sys

from .conf import MysqlConf
from lib.db import mysql

parser = argparse.ArgumentParser()
parser.add_argument("--config", help="config file name", type=str, required=False, default='office')
input_args = parser.parse_args()
28.791667
100
0.624096
4aefb5a97f19992a6966a61598aa4554de228c41
4,164
py
Python
design.py
StrangeArcturus/QtAndRequestParser-Project
5205420ff06c91917ce0c1d890da85e9d72a06ea
[ "MIT" ]
null
null
null
design.py
StrangeArcturus/QtAndRequestParser-Project
5205420ff06c91917ce0c1d890da85e9d72a06ea
[ "MIT" ]
null
null
null
design.py
StrangeArcturus/QtAndRequestParser-Project
5205420ff06c91917ce0c1d890da85e9d72a06ea
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'design.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets
52.05
102
0.686119
4af181be525d8e8daf1ffbab71cb2d90c60d3216
597
py
Python
EP_2019/py_impl/main.py
Alisa-lisa/conferences
d93014747dc9d18493295dbc33fa51c8fb9467dc
[ "MIT" ]
5
2019-07-06T07:22:57.000Z
2020-12-19T22:49:35.000Z
EP_2019/py_impl/main.py
pindash/conferences
87fcb9f595a244408c015c66283c337d124b358d
[ "MIT" ]
null
null
null
EP_2019/py_impl/main.py
pindash/conferences
87fcb9f595a244408c015c66283c337d124b358d
[ "MIT" ]
3
2020-06-07T14:58:24.000Z
2020-11-24T22:51:14.000Z
from simulation.car import spawn_drivers
from simulation.passenger import spawn_passengers
from simulation.core import World, Clock

conf = {
    "x": 100,
    "y": 100,
    "drivers": 200,
    "users": 1000,
    "start": "2019-07-08T00:00:00",
    "end": "2019-07-08T00:01:00"
}
clock = Clock(conf["start"], conf["end"])

if __name__ == '__main__':
    world = World([conf['x'], conf['y']], clock=clock)
    world.register_drivers(spawn_drivers(conf["drivers"], conf['x'], conf['y']))
    world.register_passengers(spawn_passengers(conf["users"], conf['x'], conf['y']))
    world.run(log=False)
28.428571
84
0.649916
4af19e16fcec726156bfcc2b3d41a671e651e34c
795
py
Python
Python/reverse_with_swap.py
avulaankith/Python
71269b1a36b45150edb7834c559386a91618e723
[ "MIT" ]
null
null
null
Python/reverse_with_swap.py
avulaankith/Python
71269b1a36b45150edb7834c559386a91618e723
[ "MIT" ]
null
null
null
Python/reverse_with_swap.py
avulaankith/Python
71269b1a36b45150edb7834c559386a91618e723
[ "MIT" ]
1
2021-08-14T13:24:11.000Z
2021-08-14T13:24:11.000Z
#!/bin/python3

import math
import os
import random
import re
import sys

#
# Complete the 'reverse_words_order_and_swap_cases' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING sentence as parameter.
#

def reverse_words_order_and_swap_cases(sentence):
    # Reconstructed from the specification above: reverse the order of the
    # words, then swap the case of every letter.
    return " ".join(sentence.split()[::-1]).swapcase()

sentence = input()
news = reverse_words_order_and_swap_cases(sentence)
print(news)
18.488372
67
0.566038
4af2582d62d2fd8906d1b6bfaa4cb05ec6512096
1,245
py
Python
playground/check_equal.py
INK-USC/hypter
732551e1e717b66ad26ba538593ed184957ecdea
[ "MIT" ]
11
2021-07-16T15:49:39.000Z
2021-12-17T14:46:25.000Z
playground/check_equal.py
INK-USC/hypter
732551e1e717b66ad26ba538593ed184957ecdea
[ "MIT" ]
null
null
null
playground/check_equal.py
INK-USC/hypter
732551e1e717b66ad26ba538593ed184957ecdea
[ "MIT" ]
1
2021-08-04T07:21:02.000Z
2021-08-04T07:21:02.000Z
import json

d1 = {}
with open("/home/qinyuan/zs/out/bart-large-with-description-grouped-1e-5-outerbsz4-innerbsz32-adapterdim4-unfreeze-dec29/test_predictions.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d1[d["id"]] = d["output"][0]["answer"]

d2 = {}
dq = {}
with open("/home/qinyuan/zs/out/bart-large-zsre-with-description-LR2e-5-FREQ32-dec27/test_predictions_submitted.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d2[d["id"]] = d["output"][0]["answer"]
        dq[d["id"]] = d["input"]

d3 = {}
with open("/home/qinyuan/zs/data/structured_zeroshot-test.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d3[d["id"]] = [item["answer"] for item in d["output"]]

count = 0
win1 = 0
win2 = 0
for key in d1.keys():
    if d1[key] != d2[key]:
        print("{}. {}. {}. {}. {}".format(key, dq[key], d1[key], d2[key], d3[key]))
        count += 1
        if d1[key] in d3[key] and d2[key] not in d3[key]:
            win1 += 1
            print(d1[key])
            print(d2[key])
        if d2[key] in d3[key] and d1[key] not in d3[key]:
            win2 += 1
            print(d1[key])
            print(d2[key])

print(count)
print(win1)
print(win2)
27.065217
153
0.553414
4af2b457e2a07435b2f1cbc51394d14794b7cb2f
294
py
Python
creeds/static/api1.py
MaayanLab/creeds
7d580c91ca45c03e34bbc0d1928668f266ff13d9
[ "CC0-1.0" ]
2
2019-01-10T18:10:45.000Z
2019-04-05T13:47:01.000Z
creeds/static/api1.py
MaayanLab/creeds
7d580c91ca45c03e34bbc0d1928668f266ff13d9
[ "CC0-1.0" ]
1
2019-05-09T21:25:31.000Z
2019-05-13T14:26:30.000Z
creeds/static/api1.py
MaayanLab/creeds
7d580c91ca45c03e34bbc0d1928668f266ff13d9
[ "CC0-1.0" ]
2
2018-12-21T23:59:27.000Z
2019-10-24T18:26:26.000Z
import json, requests
from pprint import pprint

CREEDS_URL = 'http://amp.pharm.mssm.edu/CREEDS/'

response = requests.get(CREEDS_URL + 'search', params={'q': 'STAT3'})
if response.status_code == 200:
    pprint(response.json())
    # json.dump writes text, so the output file must be opened in text mode
    json.dump(response.json(), open('api1_result.json', 'w'), indent=4)
32.666667
69
0.721088
4af34453b4c3c543b26ea00b073366252fd5c89d
355
py
Python
admin/migrations/0041_course_color.py
rodlukas/UP-admin
08f36de0773f39c6222da82016bf1384af2cce18
[ "MIT" ]
4
2019-07-19T17:39:04.000Z
2022-03-22T21:02:15.000Z
admin/migrations/0041_course_color.py
rodlukas/UP-admin
08f36de0773f39c6222da82016bf1384af2cce18
[ "MIT" ]
53
2019-08-04T14:25:40.000Z
2022-03-26T20:30:55.000Z
admin/migrations/0041_course_color.py
rodlukas/UP-admin
08f36de0773f39c6222da82016bf1384af2cce18
[ "MIT" ]
3
2020-03-09T07:11:03.000Z
2020-09-11T01:22:50.000Z
# Generated by Django 2.2.3 on 2019-07-31 13:54

from django.db import migrations, models
23.666667
99
0.661972
4af386aea4c2e177a4c714ca6af54611fc4df7d6
673
py
Python
exercicios-Python/ex083.py
pedrosimoes-programmer/exercicios-python
150de037496d63d76086678d87425a8ccfc74573
[ "MIT" ]
null
null
null
exercicios-Python/ex083.py
pedrosimoes-programmer/exercicios-python
150de037496d63d76086678d87425a8ccfc74573
[ "MIT" ]
null
null
null
exercicios-Python/ex083.py
pedrosimoes-programmer/exercicios-python
150de037496d63d76086678d87425a8ccfc74573
[ "MIT" ]
null
null
null
# Bug-free version
expressao = str(input('Enter the expression: '))
pilhaParenteses = []
for v in expressao:
    if v == '(':
        pilhaParenteses.append('(')
    elif v == ')':
        if len(pilhaParenteses) > 0:
            pilhaParenteses.pop()
        else:
            pilhaParenteses.append(')')
            break
if len(pilhaParenteses) == 0:
    print(f'The expression {expressao} is valid.')
else:
    print(f'The expression {expressao} is invalid!')

# Buggy version
# expressao = str(input('Enter the expression: '))
# if expressao.count('(') == expressao.count(')'):
#     print('Your expression is valid.')
# else:
#     print('Your expression is invalid!')
28.041667
52
0.601783
4af4341b8d96ec6fde46ee878b92d71af06be79a
1,607
py
Python
src/inspectortodo/todo.py
code-acrobat/InspectorTodo
342bd0840d4f087cf2914f906ebc69bf2b21d9ce
[ "Apache-2.0" ]
8
2018-05-28T08:41:01.000Z
2022-03-02T08:54:54.000Z
src/inspectortodo/todo.py
code-acrobat/InspectorTodo
342bd0840d4f087cf2914f906ebc69bf2b21d9ce
[ "Apache-2.0" ]
9
2018-08-04T20:16:46.000Z
2022-03-08T14:29:47.000Z
src/inspectortodo/todo.py
code-acrobat/InspectorTodo
342bd0840d4f087cf2914f906ebc69bf2b21d9ce
[ "Apache-2.0" ]
3
2018-05-29T08:00:29.000Z
2022-02-23T11:02:58.000Z
# Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany
# Licensed under the Apache License, Version 2.0 - see LICENSE.md in project root directory

import logging
from xml.sax.saxutils import escape

log = logging.getLogger()
34.934783
118
0.632856
4af5891fa135d7fd02c534a37ddba2e1d64a9e74
9,595
py
Python
generators.py
FabLabUTFSM/fusm_usage_report
92b18ad81f97482d6e8428b6c7cbdfc23d0ca440
[ "MIT" ]
null
null
null
generators.py
FabLabUTFSM/fusm_usage_report
92b18ad81f97482d6e8428b6c7cbdfc23d0ca440
[ "MIT" ]
null
null
null
generators.py
FabLabUTFSM/fusm_usage_report
92b18ad81f97482d6e8428b6c7cbdfc23d0ca440
[ "MIT" ]
null
null
null
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import pandas as pd
import math

from datetime import datetime, time
from utils import MONTH_NAMES, month_range

"""
TODO: Finish the heatmap somehow...

def fig_uses(df, months):
    dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes']
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
    data = df[df.index.month.isin(month_range(months))]
    figure = go.Figure()

    times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index()
    day_times = times[times['Marca temporal'] == 'Monday']['Hora Inicio'].dt.time

    z_dict = dict()
    for i, d in enumerate(days):
        z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values})
    z_values = pd.DataFrame(z_dict).values

    figure.add_trace(go.Heatmap(
        x=dias,
        y=day_times,
        z=z_values))

    return figure
"""

#def uses(df, months):
#    return dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'})
41.004274
163
0.619906
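For what the commented-out TODO is aiming at, a weekday-by-hour usage heatmap with plotly reduces to handing go.Heatmap a matrix whose columns are days and rows are hours; the values below are synthetic and purely illustrative:

import numpy as np
import plotly.graph_objs as go

days = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes']
hours = [f'{h:02d}:00' for h in range(9, 19)]
z = np.random.default_rng(1).integers(0, 120, size=(len(hours), len(days)))

fig = go.Figure(go.Heatmap(x=days, y=hours, z=z))
fig.show()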
4af59f537fb6e3fa8f98dad4df206983a8ca37fd
3,651
py
Python
gengine/app/tests_old/test_groups.py
greck2908/gamification-engine
4a74086bde4505217e4b9ba36349a427a7042b4b
[ "MIT" ]
347
2015-03-03T14:25:59.000Z
2022-03-09T07:46:31.000Z
gengine/app/tests_old/test_groups.py
greck2908/gamification-engine
4a74086bde4505217e4b9ba36349a427a7042b4b
[ "MIT" ]
76
2015-03-05T23:37:31.000Z
2022-03-31T13:41:42.000Z
gengine/app/tests_old/test_groups.py
greck2908/gamification-engine
4a74086bde4505217e4b9ba36349a427a7042b4b
[ "MIT" ]
115
2015-03-04T23:47:25.000Z
2021-12-24T06:24:06.000Z
# -*- coding: utf-8 -*-

from gengine.app.tests.base import BaseDBTest
from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language
from gengine.metadata import DBSession
from gengine.app.model import AuthUser
29.92623
99
0.564229
4af6882f3b0de2bc194a5844807fd94589dcf8e9
119,159
py
Python
Lib/fontTools/designspaceLib/__init__.py
guorenxi/fonttools
cefb41e6c261eeff0062a7b4017061982ed87aa7
[ "Apache-2.0", "MIT" ]
null
null
null
Lib/fontTools/designspaceLib/__init__.py
guorenxi/fonttools
cefb41e6c261eeff0062a7b4017061982ed87aa7
[ "Apache-2.0", "MIT" ]
null
null
null
Lib/fontTools/designspaceLib/__init__.py
guorenxi/fonttools
cefb41e6c261eeff0062a7b4017061982ed87aa7
[ "Apache-2.0", "MIT" ]
null
null
null
from __future__ import annotations import collections import copy import itertools import math import os import posixpath from io import BytesIO, StringIO from textwrap import indent from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union from fontTools.misc import etree as ET from fontTools.misc import plistlib from fontTools.misc.loggingTools import LogMixin from fontTools.misc.textTools import tobytes, tostr """ designSpaceDocument - read and write designspace files """ __all__ = [ 'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor', 'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader', 'BaseDocWriter' ] # ElementTree allows to find namespace-prefixed elements, but not attributes # so we have to do it ourselves for 'xml:lang' XML_NS = "{http://www.w3.org/XML/1998/namespace}" XML_LANG = XML_NS + "lang" def posix(path): """Normalize paths using forward slash to work also on Windows.""" new_path = posixpath.join(*path.split(os.path.sep)) if path.startswith('/'): # The above transformation loses absolute paths new_path = '/' + new_path elif path.startswith(r'\\'): # The above transformation loses leading slashes of UNC path mounts new_path = '//' + new_path return new_path def posixpath_property(private_name): """Generate a propery that holds a path always using forward slashes.""" return property(getter, setter) def getFamilyName(self, languageCode="en"): """Getter for :attr:`localisedFamilyName` .. versionadded:: 5.0 """ return self.localisedFamilyName.get(languageCode) def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: """Get the complete design location of this source, from its :attr:`designLocation` and the document's axis defaults. .. versionadded:: 5.0 """ result: AnisotropicLocationDict = {} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] else: result[axis.name] = axis.map_forward(axis.default) return result class RuleDescriptor(SimpleDescriptor): """Represents the rule descriptor element: a set of glyph substitutions to trigger conditionally in some parts of the designspace. .. code:: python r1 = RuleDescriptor() r1.name = "unique.rule.name" r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)]) r1.conditionSets.append([dict(...), dict(...)]) r1.subs.append(("a", "a.alt")) .. code:: xml <!-- optional: list of substitution rules --> <rules> <rule name="vertical.bars"> <conditionset> <condition minimum="250.000000" maximum="750.000000" name="weight"/> <condition minimum="100" name="width"/> <condition minimum="10" maximum="40" name="optical"/> </conditionset> <sub name="cent" with="cent.alt"/> <sub name="dollar" with="dollar.alt"/> </rule> </rules> """ _attrs = ['name', 'conditionSets', 'subs'] # what do we need here def evaluateRule(rule, location): """Return True if any of the rule's conditionsets matches the given location.""" return any(evaluateConditions(c, location) for c in rule.conditionSets) def evaluateConditions(conditions, location): """Return True if all the conditions matches the given location. - If a condition has no minimum, check for < maximum. - If a condition has no maximum, check for > minimum. 
""" for cd in conditions: value = location[cd['name']] if cd.get('minimum') is None: if value > cd['maximum']: return False elif cd.get('maximum') is None: if cd['minimum'] > value: return False elif not cd['minimum'] <= value <= cd['maximum']: return False return True def processRules(rules, location, glyphNames): """Apply these rules at this location to these glyphnames. Return a new list of glyphNames with substitutions applied. - rule order matters """ newNames = [] for rule in rules: if evaluateRule(rule, location): for name in glyphNames: swap = False for a, b in rule.subs: if name == a: swap = True break if swap: newNames.append(b) else: newNames.append(name) glyphNames = newNames newNames = [] return glyphNames AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]] SimpleLocationDict = Dict[str, float] def clearLocation(self, axisName: Optional[str] = None): """Clear all location-related fields. Ensures that :attr:``designLocation`` and :attr:``userLocation`` are dictionaries (possibly empty if clearing everything). In order to update the location of this instance wholesale, a user should first clear all the fields, then change the field(s) for which they have data. .. code:: python instance.clearLocation() instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} instance.userLocation = {'Opsz': 16} In order to update a single axis location, the user should only clear that axis, then edit the values: .. code:: python instance.clearLocation('Weight') instance.designLocation['Weight'] = (34, 36.5) Args: axisName: if provided, only clear the location for that axis. .. versionadded:: 5.0 """ self.locationLabel = None if axisName is None: self.designLocation = {} self.userLocation = {} else: if self.designLocation is None: self.designLocation = {} if axisName in self.designLocation: del self.designLocation[axisName] if self.userLocation is None: self.userLocation = {} if axisName in self.userLocation: del self.userLocation[axisName] def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]: """Get the :class:`LocationLabelDescriptor` instance that matches this instances's :attr:`locationLabel`. Raises if the named label can't be found. .. versionadded:: 5.0 """ if self.locationLabel is None: return None label = doc.getLocationLabel(self.locationLabel) if label is None: raise DesignSpaceDocumentError( 'InstanceDescriptor.getLocationLabelDescriptor(): ' f'unknown location label `{self.locationLabel}` in instance `{self.name}`.' ) return label def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: """Get the complete design location of this instance, by combining data from the various location fields, default axis values and mappings, and top-level location labels. The source of truth for this instance's location is determined for each axis independently by taking the first not-None field in this list: - ``locationLabel``: the location along this axis is the same as the matching STAT format 4 label. No anisotropy. - ``designLocation[axisName]``: the explicit design location along this axis, possibly anisotropic. - ``userLocation[axisName]``: the explicit user location along this axis. No anisotropy. - ``axis.default``: default axis value. No anisotropy. .. 
versionadded:: 5.0 """ label = self.getLocationLabelDescriptor(doc) if label is not None: return doc.map_forward(label.userLocation) # type: ignore result: AnisotropicLocationDict = {} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] elif axis.name in self.userLocation: result[axis.name] = axis.map_forward(self.userLocation[axis.name]) else: result[axis.name] = axis.map_forward(axis.default) return result def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict: """Get the complete user location for this instance. .. seealso:: :meth:`getFullDesignLocation` .. versionadded:: 5.0 """ return doc.map_backward(self.getFullDesignLocation(doc)) def tagForAxisName(name): # try to find or make a tag name for this axis name names = { 'weight': ('wght', dict(en = 'Weight')), 'width': ('wdth', dict(en = 'Width')), 'optical': ('opsz', dict(en = 'Optical Size')), 'slant': ('slnt', dict(en = 'Slant')), 'italic': ('ital', dict(en = 'Italic')), } if name.lower() in names: return names[name.lower()] if len(name) < 4: tag = name + "*" * (4 - len(name)) else: tag = name[:4] return tag, dict(en=name) class AbstractAxisDescriptor(SimpleDescriptor): flavor = "axis" class AxisDescriptor(AbstractAxisDescriptor): """ Simple container for the axis data. Add more localisations? .. code:: python a1 = AxisDescriptor() a1.minimum = 1 a1.maximum = 1000 a1.default = 400 a1.name = "weight" a1.tag = "wght" a1.labelNames['fa-IR'] = "" a1.labelNames['en'] = "Wght" a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)] a1.axisOrdering = 1 a1.axisLabels = [ AxisLabelDescriptor(name="Regular", userValue=400, elidable=True) ] doc.addAxis(a1) """ _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels'] def map_forward(self, v): """Maps value from axis mapping's input (user) to output (design).""" from fontTools.varLib.models import piecewiseLinearMap if not self.map: return v return piecewiseLinearMap(v, {k: v for k, v in self.map}) def map_backward(self, v): """Maps value from axis mapping's output (design) to input (user).""" from fontTools.varLib.models import piecewiseLinearMap if isinstance(v, tuple): v = v[0] if not self.map: return v return piecewiseLinearMap(v, {v: k for k, v in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor): """Container for discrete axis data. Use this for axes that do not interpolate. The main difference from a continuous axis is that a continuous axis has a ``minimum`` and ``maximum``, while a discrete axis has a list of ``values``. Example: an Italic axis with 2 stops, Roman and Italic, that are not compatible. The axis still allows to bind together the full font family, which is useful for the STAT table, however it can't become a variation axis in a VF. .. code:: python a2 = DiscreteAxisDescriptor() a2.values = [0, 1] a2.name = "Italic" a2.tag = "ITAL" a2.labelNames['fr'] = "Italique" a2.map = [(0, 0), (1, -11)] a2.axisOrdering = 2 a2.axisLabels = [ AxisLabelDescriptor(name="Roman", userValue=0, elidable=True) ] doc.addAxis(a2) .. versionadded:: 5.0 """ flavor = "axis" _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels') def map_forward(self, value): """Maps value from axis mapping's input to output. Returns value unchanged if no mapping entry is found. Note: for discrete axes, each value must have its mapping entry, if you intend that value to be mapped. 
""" return next((v for k, v in self.map if k == value), value) def map_backward(self, value): """Maps value from axis mapping's output to input. Returns value unchanged if no mapping entry is found. Note: for discrete axes, each value must have its mapping entry, if you intend that value to be mapped. """ if isinstance(value, tuple): value = value[0] return next((k for k, v in self.map if v == value), value) class AxisLabelDescriptor(SimpleDescriptor): """Container for axis label data. Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3). All values are user values. See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format of the Axis value depends on which field are filled-in, see :meth:`getFormat` .. versionadded:: 5.0 """ flavor = "label" _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames') def getFormat(self) -> int: """Determine which format of STAT Axis value to use to encode this label. =========== ========= =========== =========== =============== STAT Format userValue userMinimum userMaximum linkedUserValue =========== ========= =========== =========== =============== 1 2 3 =========== ========= =========== =========== =============== """ if self.linkedUserValue is not None: return 3 if self.userMinimum is not None or self.userMaximum is not None: return 2 return 1 def write(self, pretty=True, encoding="UTF-8", xml_declaration=True): self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple) if self.documentObject.axes or self.documentObject.elidedFallbackName is not None: axesElement = ET.Element("axes") if self.documentObject.elidedFallbackName is not None: axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName self.root.append(axesElement) for axisObject in self.documentObject.axes: self._addAxis(axisObject) if self.documentObject.locationLabels: labelsElement = ET.Element("labels") for labelObject in self.documentObject.locationLabels: self._addLocationLabel(labelsElement, labelObject) self.root.append(labelsElement) if self.documentObject.rules: if getattr(self.documentObject, "rulesProcessingLast", False): attributes = {"processing": "last"} else: attributes = {} self.root.append(ET.Element("rules", attributes)) for ruleObject in self.documentObject.rules: self._addRule(ruleObject) if self.documentObject.sources: self.root.append(ET.Element("sources")) for sourceObject in self.documentObject.sources: self._addSource(sourceObject) if self.documentObject.variableFonts: variableFontsElement = ET.Element("variable-fonts") for variableFont in self.documentObject.variableFonts: self._addVariableFont(variableFontsElement, variableFont) self.root.append(variableFontsElement) if self.documentObject.instances: self.root.append(ET.Element("instances")) for instanceObject in self.documentObject.instances: self._addInstance(instanceObject) if self.documentObject.lib: self._addLib(self.root, self.documentObject.lib, 2) tree = ET.ElementTree(self.root) tree.write( self.path, encoding=encoding, method='xml', xml_declaration=xml_declaration, pretty_print=pretty, ) def _getEffectiveFormatTuple(self): """Try to use the version specified in the document, or a sufficiently recent version to be able to encode what the document contains. 
""" minVersion = self.documentObject.formatTuple if ( any( isinstance(axis, DiscreteAxisDescriptor) or axis.axisOrdering is not None or axis.axisLabels for axis in self.documentObject.axes ) or self.documentObject.locationLabels or any( source.localisedFamilyName for source in self.documentObject.sources ) or self.documentObject.variableFonts or any( instance.locationLabel or instance.userLocation for instance in self.documentObject.instances ) ): if minVersion < (5, 0): minVersion = (5, 0) return minVersion def _makeLocationElement(self, locationObject, name=None): """ Convert Location dict to a locationElement.""" locElement = ET.Element("location") if name is not None: locElement.attrib['name'] = name validatedLocation = self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items(): if axisName in validatedLocation: # only accept values we know validatedLocation[axisName] = axisValue for dimensionName, dimensionValue in validatedLocation.items(): dimElement = ET.Element('dimension') dimElement.attrib['name'] = dimensionName if type(dimensionValue) == tuple: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0]) dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1]) else: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue) locElement.append(dimElement) return locElement, validatedLocation def intOrFloat(self, num): if int(num) == num: return "%d" % num return ("%f" % num).rstrip('0').rstrip('.') def _addRule(self, ruleObject): # if none of the conditions have minimum or maximum values, do not add the rule. ruleElement = ET.Element('rule') if ruleObject.name is not None: ruleElement.attrib['name'] = ruleObject.name for conditions in ruleObject.conditionSets: conditionsetElement = ET.Element('conditionset') for cond in conditions: if cond.get('minimum') is None and cond.get('maximum') is None: # neither is defined, don't add this condition continue conditionElement = ET.Element('condition') conditionElement.attrib['name'] = cond.get('name') if cond.get('minimum') is not None: conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum')) if cond.get('maximum') is not None: conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum')) conditionsetElement.append(conditionElement) if len(conditionsetElement): ruleElement.append(conditionsetElement) for sub in ruleObject.subs: subElement = ET.Element('sub') subElement.attrib['name'] = sub[0] subElement.attrib['with'] = sub[1] ruleElement.append(subElement) if len(ruleElement): self.root.findall('.rules')[0].append(ruleElement) class BaseDocReader(LogMixin): axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor locationLabelDescriptorClass = LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontsDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor def locationFromElement(self, element): """Read a nested ``<location>`` element inside the given ``element``. .. 
versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) """ elementLocation = (None, None) for locationElement in element.findall('.location'): elementLocation = self.readLocationElement(locationElement) break return elementLocation def readLocationElement(self, locationElement): """Read a ``<location>`` element. .. versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) """ if self._strictAxisNames and not self.documentObject.axes: raise DesignSpaceDocumentError("No axes defined") userLoc = {} designLoc = {} for dimensionElement in locationElement.findall(".dimension"): dimName = dimensionElement.attrib.get("name") if self._strictAxisNames and dimName not in self.axisDefaults: # In case the document contains no axis definitions, self.log.warning("Location with undefined axis: \"%s\".", dimName) continue userValue = xValue = yValue = None try: userValue = dimensionElement.attrib.get('uservalue') if userValue is not None: userValue = float(userValue) except ValueError: self.log.warning("ValueError in readLocation userValue %3.3f", userValue) try: xValue = dimensionElement.attrib.get('xvalue') if xValue is not None: xValue = float(xValue) except ValueError: self.log.warning("ValueError in readLocation xValue %3.3f", xValue) try: yValue = dimensionElement.attrib.get('yvalue') if yValue is not None: yValue = float(yValue) except ValueError: self.log.warning("ValueError in readLocation yValue %3.3f", yValue) if userValue is None == xValue is None: raise DesignSpaceDocumentError(f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"') if yValue is not None: if xValue is None: raise DesignSpaceDocumentError(f'Missing xvalue="" for the location dimension "{dimName}"" with yvalue="{yValue}"') designLoc[dimName] = (xValue, yValue) elif xValue is not None: designLoc[dimName] = xValue else: userLoc[dimName] = userValue return designLoc, userLoc def readLibElement(self, libElement, instanceObject): """Read the lib element for the given instance.""" instanceObject.lib = plistlib.fromtree(libElement[0]) def readInfoElement(self, infoElement, instanceObject): """ Read the info element.""" instanceObject.info = True def readGlyphElement(self, glyphElement, instanceObject): """ Read the glyph element, which could look like either one of these: .. code-block:: xml <glyph name="b" unicode="0x62"/> <glyph name="b"/> <glyph name="b"> <master location="location-token-bbb" source="master-token-aaa2"/> <master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/> <note> This is an instance from an anisotropic interpolation. 
</note> </glyph> """ glyphData = {} glyphName = glyphElement.attrib.get('name') if glyphName is None: raise DesignSpaceDocumentError("Glyph object without name attribute") mute = glyphElement.attrib.get("mute") if mute == "1": glyphData['mute'] = True # unicode unicodes = glyphElement.attrib.get('unicode') if unicodes is not None: try: unicodes = [int(u, 16) for u in unicodes.split(" ")] glyphData['unicodes'] = unicodes except ValueError: raise DesignSpaceDocumentError("unicode values %s are not integers" % unicodes) for noteElement in glyphElement.findall('.note'): glyphData['note'] = noteElement.text break designLocation, userLocation = self.locationFromElement(glyphElement) if userLocation: raise DesignSpaceDocumentError(f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").') if designLocation is not None: glyphData['instanceLocation'] = designLocation glyphSources = None for masterElement in glyphElement.findall('.masters/master'): fontSourceName = masterElement.attrib.get('source') designLocation, userLocation = self.locationFromElement(masterElement) if userLocation: raise DesignSpaceDocumentError(f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").') masterGlyphName = masterElement.attrib.get('glyphname') if masterGlyphName is None: # if we don't read a glyphname, use the one we have masterGlyphName = glyphName d = dict(font=fontSourceName, location=designLocation, glyphName=masterGlyphName) if glyphSources is None: glyphSources = [] glyphSources.append(d) if glyphSources is not None: glyphData['masters'] = glyphSources instanceObject.glyphs[glyphName] = glyphData def readLib(self): """Read the lib element for the whole document.""" for libElement in self.root.findall(".lib"): self.documentObject.lib = plistlib.fromtree(libElement[0]) class DesignSpaceDocument(LogMixin, AsDictMixin): """The DesignSpaceDocument object can read and write ``.designspace`` data. It imports the axes, sources, variable fonts and instances to very basic **descriptor** objects that store the data in attributes. Data is added to the document by creating such descriptor objects, filling them with data and then adding them to the document. This makes it easy to integrate this object in different contexts. The **DesignSpaceDocument** object can be subclassed to work with different objects, as long as they have the same attributes. Reader and Writer objects can be subclassed as well. **Note:** Python attribute names are usually camelCased, the corresponding `XML <document-xml-structure>`_ attributes are usually all lowercase. .. code:: python from fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace") doc.formatVersion doc.elidedFallbackName doc.axes doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources doc.variableFonts doc.instances doc.lib """ def tostring(self, encoding=None): """Returns the designspace as a string. Default encoding ``utf-8``.""" if encoding is str or ( encoding is not None and encoding.lower() == "unicode" ): f = StringIO() xml_declaration = False elif encoding is None or encoding == "utf-8": f = BytesIO() encoding = "UTF-8" xml_declaration = True else: raise ValueError("unsupported encoding: '%s'" % encoding) writer = self.writerClass(f, self) writer.write(encoding=encoding, xml_declaration=xml_declaration) return f.getvalue() def read(self, path): """Read a designspace file from ``path`` and populates the fields of ``self`` with the data. 
""" if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path) reader = self.readerClass(path, self) reader.read() if self.sources: self.findDefault() def write(self, path): """Write this designspace to ``path``.""" if hasattr(path, "__fspath__"): # support os.PathLike objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path) self.updatePaths() writer = self.writerClass(path, self) writer.write() def updatePaths(self): """ Right before we save we need to identify and respond to the following situations: In each descriptor, we have to do the right thing for the filename attribute. :: case 1. descriptor.filename == None descriptor.path == None -- action: write as is, descriptors will not have a filename attr. useless, but no reason to interfere. case 2. descriptor.filename == "../something" descriptor.path == None -- action: write as is. The filename attr should not be touched. case 3. descriptor.filename == None descriptor.path == "~/absolute/path/there" -- action: calculate the relative path for filename. We're not overwriting some other value for filename, it should be fine case 4. descriptor.filename == '../somewhere' descriptor.path == "~/absolute/path/there" -- action: there is a conflict between the given filename, and the path. So we know where the file is relative to the document. Can't guess why they're different, we just choose for path to be correct and update filename. """ assert self.path is not None for descriptor in self.sources + self.instances: if descriptor.path is not None: # case 3 and 4: filename gets updated and relativized descriptor.filename = self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor: SourceDescriptor): """Add the given ``sourceDescriptor`` to ``doc.sources``.""" self.sources.append(sourceDescriptor) def addSourceDescriptor(self, **kwargs): """Instantiate a new :class:`SourceDescriptor` using the given ``kwargs`` and add it to ``doc.sources``. """ source = self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source) return source def addInstance(self, instanceDescriptor: InstanceDescriptor): """Add the given ``instanceDescriptor`` to :attr:`instances`.""" self.instances.append(instanceDescriptor) def addInstanceDescriptor(self, **kwargs): """Instantiate a new :class:`InstanceDescriptor` using the given ``kwargs`` and add it to :attr:`instances`. """ instance = self.writerClass.instanceDescriptorClass(**kwargs) self.addInstance(instance) return instance def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): """Add the given ``axisDescriptor`` to :attr:`axes`.""" self.axes.append(axisDescriptor) def addAxisDescriptor(self, **kwargs): """Instantiate a new :class:`AxisDescriptor` using the given ``kwargs`` and add it to :attr:`axes`. The axis will be and instance of :class:`DiscreteAxisDescriptor` if the ``kwargs`` provide a ``value``, or a :class:`AxisDescriptor` otherwise. """ if "values" in kwargs: axis = self.writerClass.discreteAxisDescriptorClass(**kwargs) else: axis = self.writerClass.axisDescriptorClass(**kwargs) self.addAxis(axis) return axis def addRule(self, ruleDescriptor: RuleDescriptor): """Add the given ``ruleDescriptor`` to :attr:`rules`.""" self.rules.append(ruleDescriptor) def addRuleDescriptor(self, **kwargs): """Instantiate a new :class:`RuleDescriptor` using the given ``kwargs`` and add it to :attr:`rules`. 
""" rule = self.writerClass.ruleDescriptorClass(**kwargs) self.addRule(rule) return rule def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor): """Add the given ``variableFontDescriptor`` to :attr:`variableFonts`. .. versionadded:: 5.0 """ self.variableFonts.append(variableFontDescriptor) def addVariableFontDescriptor(self, **kwargs): """Instantiate a new :class:`VariableFontDescriptor` using the given ``kwargs`` and add it to :attr:`variableFonts`. .. versionadded:: 5.0 """ variableFont = self.writerClass.variableFontDescriptorClass(**kwargs) self.addVariableFont(variableFont) return variableFont def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor): """Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`. .. versionadded:: 5.0 """ self.locationLabels.append(locationLabelDescriptor) def addLocationLabelDescriptor(self, **kwargs): """Instantiate a new :class:`LocationLabelDescriptor` using the given ``kwargs`` and add it to :attr:`locationLabels`. .. versionadded:: 5.0 """ locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs) self.addLocationLabel(locationLabel) return locationLabel def newDefaultLocation(self): """Return a dict with the default location in design space coordinates.""" # Without OrderedDict, output XML would be non-deterministic. # https://github.com/LettError/designSpaceDocument/issues/10 loc = collections.OrderedDict() for axisDescriptor in self.axes: loc[axisDescriptor.name] = axisDescriptor.map_forward( axisDescriptor.default ) return loc def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]: """Return the :class:`LocationLabel` that matches the given ``userLocation``, or ``None`` if no such label exists. .. versionadded:: 5.0 """ return next( (label for label in self.locationLabels if label.userLocation == userLocation), None ) def updateFilenameFromPath(self, masters=True, instances=True, force=False): """Set a descriptor filename attr from the path and this document path. If the filename attribute is not None: skip it. """ if masters: for descriptor in self.sources: if descriptor.filename is not None and not force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) if instances: for descriptor in self.instances: if descriptor.filename is not None and not force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) def newAxisDescriptor(self): """Ask the writer class to make us a new axisDescriptor.""" return self.writerClass.getAxisDecriptor() def newSourceDescriptor(self): """Ask the writer class to make us a new sourceDescriptor.""" return self.writerClass.getSourceDescriptor() def newInstanceDescriptor(self): """Ask the writer class to make us a new instanceDescriptor.""" return self.writerClass.getInstanceDescriptor() def getAxisOrder(self): """Return a list of axis names, in the same order as defined in the document.""" names = [] for axisDescriptor in self.axes: names.append(axisDescriptor.name) return names def getAxis(self, name): """Return the axis with the given ``name``, or ``None`` if no such axis exists.""" for axisDescriptor in self.axes: if axisDescriptor.name == name: return axisDescriptor return None def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]: """Return the top-level location label with the given ``name``, or ``None`` if no such label exists. .. 
versionadded:: 5.0 """ for label in self.locationLabels: if label.name == name: return label return None def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict: """Map a user location to a design location. Assume that missing coordinates are at the default location for that axis. Note: the output won't be anisotropic, only the xvalue is set. .. versionadded:: 5.0 """ return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for axis in self.axes } def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict: """Map a design location to a user location. Assume that missing coordinates are at the default location for that axis. When the input has anisotropic locations, only the xvalue is used. .. versionadded:: 5.0 """ return { axis.name: ( axis.map_backward(designLocation[axis.name]) if axis.name in designLocation else axis.default ) for axis in self.axes } def findDefault(self): """Set and return SourceDescriptor at the default location or None. The default location is the set of all `default` values in user space of all axes. This function updates the document's :attr:`default` value. .. versionchanged:: 5.0 Allow the default source to not specify some of the axis values, and they are assumed to be the default. See :meth:`SourceDescriptor.getFullDesignLocation()` """ self.default = None # Convert the default location from user space to design space before comparing # it against the SourceDescriptor locations (always in design space). defaultDesignLocation = self.newDefaultLocation() for sourceDescriptor in self.sources: if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation: self.default = sourceDescriptor return sourceDescriptor return None def normalizeLocation(self, location): """Return a dict with normalized axis values.""" from fontTools.varLib.models import normalizeValue new = {} for axis in self.axes: if axis.name not in location: # skipping this dimension it seems continue value = location[axis.name] # 'anisotropic' location, take first coord only if isinstance(value, tuple): value = value[0] triple = [ axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum) ] new[axis.name] = normalizeValue(value, triple) return new def normalize(self): """ Normalise the geometry of this designspace: - scale all the locations of all masters and instances to the -1 - 0 - 1 value. - we need the axis data to do the scaling, so we do those last. 
""" # masters for item in self.sources: item.location = self.normalizeLocation(item.location) # instances for item in self.instances: # glyph masters for this instance for _, glyphData in item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) for glyphMaster in glyphData['masters']: glyphMaster['location'] = self.normalizeLocation(glyphMaster['location']) item.location = self.normalizeLocation(item.location) # the axes for axis in self.axes: # scale the map first newMap = [] for inputValue, outputValue in axis.map: newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name) newMap.append((inputValue, newOutputValue)) if newMap: axis.map = newMap # finally the axis values minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name) default = self.normalizeLocation({axis.name: axis.default}).get(axis.name) # and set them in the axis.minimum axis.minimum = minimum axis.maximum = maximum axis.default = default # now the rules for rule in self.rules: newConditionSets = [] for conditions in rule.conditionSets: newConditions = [] for cond in conditions: if cond.get('minimum') is not None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else: minimum = None if cond.get('maximum') is not None: maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name']) else: maximum = None newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum)) newConditionSets.append(newConditions) rule.conditionSets = newConditionSets def loadSourceFonts(self, opener, **kwargs): """Ensure SourceDescriptor.font attributes are loaded, and return list of fonts. Takes a callable which initializes a new font object (e.g. TTFont, or defcon.Font, etc.) from the SourceDescriptor.path, and sets the SourceDescriptor.font attribute. If the font attribute is already not None, it is not loaded again. Fonts with the same path are only loaded once and shared among SourceDescriptors. For example, to load UFO sources using defcon: designspace = DesignSpaceDocument.fromfile("path/to/my.designspace") designspace.loadSourceFonts(defcon.Font) Or to load masters as FontTools binary fonts, including extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args: opener (Callable): takes one required positional argument, the source.path, and an optional list of keyword arguments, and returns a new font object loaded from the path. **kwargs: extra options passed on to the opener function. Returns: List of font objects in the order they appear in the sources list. """ # we load fonts with the same source.path only once loaded = {} fonts = [] for source in self.sources: if source.font is not None: # font already loaded fonts.append(source.font) continue if source.path in loaded: source.font = loaded[source.path] else: if source.path is None: raise DesignSpaceDocumentError( "Designspace source '%s' has no 'path' attribute" % (source.name or "<Unknown>") ) source.font = opener(source.path, **kwargs) loaded[source.path] = source.font fonts.append(source.font) return fonts def getVariableFonts(self) -> List[VariableFontDescriptor]: """Return all variable fonts defined in this document, or implicit variable fonts that can be built from the document's continuous axes. 
In the case of Designspace documents before version 5, the whole document was implicitly describing a variable font that covers the whole space. In version 5 and above documents, there can be as many variable fonts as there are locations on discrete axes. .. seealso:: :func:`splitInterpolable` .. versionadded:: 5.0 """ if self.variableFonts: return self.variableFonts variableFonts = [] discreteAxes = [] rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = [] for axis in self.axes: if isinstance(axis, DiscreteAxisDescriptor): discreteAxes.append(axis) else: rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name)) valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) for values in valueCombinations: basename = None if self.filename is not None: basename = os.path.splitext(self.filename)[0] + "-VF" if self.path is not None: basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF" if basename is None: basename = "VF" axisNames = "".join([f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)]) variableFonts.append(VariableFontDescriptor( name=f"{basename}{axisNames}", axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value) for axis, value in zip(discreteAxes, values) ] )) return variableFonts def deepcopyExceptFonts(self): """Allow deep-copying a DesignSpace document without deep-copying attached UFO fonts or TTFont objects. The :attr:`font` attribute is shared by reference between the original and the copy. .. versionadded:: 5.0 """ fonts = [source.font for source in self.sources] try: for source in self.sources: source.font = None res = copy.deepcopy(self) for source, font in zip(res.sources, fonts): res.font = font return res finally: for source, font in zip(self.sources, fonts): source.font = font
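# Hedged usage sketch (editor's illustration, not part of the original module):
# exercising the axis-mapping and rule helpers whose docstrings appear above.
# All values are invented.
if __name__ == "__main__":
    wght = AxisDescriptor()
    wght.name, wght.tag = "weight", "wght"
    wght.minimum, wght.default, wght.maximum = 100, 400, 900
    wght.map = [(100, 20), (400, 80), (900, 160)]  # (user, design) pairs
    assert wght.map_forward(400) == 80   # user -> design
    assert wght.map_backward(80) == 400  # design -> user

    rule = RuleDescriptor()
    rule.name = "bold.alts"
    rule.conditionSets.append([dict(name="weight", minimum=600, maximum=900)])
    rule.subs.append(("a", "a.bold"))
    # the substitution fires only where a conditionset matches the location
    assert processRules([rule], {"weight": 700}, ["a", "b"]) == ["a.bold", "b"]
    assert processRules([rule], {"weight": 400}, ["a", "b"]) == ["a", "b"]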
40.406579
147
0.612266
4af82655248e89ae648896a2197ee327a71bd7a6
3,230
py
Python
ax/models/torch/posterior_mean.py
dme65/Ax
c460eab90d464df87e6478b5765fd02fb5126adb
[ "MIT" ]
1
2021-01-11T18:16:28.000Z
2021-01-11T18:16:28.000Z
ax/models/torch/posterior_mean.py
dme65/Ax
c460eab90d464df87e6478b5765fd02fb5126adb
[ "MIT" ]
null
null
null
ax/models/torch/posterior_mean.py
dme65/Ax
c460eab90d464df87e6478b5765fd02fb5126adb
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Optional, Tuple

import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.monte_carlo import qSimpleRegret
from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective
from botorch.acquisition.utils import get_infeasible_cost
from botorch.models.model import Model
from botorch.utils import (
    get_objective_weights_transform,
    get_outcome_constraint_transforms,
)
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.transforms import squeeze_last_dim
from torch import Tensor


def get_PosteriorMean(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a PosteriorMean acquisition function.

    Note: If no OutcomeConstraints given, return an analytic acquisition function.
    This requires {optimizer_kwargs: {joint_optimization: True}} or an optimizer
    that does not assume pending point support.

    Args:
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).

    Returns:
        PosteriorMean: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if kwargs.get("chebyshev_scalarization", False):
        obj_tf = get_chebyshev_scalarization(
            weights=objective_weights,
            Y=squeeze_last_dim(torch.stack(kwargs.get("Ys")).transpose(0, 1)),
        )
    else:
        obj_tf = get_objective_weights_transform(objective_weights)
    if outcome_constraints is None:
        objective = GenericMCObjective(objective=obj_tf)
    else:
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
        objective = ConstrainedMCObjective(
            objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    # Use qSimpleRegret, not analytic posterior, to handle arbitrary objective fns.
    acq_func = qSimpleRegret(model, objective=objective)
    return acq_func
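# Hedged usage sketch (editor's illustration, not part of the original module):
# a SingleTaskGP stands in for `model`; shapes follow the docstring above and
# all data is invented.
if __name__ == "__main__":
    from botorch.models import SingleTaskGP

    train_X = torch.rand(8, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    acqf = get_PosteriorMean(
        model=SingleTaskGP(train_X, train_Y),
        objective_weights=torch.tensor([1.0], dtype=torch.double),
        X_observed=train_X,
    )
    # evaluate the (MC) posterior mean of one q=1 candidate
    print(acqf(train_X[:1].unsqueeze(1)))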
41.948052
84
0.728793
4af8cc653e14393ff950e095171d139b4a633baf
2,240
py
Python
src/drivers/velodyne_nodes/test/velodyne_node.test.py
fanyu2021/fyAutowareAuto
073661c0634de671ff01bda8a316a5ce10c96ca9
[ "Apache-2.0" ]
null
null
null
src/drivers/velodyne_nodes/test/velodyne_node.test.py
fanyu2021/fyAutowareAuto
073661c0634de671ff01bda8a316a5ce10c96ca9
[ "Apache-2.0" ]
null
null
null
src/drivers/velodyne_nodes/test/velodyne_node.test.py
fanyu2021/fyAutowareAuto
073661c0634de671ff01bda8a316a5ce10c96ca9
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 the Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Co-developed by Tier IV, Inc. and Apex.AI, Inc.

import ament_index_python
import launch
import launch.actions
import launch_ros.actions

import lidar_integration

# Test cases are created automatically by the lidar_integration package. We just need to
# instantiate them
active = lidar_integration.make_active_tests()
after_shutdown = lidar_integration.make_post_shutdown_tests()
32
89
0.692857
4af90b86c50a3ccef625f31f883eb2072c6ed40c
1,425
py
Python
example.py
manhcuogntin4/Color-transfer
14b139efa86bb49a07a118c905d9d82cd7ad10d3
[ "MIT" ]
null
null
null
example.py
manhcuogntin4/Color-transfer
14b139efa86bb49a07a118c905d9d82cd7ad10d3
[ "MIT" ]
null
null
null
example.py
manhcuogntin4/Color-transfer
14b139efa86bb49a07a118c905d9d82cd7ad10d3
[ "MIT" ]
1
2020-04-13T13:17:58.000Z
2020-04-13T13:17:58.000Z
# USAGE
# python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg

# import the necessary packages
from color_transfer import color_transfer
import numpy as np
import argparse
import cv2

def show_image(title, image, width=300):
    # NOTE: the body of this helper was lost in extraction; this is a
    # conventional reconstruction (resize to a fixed width, then display).
    r = width / float(image.shape[1])
    dim = (width, int(image.shape[0] * r))
    resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    cv2.imshow(title, resized)

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--source", required = True, help = "Path to the source image")
ap.add_argument("-t", "--target", required = True, help = "Path to the target image")
ap.add_argument("-o", "--output", help = "Path to the output image (optional)")
args = vars(ap.parse_args())

# load the images
source = cv2.imread(args["source"])
target = cv2.imread(args["target"])

# transfer the color distribution from the source image
# to the target image
transfer = color_transfer(source, target)

# check to see if the output image should be saved
if args["output"] is not None:
    cv2.imwrite(args["output"], transfer)

# show the images and wait for a key press
show_image("Source", source)
show_image("Target", target)
show_image("Transfer", transfer)
cv2.waitKey(0)
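# Hedged programmatic sketch (editor's illustration, not part of the original
# script): the same transfer without argparse; the file names are placeholders.
#
#   src = cv2.imread("images/ocean_sunset.jpg")
#   tgt = cv2.imread("images/ocean_day.jpg")
#   cv2.imwrite("transfer.jpg", color_transfer(src, tgt))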
30.978261
82
0.733333
4afa3809e5300d1250cfab7d62f27391e130c231
9,060
py
Python
scripts/registration_pipeline.py
heethesh/Argoverse-HDMap-Update
61e9bf965a1fa7a0c74a2671457a2778d849bfe5
[ "Apache-2.0" ]
null
null
null
scripts/registration_pipeline.py
heethesh/Argoverse-HDMap-Update
61e9bf965a1fa7a0c74a2671457a2778d849bfe5
[ "Apache-2.0" ]
null
null
null
scripts/registration_pipeline.py
heethesh/Argoverse-HDMap-Update
61e9bf965a1fa7a0c74a2671457a2778d849bfe5
[ "Apache-2.0" ]
1
2020-09-08T04:32:21.000Z
2020-09-08T04:32:21.000Z
import copy
import numpy as np
import open3d as o3d
from tqdm import tqdm
from scipy import stats

import utils_o3d as utils


if __name__ == '__main__':
    run()
37.90795
119
0.703422
4afb20e82e1f9cc5d13cde9492b76ec1886669d1
36,825
py
Python
neo4j/aio/__init__.py
michaelcraige/neo4j-python-driver
27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a
[ "Apache-2.0" ]
1
2021-05-18T14:11:39.000Z
2021-05-18T14:11:39.000Z
neo4j/aio/__init__.py
michaelcraige/neo4j-python-driver
27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a
[ "Apache-2.0" ]
null
null
null
neo4j/aio/__init__.py
michaelcraige/neo4j-python-driver
27d0ce3f1941c4b29d0f050c6186a4f48ae4d30a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from asyncio import (
    IncompleteReadError,
    Lock,
    StreamReader,
    StreamReaderProtocol,
    StreamWriter,
    get_event_loop,
    wait,
)
from collections import deque
from logging import getLogger
from os import strerror
from random import choice
from ssl import SSLError
from sys import platform, version_info
from time import perf_counter

from neo4j.addressing import Address
from neo4j.aio._collections import WaitingList
from neo4j.aio._mixins import Addressable, Breakable
from neo4j.errors import (
    BoltError,
    BoltConnectionError,
    BoltSecurityError,
    BoltConnectionBroken,
    BoltHandshakeError,
    Neo4jAvailabilityError,
)
from neo4j.api import Version
from neo4j.conf import Config, PoolConfig
from neo4j.meta import version as neo4j_version
from neo4j.routing import RoutingTable


log = getLogger(__name__)

MAGIC = b"\x60\x60\xB0\x17"


async def close(self):
    """ Close the connection.
    """
    if self.closed:
        return
    if not self.broken:
        log.debug("[#%04X] S: <HANGUP>", self.local_address.port_number)
        self.__writer.write_eof()
    self.__writer.close()
    try:
        await self.__writer.wait_closed()
    except BoltConnectionBroken:
        pass
    self.__closed = True


# async def main():
#     from neo4j.debug import watch; watch("neo4j")
#     neo4j = await Neo4j.open(":17601 :17602 :17603", auth=("neo4j", "password"))
#     await neo4j.update_routing_table()
#     print(neo4j.routing_table)
#
#
# if __name__ == "__main__":
#     run(main())
37.34787
107
0.609993
4afb4cc5b7dcd90ef9395d1a97095b2b0c885c49
1,831
py
Python
python/setup.py
bubriks/feature-store-api
fa286f257b87a09c081e86811b853b3e564ce197
[ "Apache-2.0" ]
49
2020-09-07T17:43:11.000Z
2021-12-28T10:41:03.000Z
python/setup.py
bubriks/feature-store-api
fa286f257b87a09c081e86811b853b3e564ce197
[ "Apache-2.0" ]
132
2020-08-06T12:12:09.000Z
2022-03-29T16:28:25.000Z
python/setup.py
bubriks/feature-store-api
fa286f257b87a09c081e86811b853b3e564ce197
[ "Apache-2.0" ]
35
2020-08-06T12:09:02.000Z
2022-01-10T08:50:45.000Z
import os
import imp

from setuptools import setup, find_packages

__version__ = imp.load_source(
    "hsfs.version", os.path.join("hsfs", "version.py")
).__version__


def read(fname):
    # NOTE: the body of this helper was lost in extraction; this is the
    # conventional implementation so `long_description` below resolves.
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name="hsfs",
    version=__version__,
    install_requires=[
        "pyhumps==1.6.1",
        "requests",
        "furl",
        "boto3",
        "pandas",
        "numpy",
        "pyjks",
        "mock",
        "avro==1.10.2",
        "sqlalchemy",
        "PyMySQL",
    ],
    extras_require={
        "dev": ["pytest", "flake8", "black"],
        "docs": [
            "mkdocs==1.1.2",
            "mkdocs-material==6.2.2",
            "mike==0.5.5",
            "sphinx==3.5.4",
            "keras_autodoc @ git+https://git@github.com/moritzmeister/keras-autodoc@split-tags-properties",
            "markdown-include",
        ],
        "hive": ["pyhopshive[thrift]"],
    },
    author="Logical Clocks AB",
    author_email="moritz@logicalclocks.com",
    description="HSFS: An environment independent client to interact with the Hopsworks Featurestore",
    license="Apache License 2.0",
    keywords="Hopsworks, Feature Store, Spark, Machine Learning, MLOps, DataOps",
    url="https://github.com/logicalclocks/feature-store-api",
    download_url="https://github.com/logicalclocks/feature-store-api/releases/tag/" + __version__,
    packages=find_packages(),
    long_description=read("../README.md"),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Utilities",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Intended Audience :: Developers",
    ],
)
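# Hedged usage note (editor's illustration, not part of the original file):
# the extras declared above install in the usual setuptools way, e.g.:
#
#   pip install "hsfs[hive]"   # adds pyhopshive[thrift]
#   pip install "hsfs[dev]"    # adds pytest, flake8, black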
28.609375
107
0.589842
4afc50cce044689d528dbbb6c10aa634c6f79ad7
87
py
Python
src/server_py3/aps/src/wes/api/v1/users/__init__.py
kfrime/yonder
cd2f491c24f8552aeadd6ee48c601e1194a2e082
[ "MIT" ]
null
null
null
src/server_py3/aps/src/wes/api/v1/users/__init__.py
kfrime/yonder
cd2f491c24f8552aeadd6ee48c601e1194a2e082
[ "MIT" ]
12
2020-01-04T03:30:02.000Z
2021-06-02T01:22:45.000Z
src/server_py3/aps/src/wes/api/v1/users/__init__.py
kfrime/yonder
cd2f491c24f8552aeadd6ee48c601e1194a2e082
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

from . import signup, signin, signout, update, info, detail
21.75
60
0.701149
4afdfa383647f87d1307da42b1ada1260c01b5fa
127
py
Python
hubconf.py
jamesmcclain/pytorch-multi-class-focal-loss
de74657769e07dc40be838a6277dea269bfddad0
[ "MIT" ]
81
2020-10-20T23:46:47.000Z
2022-03-16T15:56:40.000Z
hubconf.py
jamesmcclain/pytorch-multi-class-focal-loss
de74657769e07dc40be838a6277dea269bfddad0
[ "MIT" ]
3
2020-10-12T13:34:02.000Z
2022-03-29T13:39:26.000Z
hubconf.py
jamesmcclain/pytorch-multi-class-focal-loss
de74657769e07dc40be838a6277dea269bfddad0
[ "MIT" ]
12
2020-10-09T18:36:50.000Z
2021-12-02T06:37:04.000Z
# Optional list of dependencies required by the package
dependencies = ['torch']

from focal_loss import FocalLoss, focal_loss
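# Hedged usage sketch (editor's illustration, not part of the original file;
# the GitHub ref below is inferred from this record's repository name):
#
#   import torch
#   criterion = torch.hub.load(
#       'jamesmcclain/pytorch-multi-class-focal-loss', 'focal_loss')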
25.4
55
0.80315
4aff2d34953f2e2be532801520dad5c0dc9065e8
15,609
py
Python
autotest/ogr/ogr_gpx.py
HongqiangWei/gdal
f7c427926438cc39d31e4459fa6401321f8e62f0
[ "MIT" ]
3
2016-07-25T16:30:13.000Z
2022-02-11T11:09:08.000Z
autotest/ogr/ogr_gpx.py
HongqiangWei/gdal
f7c427926438cc39d31e4459fa6401321f8e62f0
[ "MIT" ]
null
null
null
autotest/ogr/ogr_gpx.py
HongqiangWei/gdal
f7c427926438cc39d31e4459fa6401321f8e62f0
[ "MIT" ]
null
null
null
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project:  GDAL/OGR Test Suite
# Purpose:  Test GPX driver functionality.
# Author:   Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2007, Even Rouault <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################

import os
import sys
import string

sys.path.append( '../pymod' )

import gdaltest
import ogrtest
import ogr
import osr
import gdal

###############################################################################
# Test waypoints gpx layer.

###############################################################################
# Test routes gpx layer.

###############################################################################
# Test route_points gpx layer.

###############################################################################
# Test tracks gpx layer.

###############################################################################
# Test route_points gpx layer.

###############################################################################
# Copy our small gpx file to a new gpx file.

###############################################################################
# Output extra fields as <extensions>.

###############################################################################
# Output extra fields as <extensions>.

###############################################################################
#

gdaltest_list = [
    ogr_gpx_init,
    ogr_gpx_1,
    ogr_gpx_2,
    ogr_gpx_3,
    ogr_gpx_4,
    ogr_gpx_5,
    ogr_gpx_6,
    # Rerun test 1, 2 and 4 with generated tmp/tmp.gpx
    ogr_gpx_1,
    ogr_gpx_2,
    ogr_gpx_4,
    ogr_gpx_7,
    ogr_gpx_8,
    ogr_gpx_cleanup ]

if __name__ == '__main__':
    gdaltest.setup_run( 'ogr_gpx' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
27.19338
120
0.599206
4aff7e3e13035260de4953a62861c9d0ec4fffb5
22,377
py
Python
mwp_solver/models/sausolver.py
max-stack/MWP-SS-Metrics
01268f2d6da716596216b04de4197e345b96c219
[ "MIT" ]
null
null
null
mwp_solver/models/sausolver.py
max-stack/MWP-SS-Metrics
01268f2d6da716596216b04de4197e345b96c219
[ "MIT" ]
null
null
null
mwp_solver/models/sausolver.py
max-stack/MWP-SS-Metrics
01268f2d6da716596216b04de4197e345b96c219
[ "MIT" ]
null
null
null
# Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: Yihuai Lan
# @Time: 2021/08/21 04:59:55
# @File: sausolver.py

import random
import torch
from torch import nn
import copy

from module.Encoder.rnn_encoder import BasicRNNEncoder
from module.Embedder.basic_embedder import BasicEmbedder
from module.Decoder.tree_decoder import SARTreeDecoder
from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding
from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule
from module.Strategy.beam_search import TreeBeam
from loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy
from loss.mse_loss import MSELoss
from utils.utils import copy_list
from utils.enum_type import NumMask, SpecialTokens
48.751634
154
0.591634
4affd0e7b393c14db6c40989539fbd205424aa8e
8,128
py
Python
rosetta/tests/test_parallel.py
rafacarrascosa/rosetta
d5a964756b4f51e1032df40ee24f18398e3193b7
[ "BSD-3-Clause" ]
1
2015-01-21T06:00:46.000Z
2015-01-21T06:00:46.000Z
rosetta/tests/test_parallel.py
rafacarrascosa/rosetta
d5a964756b4f51e1032df40ee24f18398e3193b7
[ "BSD-3-Clause" ]
null
null
null
rosetta/tests/test_parallel.py
rafacarrascosa/rosetta
d5a964756b4f51e1032df40ee24f18398e3193b7
[ "BSD-3-Clause" ]
null
null
null
import unittest
from functools import partial

import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
import numpy as np
import threading
from StringIO import StringIO

from rosetta.parallel import parallel_easy, pandas_easy
from rosetta.parallel.threading_easy import threading_easy, LockIterateApply


# A couple functions for testing parallel easy
# Must be defined outside of the test class for some reason.
abfunc = partial(_abfunc, 2, 3)
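# Hedged illustration (editor's note, not part of the original file): how the
# partial above behaves. `_abfunc` itself was dropped by the extraction; a
# stand-in with the same calling shape is used here.
if __name__ == '__main__':
    def _abfunc_demo(a, b, x):
        return a + b * x
    demo = partial(_abfunc_demo, 2, 3)
    assert demo(10) == 32  # partial(f, 2, 3)(x) == f(2, 3, x)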
33.866667
79
0.637549
ab0004198f8e66f5be455567544099aa471f9197
3,349
py
Python
modules/helper/subtitles/subtitles.py
sdelcore/video-event-notifier-old
16bd322f2b81efbb3e08e63ed407ab098d610c88
[ "MIT" ]
null
null
null
modules/helper/subtitles/subtitles.py
sdelcore/video-event-notifier-old
16bd322f2b81efbb3e08e63ed407ab098d610c88
[ "MIT" ]
null
null
null
modules/helper/subtitles/subtitles.py
sdelcore/video-event-notifier-old
16bd322f2b81efbb3e08e63ed407ab098d610c88
[ "MIT" ]
null
null
null
import time import srt import re import datetime from mqtthandler import MQTTHandler INIT_STATUS={ "video": { "title": None, "series_title": None, "season": None, "episode": None }, "time": None, "events": None }
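The subtitle-walking helpers were elided; a small sketch of reading subtitles with the srt library used above (the sample string is made up):

import srt

raw = "1\n00:00:01,000 --> 00:00:03,000\nHello there\n"
for sub in srt.parse(raw):
    # sub.start and sub.end are datetime.timedelta objects
    print(sub.start.total_seconds(), sub.content)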
33.828283
133
0.561959
ab0084518a26b1bf65b7efbbe0be36485aedb9e2
1,165
py
Python
thecsvparser.py
rbago/CEBD1160_Class4_hwk
1012c81663dc60ea9d139d96f368f8289d4b363e
[ "MIT" ]
null
null
null
thecsvparser.py
rbago/CEBD1160_Class4_hwk
1012c81663dc60ea9d139d96f368f8289d4b363e
[ "MIT" ]
null
null
null
thecsvparser.py
rbago/CEBD1160_Class4_hwk
1012c81663dc60ea9d139d96f368f8289d4b363e
[ "MIT" ]
null
null
null
#!/usr/bin/env python

import os
import numpy as np
import pandas as pd

os.getcwd()

# Ask for the filename
# The current version of this script only works with whitespace-delimited
# (TSV-style) files
mainFilename = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()

# Build a proper dataframe: read with numpy first, then wrap it with pandas
filenameData = np.genfromtxt(mainFilename, dtype='str')
filenameData = pd.DataFrame(filenameData)

# Read the first row to determine whether the header is textual or numeric
headers = filenameData.iloc[0]
try:
    pd.to_numeric(headers)
except (ValueError, TypeError):
    filenameData = pd.DataFrame(filenameData.values[1:], columns=headers)

# Convert strings to numbers (floats or integers are inferred automatically)
filenameData = filenameData.apply(pd.to_numeric)

# Compute the mean and standard deviation of the columns
listMean = filenameData.mean()
listStd = filenameData.std()

print(filenameData)

# Print out the results
print('Mean for each column:')
for idx in filenameData.columns:
    print(idx, ':', listMean[idx])
print()
print('Standard deviation for each column:')
for idx in filenameData.columns:
    print(idx, ':', listStd[idx])
25.326087
85
0.758798
ab0219367d5f8fd8173529e4b59eaffa00517b4a
3,057
py
Python
donkeycar/tests/test_web_socket.py
wenxichen/donkeycar
d70ee60d35d7e0e004b885e6f6062fb51916dad1
[ "MIT" ]
12
2019-06-28T21:58:01.000Z
2021-01-08T14:25:12.000Z
donkeycar/tests/test_web_socket.py
wenxichen/donkeycar
d70ee60d35d7e0e004b885e6f6062fb51916dad1
[ "MIT" ]
6
2020-11-07T19:27:10.000Z
2021-01-23T22:47:37.000Z
donkeycar/tests/test_web_socket.py
Heavy02011/donkeycar
5a23b0fee170596e29c80826c3db0d3a4c4c5392
[ "MIT" ]
9
2019-07-13T10:12:31.000Z
2020-07-27T10:27:03.000Z
from donkeycar.parts.web_controller.web import WebSocketCalibrateAPI from functools import partial from tornado import testing import tornado.websocket import tornado.web import tornado.ioloop import json from unittest.mock import Mock from donkeycar.parts.actuator import PWMSteering, PWMThrottle
34.348315
80
0.682368
ab026e12b9cf96fdf582b2fdd6e78d761d952e59
5,709
py
Python
misc/trac_plugins/IncludeMacro/includemacro/macros.py
weese/seqan
1acb1688969c7b61497f2328af54b4d11228a484
[ "BSD-3-Clause" ]
1
2017-10-24T20:37:58.000Z
2017-10-24T20:37:58.000Z
misc/trac_plugins/IncludeMacro/includemacro/macros.py
weese/seqan
1acb1688969c7b61497f2328af54b4d11228a484
[ "BSD-3-Clause" ]
10
2015-03-02T16:45:39.000Z
2015-06-23T14:02:13.000Z
misc/trac_plugins/IncludeMacro/includemacro/macros.py
weese/seqan
1acb1688969c7b61497f2328af54b4d11228a484
[ "BSD-3-Clause" ]
2
2015-02-24T19:07:54.000Z
2015-04-08T13:53:24.000Z
# TracIncludeMacro macros import re import urllib2 from StringIO import StringIO from trac.core import * from trac.wiki.macros import WikiMacroBase from trac.wiki.formatter import system_message from trac.wiki.model import WikiPage from trac.mimeview.api import Mimeview, get_mimetype, Context from trac.perm import IPermissionRequestor from genshi.core import escape from genshi.input import HTMLParser, ParseError from genshi.filters.html import HTMLSanitizer __all__ = ['IncludeMacro']
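The macro class itself was elided; a minimal Trac macro consistent with these imports might look like the following (class name and behavior are illustrative, not the plugin's actual logic):

class IncludePageMacro(WikiMacroBase):
    """Render the raw text of another wiki page in place (hypothetical)."""

    def expand_macro(self, formatter, name, content):
        # standard Trac macro entry point: content is the macro argument
        page = WikiPage(self.env, content.strip())
        if not page.exists:
            return system_message('No such wiki page: %s' % content)
        return escape(page.text)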
40.204225
135
0.584341
ab02c90f464edb9291e3105cd07e5c1bd2aaec14
12,497
py
Python
packages/google/cloud/logging/client.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
packages/google/cloud/logging/client.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
packages/google/cloud/logging/client.py
rjcuevas/Email-Frontend-AngularJS-
753dbd190582ed953058c9e15c2be920716c7985
[ "MIT" ]
null
null
null
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Client for interacting with the Google Stackdriver Logging API.""" import os try: from google.cloud.gapic.logging.v2.config_service_v2_api import ( ConfigServiceV2Api as GeneratedSinksAPI) from google.cloud.gapic.logging.v2.logging_service_v2_api import ( LoggingServiceV2Api as GeneratedLoggingAPI) from google.cloud.gapic.logging.v2.metrics_service_v2_api import ( MetricsServiceV2Api as GeneratedMetricsAPI) from google.cloud.logging._gax import _LoggingAPI as GAXLoggingAPI from google.cloud.logging._gax import _MetricsAPI as GAXMetricsAPI from google.cloud.logging._gax import _SinksAPI as GAXSinksAPI except ImportError: # pragma: NO COVER _HAVE_GAX = False GeneratedLoggingAPI = GAXLoggingAPI = None GeneratedMetricsAPI = GAXMetricsAPI = None GeneratedSinksAPI = GAXSinksAPI = None else: _HAVE_GAX = True from google.cloud.client import JSONClient from google.cloud.environment_vars import DISABLE_GRPC from google.cloud.logging.connection import Connection from google.cloud.logging.connection import _LoggingAPI as JSONLoggingAPI from google.cloud.logging.connection import _MetricsAPI as JSONMetricsAPI from google.cloud.logging.connection import _SinksAPI as JSONSinksAPI from google.cloud.logging.entries import ProtobufEntry from google.cloud.logging.entries import StructEntry from google.cloud.logging.entries import TextEntry from google.cloud.logging.logger import Logger from google.cloud.logging.metric import Metric from google.cloud.logging.sink import Sink _DISABLE_GAX = os.getenv(DISABLE_GRPC, False) _USE_GAX = _HAVE_GAX and not _DISABLE_GAX def _entry_from_resource(self, resource, loggers): """Detect correct entry type from resource and instantiate. :type resource: dict :param resource: one entry resource from API response :type loggers: dict or None :param loggers: A mapping of logger fullnames -> loggers. If not passed, the entry will have a newly-created logger. :rtype: One of: :class:`google.cloud.logging.entries.TextEntry`, :class:`google.cloud.logging.entries.StructEntry`, :class:`google.cloud.logging.entries.ProtobufEntry` :returns: the entry instance, constructed via the resource """ if 'textPayload' in resource: return TextEntry.from_api_repr(resource, self, loggers) elif 'jsonPayload' in resource: return StructEntry.from_api_repr(resource, self, loggers) elif 'protoPayload' in resource: return ProtobufEntry.from_api_repr(resource, self, loggers) raise ValueError('Cannot parse log entry resource') def list_entries(self, projects=None, filter_=None, order_by=None, page_size=None, page_token=None): """Return a page of log entries. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list :type projects: list of strings :param projects: project IDs to include. If not passed, defaults to the project bound to the client. :type filter_: str :param filter_: a filter expression. 
See: https://cloud.google.com/logging/docs/view/advanced_filters :type order_by: str :param order_by: One of :data:`~google.cloud.logging.ASCENDING` or :data:`~google.cloud.logging.DESCENDING`. :type page_size: int :param page_size: maximum number of entries to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of entries. If not passed, the API will return the first page of entries. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.entry.TextEntry`, plus a "next page token" string: if not None, indicates that more entries can be retrieved with another call (pass that value as ``page_token``). """ if projects is None: projects = [self.project] resources, token = self.logging_api.list_entries( projects=projects, filter_=filter_, order_by=order_by, page_size=page_size, page_token=page_token) loggers = {} entries = [self._entry_from_resource(resource, loggers) for resource in resources] return entries, token def sink(self, name, filter_=None, destination=None): """Creates a sink bound to the current client. :type name: str :param name: the name of the sink to be constructed. :type filter_: str :param filter_: (optional) the advanced logs filter expression defining the entries exported by the sink. If not passed, the instance should already exist, to be refreshed via :meth:`Sink.reload`. :type destination: str :param destination: destination URI for the entries exported by the sink. If not passed, the instance should already exist, to be refreshed via :meth:`Sink.reload`. :rtype: :class:`google.cloud.logging.sink.Sink` :returns: Sink created with the current client. """ return Sink(name, filter_, destination, client=self) def list_sinks(self, page_size=None, page_token=None): """List sinks for the project associated with this client. See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list :type page_size: int :param page_size: maximum number of sinks to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of sinks. If not passed, the API will return the first page of sinks. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.sink.Sink`, plus a "next page token" string: if not None, indicates that more sinks can be retrieved with another call (pass that value as ``page_token``). """ resources, token = self.sinks_api.list_sinks( self.project, page_size, page_token) sinks = [Sink.from_api_repr(resource, self) for resource in resources] return sinks, token def metric(self, name, filter_=None, description=''): """Creates a metric bound to the current client. :type name: str :param name: the name of the metric to be constructed. :type filter_: str :param filter_: the advanced logs filter expression defining the entries tracked by the metric. If not passed, the instance should already exist, to be refreshed via :meth:`Metric.reload`. :type description: str :param description: the description of the metric to be constructed. If not passed, the instance should already exist, to be refreshed via :meth:`Metric.reload`. :rtype: :class:`google.cloud.logging.metric.Metric` :returns: Metric created with the current client. """ return Metric(name, filter_, client=self, description=description) def list_metrics(self, page_size=None, page_token=None): """List metrics for the project associated with this client. 
See: https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list :type page_size: int :param page_size: maximum number of metrics to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of metrics. If not passed, the API will return the first page of metrics. :rtype: tuple, (list, str) :returns: list of :class:`google.cloud.logging.metric.Metric`, plus a "next page token" string: if not None, indicates that more metrics can be retrieved with another call (pass that value as ``page_token``). """ resources, token = self.metrics_api.list_metrics( self.project, page_size, page_token) metrics = [Metric.from_api_repr(resource, self) for resource in resources] return metrics, token
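The methods above read as module-level functions only because the enclosing `class Client(JSONClient):` line was lost in extraction; they all use `self.project`, `self.logging_api`, and friends. A short usage sketch against that client (project ID and filter are placeholders):

from google.cloud import logging

client = logging.Client(project='my-project')  # placeholder project ID
entries, token = client.list_entries(filter_='severity>=ERROR')
for entry in entries:
    print(entry.payload)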
41.244224
96
0.634632
ab046a08c26c0e97b20f9dd2cde86b39dde408b7
1,468
py
Python
tests/test_core/test_graph_objs/test_instantiate_hierarchy.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
tests/test_core/test_graph_objs/test_instantiate_hierarchy.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
tests/test_core/test_graph_objs/test_instantiate_hierarchy.py
wwwidonja/changed_plotly
1bda35a438539a97c84a3ab3952e95e8848467bd
[ "MIT" ]
null
null
null
from __future__ import absolute_import from unittest import TestCase import os import importlib import inspect from plotly.basedatatypes import BasePlotlyType, BaseFigure datatypes_root = "new_plotly/graph_objs" datatype_modules = [ dirpath.replace("/", ".") for dirpath, _, _ in os.walk(datatypes_root) if not dirpath.endswith("__pycache__") ]
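The test methods were elided, but the module list above telegraphs the intent; a sketch of the instantiation walk, assuming every graph_objs class should construct with no arguments:

for module_str in datatype_modules:
    module = importlib.import_module(module_str)
    for name, cls in inspect.getmembers(module, inspect.isclass):
        if issubclass(cls, (BasePlotlyType, BaseFigure)):
            cls()  # must not raise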
34.952381
87
0.557221
ab04f30d858425d5d5583ebc3b9cb9eb5ad46681
4,184
py
Python
mycli/packages/special/main.py
lyrl/mycli
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
[ "BSD-3-Clause" ]
10,997
2015-07-27T06:59:04.000Z
2022-03-31T07:49:26.000Z
mycli/packages/special/main.py
lyrl/mycli
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
[ "BSD-3-Clause" ]
937
2015-07-29T09:25:30.000Z
2022-03-30T23:54:03.000Z
mycli/packages/special/main.py
lyrl/mycli
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
[ "BSD-3-Clause" ]
799
2015-07-27T13:13:49.000Z
2022-03-29T21:24:39.000Z
import logging from collections import namedtuple from . import export log = logging.getLogger(__name__) NO_QUERY = 0 PARSED_QUERY = 1 RAW_QUERY = 2 SpecialCommand = namedtuple('SpecialCommand', ['handler', 'command', 'shortcut', 'description', 'arg_type', 'hidden', 'case_sensitive']) COMMANDS = {} def show_keyword_help(cur, arg): """ Call the built-in "show <command>", to display help for an SQL keyword. :param cur: cursor :param arg: string :return: list """ keyword = arg.strip('"').strip("'") query = "help '{0}'".format(keyword) log.debug(query) cur.execute(query) if cur.description and cur.rowcount > 0: headers = [x[0] for x in cur.description] return [(None, cur, headers, '')] else: return [(None, None, None, 'No help found for {0}.'.format(keyword))]
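The decorator that fills COMMANDS was elided; a hedged sketch of one plausible shape, using the namedtuple fields declared above (argument names and defaults are assumptions):

def special_command(command, shortcut, description, arg_type=PARSED_QUERY,
                    hidden=False, case_sensitive=False):
    """Register a handler under COMMANDS keyed by its command string."""
    def wrapper(handler):
        COMMANDS[command] = SpecialCommand(handler, command, shortcut,
                                           description, arg_type, hidden,
                                           case_sensitive)
        return handler
    return wrapper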
34.578512
91
0.649618
ab058609cdca528f1ad9cfbfcbc124dc3d65d20b
180
py
Python
core/sample_fuzzer/data_generators/base.py
ShreyasTheOne/Super-Duper-Fuzzer
b667e2dca3e49a370634ad4b0bd826aca06136b7
[ "MIT" ]
null
null
null
core/sample_fuzzer/data_generators/base.py
ShreyasTheOne/Super-Duper-Fuzzer
b667e2dca3e49a370634ad4b0bd826aca06136b7
[ "MIT" ]
null
null
null
core/sample_fuzzer/data_generators/base.py
ShreyasTheOne/Super-Duper-Fuzzer
b667e2dca3e49a370634ad4b0bd826aca06136b7
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod
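Only the import survived extraction; a minimal base class consistent with the file's role in the fuzzer (names are hypothetical):

class BaseDataGenerator(ABC):
    """Common interface for the fuzzer's sample data generators."""

    @abstractmethod
    def generate(self):
        """Return one generated input sample."""
        raise NotImplementedError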
15
35
0.655556
ab073bf68fc63959db0a0aa37e1caf26b750286a
466
py
Python
Mon_08_06/convert2.py
TungTNg/itc110_python
589ca1398f26d39b05a0b798100df0b05e556e3c
[ "Apache-2.0" ]
null
null
null
Mon_08_06/convert2.py
TungTNg/itc110_python
589ca1398f26d39b05a0b798100df0b05e556e3c
[ "Apache-2.0" ]
null
null
null
Mon_08_06/convert2.py
TungTNg/itc110_python
589ca1398f26d39b05a0b798100df0b05e556e3c
[ "Apache-2.0" ]
null
null
null
# convert2.py # A program to convert Celsius temps to Fahrenheit. # This version issues heat and cold warnings. main()
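The body of main() was elided; a reconstruction consistent with the header comments (the warning thresholds are assumptions):

def main():
    celsius = float(input("What is the Celsius temperature? "))
    fahrenheit = 9 / 5 * celsius + 32
    print("The temperature is", fahrenheit, "degrees Fahrenheit.")
    if fahrenheit > 90:  # assumed heat-warning threshold
        print("It's really hot out there. Be careful!")
    if fahrenheit < 30:  # assumed cold-warning threshold
        print("Brrrrr. Be sure to dress warmly!")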
33.285714
66
0.650215
ab078e438c6b69f3703aa8808d1800eb956179af
5,082
py
Python
homeassistant/components/wolflink/__init__.py
basicpail/core
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
[ "Apache-2.0" ]
11
2018-02-16T15:35:47.000Z
2020-01-14T15:20:00.000Z
homeassistant/components/wolflink/__init__.py
basicpail/core
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
[ "Apache-2.0" ]
77
2020-07-16T16:43:09.000Z
2022-03-31T06:14:37.000Z
homeassistant/components/wolflink/__init__.py
Vaarlion/core
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
[ "Apache-2.0" ]
11
2020-12-16T13:48:14.000Z
2022-02-01T00:28:05.000Z
"""The Wolf SmartSet Service integration.""" from datetime import timedelta import logging from httpx import ConnectError, ConnectTimeout from wolf_smartset.token_auth import InvalidAuth from wolf_smartset.wolf_client import FetchFailed, ParameterReadError, WolfClient from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_PASSWORD, CONF_USERNAME from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import ( COORDINATOR, DEVICE_GATEWAY, DEVICE_ID, DEVICE_NAME, DOMAIN, PARAMETERS, ) _LOGGER = logging.getLogger(__name__) PLATFORMS = ["sensor"]
36.042553
128
0.674538
ab088360fae7f84bdf36c27b8f0ab99458367940
932
py
Python
src/levenshtein_distance.py
chunribu/python-algorithms
0483df09b5b4f93bd96712d78e3ad34bcb7e57cc
[ "MIT" ]
null
null
null
src/levenshtein_distance.py
chunribu/python-algorithms
0483df09b5b4f93bd96712d78e3ad34bcb7e57cc
[ "MIT" ]
null
null
null
src/levenshtein_distance.py
chunribu/python-algorithms
0483df09b5b4f93bd96712d78e3ad34bcb7e57cc
[ "MIT" ]
null
null
null
# test ld = LevenshteinDistance() ld.solve('kitten','sitting') ld.show()
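The class under test was elided; a self-contained sketch of a LevenshteinDistance with the same interface, using the standard dynamic-programming recurrence:

class LevenshteinDistance:
    """Edit distance via DP; the interface is inferred from the usage above."""

    def solve(self, a, b):
        m, n = len(a), len(b)
        d = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(m + 1):
            d[i][0] = i
        for j in range(n + 1):
            d[0][j] = j
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                cost = 0 if a[i - 1] == b[j - 1] else 1
                d[i][j] = min(d[i - 1][j] + 1,          # deletion
                              d[i][j - 1] + 1,          # insertion
                              d[i - 1][j - 1] + cost)   # substitution
        self._last = (a, b, d[m][n])
        return d[m][n]

    def show(self):
        a, b, dist = self._last
        print('distance(%r, %r) = %d' % (a, b, dist))  # kitten/sitting -> 3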
35.846154
79
0.47103
ab09f37cf048afa31bbd4f9b957124d830dcd972
24,156
py
Python
pyapprox/benchmarks/test_spectral_diffusion.py
ConnectedSystems/pyapprox
4f405654c707cba83d211f327c0f0fdbc95efa29
[ "MIT" ]
26
2019-12-16T02:21:15.000Z
2022-03-17T09:59:18.000Z
pyapprox/benchmarks/test_spectral_diffusion.py
ConnectedSystems/pyapprox
4f405654c707cba83d211f327c0f0fdbc95efa29
[ "MIT" ]
9
2020-03-03T03:04:55.000Z
2021-08-19T22:50:42.000Z
pyapprox/benchmarks/test_spectral_diffusion.py
ConnectedSystems/pyapprox
4f405654c707cba83d211f327c0f0fdbc95efa29
[ "MIT" ]
7
2020-03-02T03:49:17.000Z
2021-02-17T02:07:53.000Z
import numpy as np import unittest from pyapprox.benchmarks.spectral_diffusion import ( kronecker_product_2d, chebyshev_derivative_matrix, SteadyStateDiffusionEquation2D, SteadyStateDiffusionEquation1D ) from pyapprox.univariate_polynomials.quadrature import gauss_jacobi_pts_wts_1D import pyapprox as pya if __name__ == "__main__": spectral_diffusion_test_suite = \ unittest.TestLoader().loadTestsFromTestCase(TestSpectralDiffusion2D) unittest.TextTestRunner(verbosity=2).run(spectral_diffusion_test_suite)
43.368043
140
0.573729
ab0b27f4e0cbd65087dec9065d3e682653bf37df
2,145
py
Python
torchdrug/layers/flow.py
wconnell/torchdrug
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
[ "Apache-2.0" ]
772
2021-08-10T05:03:46.000Z
2022-03-31T12:48:31.000Z
torchdrug/layers/flow.py
wconnell/torchdrug
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
[ "Apache-2.0" ]
77
2021-08-12T16:19:15.000Z
2022-03-30T14:32:14.000Z
torchdrug/layers/flow.py
wconnell/torchdrug
a710097cb4ad4c48e0de0d18fbb77ef0e806cdc8
[ "Apache-2.0" ]
90
2021-08-11T16:27:13.000Z
2022-03-28T11:41:53.000Z
import torch from torch import nn from torch.nn import functional as F from torchdrug import layers
33.515625
94
0.640559
ab0b550c21847a65b30237096b5b109cb3b79405
1,531
py
Python
olctools/accessoryFunctions/metadataprinter.py
lowandrew/OLCTools
c74e9d18e2ebe0159aa824e095091045ed227e95
[ "MIT" ]
1
2020-02-29T19:12:48.000Z
2020-02-29T19:12:48.000Z
olctools/accessoryFunctions/metadataprinter.py
lowandrew/OLCTools
c74e9d18e2ebe0159aa824e095091045ed227e95
[ "MIT" ]
3
2017-09-11T18:33:00.000Z
2019-02-01T18:03:29.000Z
olctools/accessoryFunctions/metadataprinter.py
lowandrew/OLCTools
c74e9d18e2ebe0159aa824e095091045ed227e95
[ "MIT" ]
1
2017-07-25T15:40:36.000Z
2017-07-25T15:40:36.000Z
#!/usr/bin/env python3 import logging import json import os __author__ = 'adamkoziol'
39.25641
112
0.595689
ab0be1bc504d57d2eb757539f99f93b6066eb5bb
6,424
py
Python
mindware/estimators.py
aman-gupta-1995/Machine-Learning-Mindware
8b3050720711730520683c89949e3dbdfb168961
[ "MIT" ]
27
2021-07-19T09:03:34.000Z
2022-03-31T06:19:23.000Z
mindware/estimators.py
aman-gupta-1995/Machine-Learning-Mindware
8b3050720711730520683c89949e3dbdfb168961
[ "MIT" ]
4
2021-07-15T12:17:10.000Z
2022-01-26T17:16:58.000Z
mindware/estimators.py
aman-gupta-1995/Machine-Learning-Mindware
8b3050720711730520683c89949e3dbdfb168961
[ "MIT" ]
17
2020-05-12T20:24:50.000Z
2021-07-11T03:31:38.000Z
import numpy as np from sklearn.utils.multiclass import type_of_target from mindware.base_estimator import BaseEstimator from mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET from mindware.components.feature_engineering.transformation_graph import DataNode
35.103825
104
0.60523
ab0c1dad4d8d1784a1b379e00273da750a4c7145
871
py
Python
AnimeSpider/spiders/AinmeLinkList.py
xiaowenwen1995/AnimeSpider
11c676b772508fd4e14565a7adbfc7336d69b982
[ "MIT" ]
7
2020-02-26T15:58:13.000Z
2021-11-14T15:48:01.000Z
AnimeSpider/spiders/AinmeLinkList.py
xiaowenwen1995/AnimeSpider
11c676b772508fd4e14565a7adbfc7336d69b982
[ "MIT" ]
1
2020-07-23T06:44:19.000Z
2020-07-23T16:12:28.000Z
AnimeSpider/spiders/AinmeLinkList.py
xiaowenwen1995/AnimeSpider
11c676b772508fd4e14565a7adbfc7336d69b982
[ "MIT" ]
1
2021-04-01T09:22:51.000Z
2021-04-01T09:22:51.000Z
# -*- coding: utf-8 -*- import scrapy import json import os import codecs from AnimeSpider.items import AnimespiderItem
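The spider class was elided; a minimal scrapy spider consistent with the imports (the URL and item field are placeholders):

class AinmeLinkListSpider(scrapy.Spider):
    name = 'AinmeLinkList'
    start_urls = ['https://example.com/anime/index']  # placeholder URL

    def parse(self, response):
        item = AnimespiderItem()
        item['link'] = response.url  # field name is illustrative
        yield item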
32.259259
93
0.626866
ab0c88ee9a64231ee456e7fc458722131e67cfd9
34
py
Python
Module 1/Chapter 7/prog1.py
PacktPublishing/Raspberry-Pi-Making-Amazing-Projects-Right-from-Scratch-
49fd30ca8e1e30e7d85cf14e9dcb6e1d24d4a445
[ "MIT" ]
3
2016-11-15T02:27:27.000Z
2017-12-28T20:14:21.000Z
Module 1/Chapter 7/prog1.py
PacktPublishing/Raspberry-Pi-Making-Amazing-Projects-Right-from-Scratch-
49fd30ca8e1e30e7d85cf14e9dcb6e1d24d4a445
[ "MIT" ]
null
null
null
Module 1/Chapter 7/prog1.py
PacktPublishing/Raspberry-Pi-Making-Amazing-Projects-Right-from-Scratch-
49fd30ca8e1e30e7d85cf14e9dcb6e1d24d4a445
[ "MIT" ]
5
2017-12-28T20:16:05.000Z
2021-04-22T17:30:05.000Z
import cv2 print cv2.__version__
11.333333
22
0.823529
ab0c9b875cdd149ecbc52dfdd37e40b2bcaaf2e8
416
py
Python
setup.py
darlenew/pytest-testplan
85ef0c196efced681b6559328b3db3d409b2612d
[ "MIT" ]
null
null
null
setup.py
darlenew/pytest-testplan
85ef0c196efced681b6559328b3db3d409b2612d
[ "MIT" ]
null
null
null
setup.py
darlenew/pytest-testplan
85ef0c196efced681b6559328b3db3d409b2612d
[ "MIT" ]
null
null
null
"""Setup for pytest-testplan plugin.""" from setuptools import setup setup( name='pytest-testplan', version='0.1.0', description='A pytest plugin to generate a CSV test report.', author='Darlene Wong', author_email='darlene.py@gmail.com', license='MIT', py_modules=['pytest_testplan'], install_requires=['pytest'], entry_points={'pytest11': ['testplan = pytest_testplan', ]}, )
26
65
0.673077
ab0e2a7ca0afb7293dad4730992d135dc62fe897
2,271
py
Python
examples/industrial_quality_inspection/train_yolov3.py
petr-kalinin/PaddleX
e4f08b50dab01f3720570702a071188d1efd4042
[ "Apache-2.0" ]
1
2021-09-26T16:00:54.000Z
2021-09-26T16:00:54.000Z
examples/industrial_quality_inspection/train_yolov3.py
gq5227246/PaddleX
80b97ae4c9d7a290f9e7908d5cd54b7b053c2072
[ "Apache-2.0" ]
null
null
null
examples/industrial_quality_inspection/train_yolov3.py
gq5227246/PaddleX
80b97ae4c9d7a290f9e7908d5cd54b7b053c2072
[ "Apache-2.0" ]
1
2021-06-04T19:57:53.000Z
2021-06-04T19:57:53.000Z
# Select the GPU to use
# https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from paddlex.det import transforms
import paddlex as pdx

# Download and decompress the aluminum inspection dataset
aluminum_dataset = 'https://bj.bcebos.com/paddlex/examples/industrial_quality_inspection/datasets/aluminum_inspection.tar.gz'
pdx.utils.download_and_decompress(aluminum_dataset, path='./')

# Define the transforms for training and evaluation
# API: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html
train_transforms = transforms.Compose([
    transforms.MixupImage(mixup_epoch=250),
    transforms.RandomDistort(),
    transforms.RandomExpand(),
    transforms.RandomCrop(),
    transforms.Resize(
        target_size=608, interp='RANDOM'),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize()
])

eval_transforms = transforms.Compose([
    transforms.Resize(
        target_size=608, interp='CUBIC'),
    transforms.Normalize()
])

# Define the training and evaluation datasets
# API: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection
train_dataset = pdx.datasets.VOCDetection(
    data_dir='aluminum_inspection',
    file_list='aluminum_inspection/train_list.txt',
    label_list='aluminum_inspection/labels.txt',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='aluminum_inspection',
    file_list='aluminum_inspection/val_list.txt',
    label_list='aluminum_inspection/labels.txt',
    transforms=eval_transforms)

# Number of classes in the dataset
# Training progress can be inspected with VisualDL: https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html
num_classes = len(train_dataset.labels)

# YOLOv3 model API: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
model = pdx.det.YOLOv3(num_classes=num_classes, backbone='MobileNetV3_large')

# Training API: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train
# Parameter tuning guide: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
model.train(
    num_epochs=400,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    warmup_steps=4000,
    learning_rate=0.000125,
    lr_decay_epochs=[240, 320],
    save_dir='output/yolov3_mobilenetv3',
    use_vdl=True)
37.85
125
0.784236
ab0f9c740d041f9e373bd50dbfcabb4859e2d6c4
950
py
Python
api/migrations/0004_auto_20210107_2032.py
bartoszper/Django-REST-API-movierater
a145f087d9c59167ea3503dde5fa74ab7f3e3e72
[ "MIT" ]
null
null
null
api/migrations/0004_auto_20210107_2032.py
bartoszper/Django-REST-API-movierater
a145f087d9c59167ea3503dde5fa74ab7f3e3e72
[ "MIT" ]
null
null
null
api/migrations/0004_auto_20210107_2032.py
bartoszper/Django-REST-API-movierater
a145f087d9c59167ea3503dde5fa74ab7f3e3e72
[ "MIT" ]
null
null
null
# Generated by Django 3.1.4 on 2021-01-07 19:32 from django.db import migrations, models import django.db.models.deletion
32.758621
137
0.570526
ab0fb9e929f14279551c419b287e78a48d3a92f4
1,882
py
Python
wooey/migrations/0009_script_versioning.py
macdaliot/Wooey
3a0f40e3b3ab4d905f9acc72f5cd5d6453e14834
[ "BSD-3-Clause" ]
1
2020-11-05T15:04:33.000Z
2020-11-05T15:04:33.000Z
wooey/migrations/0009_script_versioning.py
macdaliot/Wooey
3a0f40e3b3ab4d905f9acc72f5cd5d6453e14834
[ "BSD-3-Clause" ]
null
null
null
wooey/migrations/0009_script_versioning.py
macdaliot/Wooey
3a0f40e3b3ab4d905f9acc72f5cd5d6453e14834
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import wooey.models.mixins
39.208333
126
0.609989
ab102bf2e193beb384ebdf3e74b5f3f77d47c463
3,976
py
Python
vendor/munkireport/firewall/scripts/firewall.py
menamegaly/MR
18d042639d9b45ca81a9b58659f45c6e2c3ac87f
[ "MIT" ]
null
null
null
vendor/munkireport/firewall/scripts/firewall.py
menamegaly/MR
18d042639d9b45ca81a9b58659f45c6e2c3ac87f
[ "MIT" ]
null
null
null
vendor/munkireport/firewall/scripts/firewall.py
menamegaly/MR
18d042639d9b45ca81a9b58659f45c6e2c3ac87f
[ "MIT" ]
null
null
null
#!/usr/bin/python
"""
Firewall for munkireport.
By Tuxudo
Will return all details about how the firewall is configured
"""

import subprocess
import os
import sys
import platform
import re
import plistlib
import json

sys.path.insert(0, '/usr/local/munki')
sys.path.insert(0, '/usr/local/munkireport')

from munkilib import FoundationPlist


def get_firewall_info():
    '''Uses system profiler to get firewall info for the machine.'''
    cmd = ['/usr/sbin/system_profiler', 'SPFirewallDataType', '-xml']
    proc = subprocess.Popen(cmd, shell=False, bufsize=-1,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, unused_error) = proc.communicate()
    try:
        plist = plistlib.readPlistFromString(output)
        # system_profiler xml is an array
        firewall_dict = plist[0]
        items = firewall_dict['_items']
        return items
    except Exception:
        return {}


def flatten_firewall_info(array):
    '''Un-nest firewall info, return a dict with the relevant keys'''
    firewall = {}
    for obj in array:
        for item in obj:
            if item == '_items':
                # merge keys recovered from nested '_items' entries
                firewall.update(flatten_firewall_info(obj['_items']))
            elif item == 'spfirewall_services':
                for service in obj[item]:
                    if obj[item][service] == "spfirewall_allow_all":
                        obj[item][service] = 1
                    else:
                        obj[item][service] = 0
                firewall['services'] = json.dumps(obj[item])
            elif item == 'spfirewall_applications':
                for application in obj[item]:
                    if obj[item][application] == "spfirewall_allow_all":
                        obj[item][application] = 1
                    else:
                        obj[item][application] = 0
                firewall['applications'] = json.dumps(obj[item])

    return firewall


def merge_two_dicts(x, y):
    '''Combine two dicts into a new one (y wins on key collisions).'''
    merged = x.copy()
    merged.update(y)
    return merged


def main():
    """Main"""
    # Skip manual check
    if len(sys.argv) > 1:
        if sys.argv[1] == 'manualcheck':
            print 'Manual check: skipping'
            exit(0)

    # Create cache dir if it does not exist
    cachedir = '%s/cache' % os.path.dirname(os.path.realpath(__file__))
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)

    # Set the encoding
    # The "ugly hack" :P
    reload(sys)
    sys.setdefaultencoding('utf8')

    # Get results
    result = dict()
    info = get_firewall_info()
    result = merge_two_dicts(flatten_firewall_info(info), get_alf_preferences())

    # Write firewall results to cache
    output_plist = os.path.join(cachedir, 'firewall.plist')
    FoundationPlist.writePlist(result, output_plist)
    #print FoundationPlist.writePlistToString(result)

if __name__ == "__main__":
    main()
31.307087
80
0.591549
ab106d29ef2283cbdeaf70023cd678e7c5de8158
842
py
Python
cf_step/metrics.py
dpoulopoulos/cf_step
c0ed1d0fbdedb863a630e90a7c7b6f95141a3e30
[ "Apache-2.0" ]
25
2020-02-16T10:14:17.000Z
2022-03-08T07:15:55.000Z
cf_step/metrics.py
dpoulopoulos/cf_step
c0ed1d0fbdedb863a630e90a7c7b6f95141a3e30
[ "Apache-2.0" ]
2
2020-05-19T15:42:58.000Z
2021-10-02T23:36:23.000Z
cf_step/metrics.py
dpoulopoulos/cf_step
c0ed1d0fbdedb863a630e90a7c7b6f95141a3e30
[ "Apache-2.0" ]
7
2020-02-24T04:35:00.000Z
2022-01-20T03:26:51.000Z
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/metrics.ipynb (unless otherwise specified). __all__ = ['recall_at_k', 'precision_at_k'] # Cell from typing import List # Cell def recall_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float: """Computes `Recall@k` from the given predictions and targets sets.""" predictions_set = set(predictions[:k]) targets_set = set(targets) result = len(targets_set & predictions_set) / float(len(targets_set)) return result # Cell def precision_at_k(predictions: List[int], targets: List[int], k: int = 10) -> float: """Computes `Precision@k` from the given predictions and targets sets.""" predictions_set = set(predictions[:k]) targets_set = set(targets) result = len(targets_set & predictions_set) / float(len(predictions_set)) return result
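Both metrics are pure set arithmetic, so a quick worked example helps sanity-check them:

preds = [1, 2, 3, 4]
targets = [2, 4, 6]
print(recall_at_k(preds, targets, k=3))     # {1,2,3} & {2,4,6} -> 1/3
print(precision_at_k(preds, targets, k=3))  # 1 hit / 3 predictions -> 1/3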
38.272727
91
0.709026
ab10a7d42774f492876454acc8afc34598c448bf
15,849
py
Python
bicycleparameters/period.py
sandertyu/Simple-Geometry-Plot
6fa4dfb50aebc4215818f75ff56f916fc32f8cfa
[ "BSD-2-Clause-FreeBSD" ]
20
2015-07-06T06:25:07.000Z
2021-12-10T19:36:33.000Z
bicycleparameters/period.py
sandertyu/Simple-Geometry-Plot
6fa4dfb50aebc4215818f75ff56f916fc32f8cfa
[ "BSD-2-Clause-FreeBSD" ]
52
2015-11-10T16:21:02.000Z
2022-03-03T11:46:52.000Z
bicycleparameters/period.py
sandertyu/Simple-Geometry-Plot
6fa4dfb50aebc4215818f75ff56f916fc32f8cfa
[ "BSD-2-Clause-FreeBSD" ]
12
2015-07-13T23:32:58.000Z
2021-12-09T18:42:16.000Z
#!/usr/bin/env python
import os
from math import pi

import numpy as np
from numpy import ma
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
from uncertainties import ufloat

# local modules
from .io import load_pendulum_mat_file


def average_rectified_sections(data):
    '''Returns a slice of an oscillating data vector based on the max and min
    of the mean of the sections created by rectifying the data.

    Parameters
    ----------
    data : ndarray, shape(n,)

    Returns
    -------
    data : ndarray, shape(m,)
        A slice where m is typically less than n.

    Notes
    -----
    This is a function to try to handle the fact that some of the data from
    the torsional pendulum had a beating like phenomena and we only want to
    select a section of the data that doesn't seem to exhibit the phenomena.

    '''
    # subtract the mean so that there are zero crossings
    meanSubData = data - np.mean(data)
    # find the zero crossings
    zeroCrossings = np.where(np.diff(np.sign(meanSubData)))[0]
    # add a zero to the beginning
    crossings = np.concatenate((np.array([0]), zeroCrossings))
    # find the mean value of the rectified sections and the local index
    secMean = []
    localMeanInd = []
    for sec in np.split(np.abs(meanSubData), zeroCrossings):
        localMeanInd.append(np.argmax(sec))
        secMean.append(np.mean(sec))
    meanInd = []
    # make the global indices
    for i, val in enumerate(crossings):
        meanInd.append(val + localMeanInd[i])
    # only take the top part of the data because some of the zero crossings
    # can pile up at one point, mainly due to the resolution of the daq box
    threshold = np.mean(secMean)
    secMeanOverThresh = []
    indice = []
    for i, val in enumerate(secMean):
        if val > threshold:
            secMeanOverThresh.append(val)
            indice.append(meanInd[i])
    # now return the data based on the max value and the min value
    maxInd = indice[np.argmax(secMeanOverThresh)]
    minInd = indice[np.argmin(secMeanOverThresh)]

    return data[maxInd:minInd]


def calc_periods_for_files(directory, filenames, forkIsSplit):
    '''Calculates the period for all filenames in directory.

    Parameters
    ----------
    directory : string
        This is the path to the RawData directory.
    filenames : list
        List of all the mat file names in the RawData directory.
    forkIsSplit : boolean
        True if the fork is broken into a handlebar and fork and false if the
        fork and handlebar was measured together.

    Returns
    -------
    periods : dictionary
        Contains all the periods for the mat files in the RawData directory.
    '''
    periods = {}

    def pathParts(path):
        '''Splits a path into a list of its parts.'''
        components = []
        while True:
            (path, tail) = os.path.split(path)
            if tail == "":
                components.reverse()
                return components
            components.append(tail)

    pathToRawDataParts = pathParts(directory)
    pathToRawDataParts.pop()
    pathToBicycleDir = os.path.join(pathToRawDataParts[0],
                                    pathToRawDataParts[1],
                                    pathToRawDataParts[2])
    pathToPlotDir = os.path.join(pathToBicycleDir, 'Plots', 'PendulumFit')

    # make sure there is a place to save the plots
    if not os.path.exists(pathToPlotDir):
        os.makedirs(pathToPlotDir)

    for f in filenames:
        print("Calculating the period for:", f)
        # load the pendulum data
        pathToMatFile = os.path.join(directory, f)
        matData = load_pendulum_mat_file(pathToMatFile)
        # generate a variable name for this period
        periodKey = get_period_key(matData, forkIsSplit)
        # calculate the period
        sampleRate = get_sample_rate(matData)
        pathToPlotFile = os.path.join(pathToPlotDir,
                                      os.path.splitext(f)[0] + '.png')
        period = get_period_from_truncated(matData['data'],
                                           sampleRate,
                                           pathToPlotFile)
        print("The period is:", period, "\n")
        # either append the period or, if it isn't there yet, make a new list
        try:
            periods[periodKey].append(period)
        except KeyError:
            periods[periodKey] = [period]

    # now average all the periods
    for k, v in periods.items():
        if k.startswith('T'):
            periods[k] = np.mean(v)

    return periods


def check_for_period(mp, forkIsSplit):
    '''Returns whether the period calculations need to happen again for the
    given measured parameters.

    Parameters
    ----------
    mp : dictionary
        Dictionary of the measured parameters.
    forkIsSplit : boolean
        True if the fork is broken into a handlebar and fork and false if the
        fork and handlebar was measured together.

    Returns
    -------
    forcePeriodCalc : boolean
        True if there wasn't enough period data in mp, false if there was.

    '''
    forcePeriodCalc = False
    # check to see if mp contains enough periods to not need recalculation
    ncTSum = 0
    ntTSum = 0
    for key in mp.keys():
        # check for any periods in the keys
        if key[:2] == 'Tc':
            ncTSum += 1
        elif key[:2] == 'Tt':
            ntTSum += 1

    # if there isn't enough data then force the period calcs again
    if forkIsSplit:
        if ncTSum < 5 or ntTSum < 11:
            forcePeriodCalc = True
    else:
        if ncTSum < 4 or ntTSum < 8:
            forcePeriodCalc = True

    return forcePeriodCalc


def fit_goodness(ym, yp):
    '''
    Calculate the goodness of fit.

    Parameters
    ----------
    ym : ndarray, shape(n,)
        The vector of measured values.
    yp : ndarray, shape(n,)
        The vector of predicted values.

    Returns
    -------
    rsq : float
        The r squared value of the fit.
    SSE : float
        The error sum of squares.
    SST : float
        The total sum of squares.
    SSR : float
        The regression sum of squares.

    '''
    SSR = np.sum((yp - np.mean(ym))**2)
    SST = np.sum((ym - np.mean(ym))**2)
    SSE = SST - SSR
    rsq = SSR / SST
    return rsq, SSE, SST, SSR


def get_period(data, sampleRate, pathToPlotFile):
    '''Returns the period and uncertainty for data resembling a decaying
    oscillation.

    Parameters
    ----------
    data : ndarray, shape(n,)
        A time series that resembles a decaying oscillation.
    sampleRate : int
        The frequency that data was sampled at.
    pathToPlotFile : string
        A path to the file to print the plots.

    Returns
    -------
    T : ufloat
        The period of oscillation and its uncertainty.
    '''
    y = data
    x = np.linspace(0., (len(y) - 1) / float(sampleRate), num=len(y))

    def fitfunc(p, t):
        '''Decaying oscillation function.'''
        a = p[0]
        b = np.exp(-p[3] * p[4] * t)
        c = p[1] * np.sin(p[4] * np.sqrt(1 - p[3]**2) * t)
        d = p[2] * np.cos(p[4] * np.sqrt(1 - p[3]**2) * t)
        return a + b * (c + d)

    # initial guesses
    #p0 = np.array([1.35, -.5, -.75, 0.01, 3.93]) # guess from delft
    #p0 = np.array([2.5, -.75, -.75, 0.001, 4.3]) # guess from ucd
    p0 = make_guess(data, sampleRate)  # tries to make a good guess

    # create the error function
    errfunc = lambda p, t, y: fitfunc(p, t) - y

    # minimize the error function
    p1, success = leastsq(errfunc, p0[:], args=(x, y))

    lscurve = fitfunc(p1, x)

    # find the uncertainty in the fit parameters
    rsq, SSE, SST, SSR = fit_goodness(y, lscurve)
    sigma = np.sqrt(SSE / (len(y) - len(p0)))

    # calculate the jacobian
    L = jac_fitfunc(p1, x)

    # the Hessian
    H = np.dot(L.T, L)

    # the covariance matrix
    U = sigma**2. * np.linalg.inv(H)

    # the standard deviations
    sigp = np.sqrt(U.diagonal())

    # natural frequency
    wo = ufloat(p1[4], sigp[4])

    # damping ratio
    zeta = ufloat(p1[3], sigp[3])

    # damped natural frequency
    wd = (1. - zeta**2.)**(1. / 2.) * wo

    # damped natural frequency (hz)
    fd = wd / 2. / pi

    # period
    T = 1. / fd

    # plot the data and save it to file
    fig = plt.figure()
    plot_osfit(x, y, lscurve, p1, rsq, T, m=np.max(x), fig=fig)
    plt.savefig(pathToPlotFile)
    plt.close()

    # return the period
    return T


def get_period_key(matData, forkIsSplit):
    '''Returns a dictionary key for the period entries.

    Parameters
    ----------
    matData : dictionary
        The data imported from a pendulum mat file.
    forkIsSplit : boolean
        True if the fork is broken into a handlebar and fork and false if the
        fork and handlebar was measured together.

    Returns
    -------
    key : string
        A key of the form 'T[pendulum][part][orientation]'. For example, if
        it is the frame that was hung as a torsional pendulum at the second
        orientation angle then the key would be 'TtB2'.

    '''
    # set up the subscripting for the period key
    subscripts = {'Fwheel': 'F',
                  'Rwheel': 'R',
                  'Frame': 'B',
                  'Flywheel': 'D'}
    # the Flywheel is for the gyro bike and it actually represents the front
    # wheel and the flywheel as one rigid body. It was easier to measure the
    # inertia this way. So, to get the actual flywheel inertia, one must
    # subtract the inertia of the Fwheel, F, from the Flywheel, D.
    if forkIsSplit:
        subscripts['Fork'] = 'S'
        subscripts['Handlebar'] = 'G'
    else:
        subscripts['Fork'] = 'H'
    try:
        subscripts[matData['rod']] = 'P'
    except KeyError:
        subscripts['Rod'] = 'P'

    # used to convert word ordinals to numbers
    ordinal = {'First': '1',
               'Second': '2',
               'Third': '3',
               'Fourth': '4',
               'Fifth': '5',
               'Sixth': '6'}
    try:
        orienWord = matData['angleOrder']
    except KeyError:
        orienWord = matData['angle']
    pend = matData['pendulum'][0].lower()
    part = subscripts[matData['part']]
    orienNum = ordinal[orienWord]
    return 'T' + pend + part + orienNum


def get_sample_rate(matData):
    '''Returns the sample rate for the data.'''
    if 'ActualRate' in matData.keys():
        sampleRate = matData['ActualRate']
    else:
        sampleRate = matData['sampleRate']
    return sampleRate


def jac_fitfunc(p, t):
    '''
    Calculate the Jacobian of a decaying oscillation function.

    Uses the analytical formulations of the partial derivatives.

    Parameters
    ----------
    p : the five parameters of the equation
    t : time vector

    Returns
    -------
    jac : The jacobian, the partial of the vector function with respect to the
    parameters vector. A 5 x N matrix where N is the number of time steps.
    '''
    jac = np.zeros((len(p), len(t)))
    e = np.exp(-p[3] * p[4] * t)
    dampsq = np.sqrt(1 - p[3]**2)
    s = np.sin(dampsq * p[4] * t)
    c = np.cos(dampsq * p[4] * t)
    jac[0] = np.ones_like(t)
    jac[1] = e * s
    jac[2] = e * c
    jac[3] = (-p[4] * t * e * (p[1] * s + p[2] * c) + e *
              (-p[1] * p[3] * p[4] * t / dampsq * c +
               p[2] * p[3] * p[4] * t / dampsq * s))
    jac[4] = (-p[3] * t * e * (p[1] * s + p[2] * c) + e * dampsq * t *
              (p[1] * c - p[2] * s))
    return jac.T


def make_guess(data, sampleRate):
    '''Returns a decent starting point for fitting the decaying oscillation
    function.

    '''
    p = np.zeros(5)

    # the first unknown is the shift along the y axis
    p[0] = np.mean(data)

    # work with the mean subtracted data from now on
    data = data - p[0]

    # what is the initial slope of the curve
    if data[10] > data[0]:
        slope = 1
    else:
        slope = -1

    # the second is the amplitude for the sin function
    p[1] = slope * np.max(data) / 2

    # the third is the amplitude for the cos function
    p[2] = slope * np.max(data)

    # the fourth is the damping ratio and is typically small, 0.001 < zeta < 0.02
    p[3] = 0.001

    # the fifth is the undamped natural frequency
    # first remove the data around zero
    dataMasked = ma.masked_inside(data, -0.1, 0.1)

    # find the zero crossings
    zeroCrossings = np.where(np.diff(np.sign(dataMasked)))[0]

    # remove redundant crossings
    zero = []
    for i, v in enumerate(zeroCrossings):
        if abs(v - zeroCrossings[i - 1]) > 20:
            zero.append(v)

    # get the samples per period
    samplesPerPeriod = 2 * np.mean(np.diff(zero))

    # now the frequency
    p[4] = (samplesPerPeriod / float(sampleRate) / 2. / pi)**-1

    if np.isnan(p[4]):
        p[4] = 4.

    return p


def plot_osfit(t, ym, yf, p, rsq, T, m=None, fig=None):
    '''Plot fitted data over the measured data.

    Parameters
    ----------
    t : ndarray (n,)
        Measurement time in seconds
    ym : ndarray (n,)
        The measured voltage
    yf : ndarray (n,)
        The fitted voltage
    p : ndarray (5,)
        The fit parameters for the decaying oscillation function
    rsq : float
        The r squared value of y (the fit)
    T : float
        The period
    m : float
        The maximum value to plot

    Returns
    -------
    fig : the figure

    '''
    # figure properties
    figwidth = 4.  # in inches
    goldenMean = (np.sqrt(5) - 1.0) / 2.0
    figsize = [figwidth, figwidth * goldenMean]
    params = {#'backend': 'ps',
              'axes.labelsize': 8,
              'axes.titlesize': 8,
              'text.fontsize': 8,
              'legend.fontsize': 8,
              'xtick.labelsize': 6,
              'ytick.labelsize': 6,
              'text.usetex': True,
              #'figure.figsize': figsize
              }
    if fig:
        fig = fig
    else:
        fig = plt.figure(2)
    fig.set_size_inches(figsize)
    plt.rcParams.update(params)

    ax1 = plt.axes([0.125, 0.125, 0.9 - 0.125, 0.65])
    #if m == None:
        #end = len(t)
    #else:
        #end = t[round(m/t[-1]*len(t))]
    ax1.plot(t, ym, '.', markersize=2)
    plt.plot(t, yf, 'k-')
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude [V]')
    equation = r'$f(t)={0:1.2f}+e^{{-({3:1.3f})({4:1.1f})t}}\left[{1:1.2f}\sin{{\sqrt{{1-{3:1.3f}^2}}{4:1.1f}t}}+{2:1.2f}\cos{{\sqrt{{1-{3:1.3f}^2}}{4:1.1f}t}}\right]$'.format(p[0], p[1], p[2], p[3], p[4])
    rsquare = '$r^2={0:1.3f}$'.format(rsq)
    period = '$T={0} s$'.format(T)
    plt.title(equation + '\n' + rsquare + ', ' + period)
    plt.legend(['Measured', 'Fit'])
    if m is not None:
        plt.xlim((0, m))
    else:
        pass

    return fig


def select_good_data(data, percent):
    '''Returns a slice of the data from the index at maximum value to the
    index at a percent of the maximum value.

    Parameters
    ----------
    data : ndarray, shape(1,)
        This should be a decaying function.
    percent : float
        The percent of the maximum to clip.

    This basically snips off the beginning and end of the data so that the
    super damped tails are gone and also any weirdness at the beginning.
''' meanSub = data - np.mean(data) maxVal = np.max(np.abs(meanSub)) maxInd = np.argmax(np.abs(meanSub)) for i, v in reversed(list(enumerate(meanSub))): if v > percent * maxVal: minInd = i break return data[maxInd:minInd]
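calc_periods_for_files calls get_period_from_truncated, which did not survive extraction; a minimal version consistent with the helpers defined above (the 0.1 clip fraction is an assumption):

def get_period_from_truncated(data, sampleRate, pathToPlotFile):
    # clip the decaying oscillation to its well-behaved portion, then fit it
    truncated = select_good_data(data, 0.1)
    return get_period(truncated, sampleRate, pathToPlotFile)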
30.188571
205
0.588618
ab11420721f9d57dfd242653355836e981c854b9
11,597
py
Python
tectosaur2/analyze.py
tbenthompson/BIE_tutorials
02cd56ab7e63e36afc4a10db17072076541aab77
[ "MIT" ]
1
2021-06-18T18:02:55.000Z
2021-06-18T18:02:55.000Z
tectosaur2/analyze.py
tbenthompson/BIE_tutorials
02cd56ab7e63e36afc4a10db17072076541aab77
[ "MIT" ]
null
null
null
tectosaur2/analyze.py
tbenthompson/BIE_tutorials
02cd56ab7e63e36afc4a10db17072076541aab77
[ "MIT" ]
1
2021-07-14T19:47:00.000Z
2021-07-14T19:47:00.000Z
import time import warnings import matplotlib.pyplot as plt import numpy as np import sympy as sp from .global_qbx import global_qbx_self from .mesh import apply_interp_mat, gauss_rule, panelize_symbolic_surface, upsample # prep step 2: find the minimum distance at which integrals are computed # to the required tolerance
40.407666
117
0.553419
ab124c9df35a8bbc9dcccb03c7e6c3a2b66cccd5
277
py
Python
celery-getting-started/celeryconfig.py
hustbeta/python-examples
9052a080cb27b1c8c2bc36222ece409e236ba076
[ "MIT" ]
null
null
null
celery-getting-started/celeryconfig.py
hustbeta/python-examples
9052a080cb27b1c8c2bc36222ece409e236ba076
[ "MIT" ]
null
null
null
celery-getting-started/celeryconfig.py
hustbeta/python-examples
9052a080cb27b1c8c2bc36222ece409e236ba076
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

BROKER_URL = 'amqp://guest@localhost//'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_RESULT_BACKEND = 'amqp://guest@localhost//'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Shanghai'
CELERY_ENABLE_UTC = False
25.181818
50
0.740072
ab12e147b947ecb1407493bd0e3516505005e115
476
py
Python
smartnlp/utils/basic_log.py
msgi/nlp-tour
ffed8c32da69c2427c92a7043f47bfc91e7feb64
[ "Apache-2.0" ]
1,559
2019-05-27T03:43:29.000Z
2022-03-31T05:35:04.000Z
smartnlp/utils/basic_log.py
msgi/nlp-tour
ffed8c32da69c2427c92a7043f47bfc91e7feb64
[ "Apache-2.0" ]
5
2019-07-10T11:55:05.000Z
2020-05-08T12:01:31.000Z
smartnlp/utils/basic_log.py
msgi/nlp-tour
ffed8c32da69c2427c92a7043f47bfc91e7feb64
[ "Apache-2.0" ]
403
2019-06-14T03:36:17.000Z
2022-03-30T08:09:08.000Z
import logging as log
21.636364
106
0.55042
ab143c1e766e4bf7477a807945495619e156d263
729
py
Python
Examples/IMAP/FilteringMessagesFromIMAPMailbox.py
Muzammil-khan/Aspose.Email-Python-Dotnet
04ca3a6f440339f3ddf316218f92d15d66f24e7e
[ "MIT" ]
5
2019-01-28T05:17:12.000Z
2020-04-14T14:31:34.000Z
Examples/IMAP/FilteringMessagesFromIMAPMailbox.py
Muzammil-khan/Aspose.Email-Python-Dotnet
04ca3a6f440339f3ddf316218f92d15d66f24e7e
[ "MIT" ]
1
2019-01-28T16:07:26.000Z
2021-11-25T10:59:52.000Z
Examples/IMAP/FilteringMessagesFromIMAPMailbox.py
Muzammil-khan/Aspose.Email-Python-Dotnet
04ca3a6f440339f3ddf316218f92d15d66f24e7e
[ "MIT" ]
6
2018-07-16T14:57:34.000Z
2020-08-30T05:59:52.000Z
import aspose.email from aspose.email.clients.imap import ImapClient from aspose.email.clients import SecurityOptions from aspose.email.clients.imap import ImapQueryBuilder import datetime as dt if __name__ == '__main__': run()
31.695652
78
0.739369
ab149d0949672fc58bdb20c8bbee5cb7134e800f
2,363
py
Python
Python.FancyBear/settings.py
010001111/Vx-Suites
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
[ "MIT" ]
2
2021-02-04T06:47:45.000Z
2021-07-28T10:02:10.000Z
Python.FancyBear/settings.py
010001111/Vx-Suites
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
[ "MIT" ]
null
null
null
Python.FancyBear/settings.py
010001111/Vx-Suites
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
[ "MIT" ]
null
null
null
# Server UID SERVER_UID = 45158729 # Setup Logging system ######################################### # import os from FileConsoleLogger import FileConsoleLogger ServerLogger = FileConsoleLogger( os.path.join(os.path.dirname(os.path.abspath(__file__)), "_w3server.log") ) W3Logger = FileConsoleLogger( os.path.join(os.path.dirname(os.path.abspath(__file__)), "_w3.log") ) # # Setup Level 2 Protocol - P2Scheme ######################################### # from P2Scheme import P2Scheme P2_URL_TOKEN = '760e25f9eb3124'.decode('hex') P2_SUBJECT_TOKEN = '\x55\xaa\x63\x68\x69\x6e\x61' P2_DATA_TOKEN = '\x55\xaa\x63\x68\x69\x6e\x61' # P2_DATA_TOKEN = 'd85a8c54fbe5e6'.decode('hex') MARK = 'itwm=' B64_JUNK_LEN = 9 BIN_JUNK_LEN = 4 P2_Scheme = P2Scheme(_url_token=P2_URL_TOKEN, _data_token=P2_DATA_TOKEN, _mark=MARK, _subj_token=P2_SUBJECT_TOKEN,\ _b64junk_len=B64_JUNK_LEN, _binary_junk_len=BIN_JUNK_LEN) # # Setup Level 3 Protocol - P3Scheme ######################################### # from P3Scheme import P3Scheme # P3_PRIVATE_TOKEN = 'a20e25f9aa3fe4'.decode('hex') P3_SERVICE_TOKEN = '015a1354acf1b1'.decode('hex') # P3_Scheme = P3Scheme(private_token=P3_PRIVATE_TOKEN, service_token=P3_SERVICE_TOKEN) # # Setup HTTP checker # #from HTTPHeadersChecker import HTTPHeadersChecker # #HTTPChecker = HTTPHeadersChecker() # Setup LocalStorage # from FSLocalStorage import FSLocalStorage LocalStorage = FSLocalStorage() ############################################################ # Initialize Server instance # # #from W3Server import W3Server #MAIN_HANDLER = W3Server(p2_scheme=P2_Scheme, p3_scheme=P3_Scheme, http_checker=HTTPChecker, local_storage=LocalStorage, logger=ServerLogger) ############################################################ # Mail Parameters POP3_MAIL_IP = 'pop.gmail.com' POP3_PORT = 995 POP3_ADDR = 'jassnovember30@gmail.com' POP3_PASS = '30Jass11' SMTP_MAIL_IP = 'smtp.gmail.com' SMTP_PORT = 587 SMTP_TO_ADDR = 'userdf783@mailtransition.com' SMTP_FROM_ADDR = 'ginabetz75@gmail.com' SMTP_PASS = '75Gina75' # C&C Parametrs # XAS_IP = '104.152.187.66' XAS_GATE = '/updates/' ############################################################ # Setup P3 communication # wsgi2 # LS_TIMEOUT = 1 # big loop timeout FILES_PER_ITER = 5 # count of requests per iter ############################################################
28.46988
141
0.650444
ab15b741f60036ff0c954f49e3dc89246a1bf5bf
46,930
py
Python
tools/fileinfo/features/certificates-info/test.py
HoundThe/retdec-regression-tests
760639deb1ee52e88a14523b4a908d3e69d6fcd3
[ "MIT" ]
null
null
null
tools/fileinfo/features/certificates-info/test.py
HoundThe/retdec-regression-tests
760639deb1ee52e88a14523b4a908d3e69d6fcd3
[ "MIT" ]
null
null
null
tools/fileinfo/features/certificates-info/test.py
HoundThe/retdec-regression-tests
760639deb1ee52e88a14523b4a908d3e69d6fcd3
[ "MIT" ]
null
null
null
from regression_tests import *
57.938272
163
0.721372
ab180e51c94b6c40621e5bfdc60ef7316f264c2c
111
py
Python
app/darn.py
AmitSrourDev/darn
c04b681881620ffed2e1e0788d9cd80da7f806c4
[ "BSD-2-Clause" ]
null
null
null
app/darn.py
AmitSrourDev/darn
c04b681881620ffed2e1e0788d9cd80da7f806c4
[ "BSD-2-Clause" ]
null
null
null
app/darn.py
AmitSrourDev/darn
c04b681881620ffed2e1e0788d9cd80da7f806c4
[ "BSD-2-Clause" ]
null
null
null
import subprocess
18.5
34
0.612613
ab191577acbffa7ecc550d7138ce7e0984e85baf
2,380
py
Python
virt/ansible-latest/lib/python2.7/site-packages/ansible/plugins/become/runas.py
lakhlaifi/RedHat-Ansible
27c5077cced9d416081fcd5d69ea44bca0317fa4
[ "Apache-2.0" ]
1
2020-03-29T18:41:01.000Z
2020-03-29T18:41:01.000Z
ansible/ansible/plugins/become/runas.py
SergeyCherepanov/ansible
875711cd2fd6b783c812241c2ed7a954bf6f670f
[ "MIT" ]
7
2020-09-07T17:27:56.000Z
2022-03-02T06:25:46.000Z
ansible/ansible/plugins/become/runas.py
SergeyCherepanov/ansible
875711cd2fd6b783c812241c2ed7a954bf6f670f
[ "MIT" ]
1
2020-03-22T01:04:48.000Z
2020-03-22T01:04:48.000Z
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    become: runas
    short_description: Run As user
    description:
        - This become plugin allows your remote/login user to execute commands as another user via the windows runas facility.
    author: ansible (@core)
    version_added: "2.8"
    options:
        become_user:
            description: User you 'become' to execute the task
            ini:
              - section: privilege_escalation
                key: become_user
              - section: runas_become_plugin
                key: user
            vars:
              - name: ansible_become_user
              - name: ansible_runas_user
            env:
              - name: ANSIBLE_BECOME_USER
              - name: ANSIBLE_RUNAS_USER
            required: True
        become_flags:
            description: Options to pass to runas, a space delimited list of k=v pairs
            default: ''
            ini:
              - section: privilege_escalation
                key: become_flags
              - section: runas_become_plugin
                key: flags
            vars:
              - name: ansible_become_flags
              - name: ansible_runas_flags
            env:
              - name: ANSIBLE_BECOME_FLAGS
              - name: ANSIBLE_RUNAS_FLAGS
        become_pass:
            description: password
            ini:
              - section: runas_become_plugin
                key: password
            vars:
              - name: ansible_become_password
              - name: ansible_become_pass
              - name: ansible_runas_runas
            env:
              - name: ANSIBLE_BECOME_PASS
              - name: ANSIBLE_RUNAS_PASS
    notes:
        - runas is really implemented in the powershell module handler and as such can only be used with winrm connections.
        - This plugin ignores the 'become_exe' setting as it uses an API and not an executable.
"""

from ansible.plugins.become import BecomeBase
34
127
0.589496
ab1934eb319ba24ec86beec019a4db6fcc3fa01b
698
py
Python
2017/lab_dh/utils.py
JustHitTheCore/ctf_workshops
d50e8a5c90e80cdae3e17a92bce83955f0618570
[ "MIT" ]
7
2016-12-27T17:32:43.000Z
2019-03-27T07:15:23.000Z
2017/lab_dh/utils.py
JustHitTheCore/ctf_workshops
d50e8a5c90e80cdae3e17a92bce83955f0618570
[ "MIT" ]
1
2017-12-07T02:21:29.000Z
2017-12-07T02:21:29.000Z
2017/lab_dh/utils.py
JustHitTheCore/ctf_workshops
d50e8a5c90e80cdae3e17a92bce83955f0618570
[ "MIT" ]
1
2017-01-08T01:49:05.000Z
2017-01-08T01:49:05.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' ~Gros ''' from hashlib import sha256 import random def add_padding(data, block_size=16): """add PKCS#7 padding""" size = block_size - (len(data)%block_size) return data+chr(size)*size def strip_padding(data, block_size=16): """strip PKCS#7 padding""" padding = ord(data[-1]) if padding == 0 or padding > block_size or data[-padding:] != chr(padding)*padding: raise Exception("Invalid padding") return data[:-padding]
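A quick usage check of the two helpers (block size 8 for brevity):

padded = add_padding('hello', 8)   # 'hello' + '\x03' * 3
assert strip_padding(padded, 8) == 'hello'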
22.516129
87
0.659026