blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
db75fe7c8c874f31cb4f81cf4169470d68c8a848 | efe3c9ad40200e6a4cc54ade2867e455687eb11b | /games/migrations/0008_auto_20180331_2134.py | 3d437be4fabb2fe3d4a517cd5225dc4bec5a3e20 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | andrewhstead/stream-three-project | bec3b70b354b812d1a875ee4e305377038fe179b | 60e5f946455f12019a266b8231737435702ff95e | refs/heads/master | 2023-06-23T17:53:09.379297 | 2023-06-13T16:09:22 | 2023-06-13T16:09:22 | 126,410,294 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-31 20:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('games', '0007_auto_20180331_2132'),
]
operations = [
migrations.AlterField(
model_name='season',
name='champion',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='champion', to='teams.Team'),
),
migrations.AlterField(
model_name='season',
name='finalist',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='finalist', to='teams.Team'),
),
]
| [
"andrew@andrewstead.co.uk"
] | andrew@andrewstead.co.uk |
0b5aa09befc5d6623bbda3bceae433a27773e85b | 154f24699c63858703b4538afc0213594f08b0b6 | /scripts/make_csv.py | 25e613e9b21cdb2dd288b978acb42fd68ac8a778 | [] | no_license | inpho/loc-ucsd | 225f936d2ba88df057ea5f82f60053141686a80e | a9c7e7fded841bd0376f944b102f151260ae5e44 | refs/heads/master | 2020-05-30T03:53:19.006317 | 2014-03-03T23:45:55 | 2014-03-03T23:45:55 | 8,609,923 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | import csv
# NOTE: Python 2 script (`print` statement; the CSV is opened in 'rb' mode).
# Splits HathiTrust volume records from a master call-number CSV into three
# collections and writes two of them to a combined CSV.
import json

# Output buckets, one per collection.
htrc1314 = []
htrc86 = []
htrc6 = []

# The six volume ids that make up the small "htrc6" collection.
volumes6=["uc2.ark+=13960=t5w66bs1h",
          "uc2.ark+=13960=t6057f659",
          "uc2.ark+=13960=t74t6gs0m",
          "uc2.ark+=13960=t0ht2h954",
          "uc2.ark+=13960=t05x26n0d",
          "uc2.ark+=13960=t5p84550z"]

# The "htrc86" collection is defined by the keys of this JSON mapping.
with open('htrc86.json') as jsonfile:
    data = json.load(jsonfile)
    volumes86 = data.keys()

# Route every row of the master CSV into exactly one bucket; anything not in
# the two explicit collections falls through to htrc1314.
with open('../data/csv/htrc_lcco.csv', 'rb') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        if row['id'] in volumes6:
            htrc6.append({'id' : row['id'],
                          'lccn' : row['id2'],
                          'full_call' : row['full_call'],
                          'collection' : 'htrc6'})
        elif row['id'] in volumes86:
            htrc86.append({'id' : row['id'],
                           'lccn' : row['id2'],
                           'full_call' : row['full_call'],
                           'collection' : 'htrc86'})
        else:
            htrc1314.append({'id' : row['id'],
                             'lccn' : row['id2'],
                             'full_call' : row['full_call'],
                             'collection': 'htrc1314'})

# Quick sanity check of the split sizes.
print len(htrc1314), len(htrc86), len(htrc6)

# Write the combined output; 'full_call' is dropped via extrasaction='ignore'.
# NOTE(review): only htrc6 and htrc86 are written -- htrc1314 appears to be
# deliberately excluded from all.csv, but worth confirming.
with open('../data/htrc/all.csv', 'w') as newfile:
    writer = csv.DictWriter(newfile, ['collection', 'id', 'lccn'], extrasaction='ignore')
    for row in htrc6:
        writer.writerow(row)
    for row in htrc86:
        writer.writerow(row)
| [
"jaimie.murdock@gmail.com"
] | jaimie.murdock@gmail.com |
7810e6d0ddb5c42ab6ac22acf76b020cae5fbfe5 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /hackerrank/stacks_and_queues/minMaxRiddle.py | 1213aa229e6ad165d187dd216a80db0c7da76999 | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the riddle function below.
def riddle(arr):
    """For every window width w in 1..len(arr), compute the maximum over the
    minimums of all contiguous length-w windows of *arr*.

    Returns a list whose (w-1)-th entry is that max-of-window-minimums.
    (Brute force: O(n^2) windows, each scanned for its minimum.)
    """
    results = []
    n = len(arr)
    for width in range(1, n + 1):
        # Minimum of each contiguous window of this width.
        window_minimums = [min(arr[start:start + width])
                           for start in range(n - width + 1)]
        # Best (largest) of those minimums for the current width.
        results.append(max(window_minimums))
    return results
# HackerRank harness entry point: read the array size and the array from
# stdin, solve, and write the answers space-separated to OUTPUT_PATH.
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    # n consumes the first input line; riddle() itself relies on len(arr).
    n = int(input())

    arr = list(map(int, input().rstrip().split()))

    res = riddle(arr)

    fptr.write(' '.join(map(str, res)))
    fptr.write('\n')

    fptr.close()
| [
"guibasconti@gmail.com"
] | guibasconti@gmail.com |
94e38d57514d665ad47c7511ea77607377e6c50f | 9962f123178e8ecd13ab806957c759092f8e4dea | /runner | 4ca1228feec61ade89bed3167b63eae599d86ff9 | [] | no_license | mrcleanspleen/Otto | b1a40af241459a00d5b9c43fc3d94401940cd835 | ca5662bd57a672f76eb52b9138892ec6658a9aa2 | refs/heads/master | 2021-01-01T17:40:48.157654 | 2017-08-06T22:52:54 | 2017-08-06T22:52:54 | 98,131,827 | 0 | 0 | null | 2017-07-23T23:34:27 | 2017-07-23T23:34:27 | null | UTF-8 | Python | false | false | 308 | #!/usr/bin/env python3
import os, sys

# Take the message from the command line if given, otherwise prompt for it.
if len(sys.argv) > 1:
    message = ' '.join(sys.argv[1:])
else:
    message = input('message > ')

# Run the Otto Go bot with a "|~|"-delimited payload:
# message |~| sender |~| channel id |~| settings path.
# NOTE(review): the message is interpolated unescaped into a shell command,
# so quotes/metacharacters in it will break (or inject into) the command --
# acceptable for a personal runner, but worth confirming.
os.system("cd ~/go/src/Otto;go run main.go \"{}|~|{}|~|{}|~|{}\"".format(message,"someone","iMessage;+;chat508017006480766416","/Users/Peter/go/src/Otto/settings.json"))
| [
"peter.a.stenger@gmail.com"
] | peter.a.stenger@gmail.com | |
1686c97446c59e59d5ac28c92c003dd5acb9e0d9 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2692487_0/Python/jessethegame/jam.py | 1d7b96443962d2c3ac3bc0d727483d6e97400d75 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | import sys
def run(func):
    """Open the input file named by argv[1], read the number of test cases
    from its first line, and yield func(handle) once per case.

    *func* is expected to consume exactly one case's worth of lines per call.
    """
    with open(sys.argv[1]) as handle:
        count = int(handle.readline())
        for x in range(count):
            yield func(handle)
def output(data):
    """Format an iterable of answers as Code Jam output:
    one "Case #i: answer" line per item, numbered from 1."""
    lines = []
    for case_number, answer in enumerate(data, start=1):
        lines.append('Case #{}: {}'.format(case_number, answer))
    return '\n'.join(lines)
def test(func):
    """Solve every case in the argv[1] file with *func* and print the
    formatted results (Python 2 `print` statement)."""
    print output(run(func))
def func(handle):
    """Solve one test case read from *handle* (two lines: "A N", then the
    N mote sizes).

    Appears to be Code Jam 2013 "Osmos": a mote of size A absorbs any
    strictly smaller mote and grows by its size; an operation either inserts
    a helper mote or deletes one.  Returns the minimum operation count --
    TODO confirm against the problem statement.
    """
    A, N = map(int, handle.readline().split(' '))  # N is implied by len(S)
    S = map(int, handle.readline().split(' '))
    ss = sorted(S)      # greedily absorb the smallest remaining mote first
    ops = 0             # insert operations performed so far
    eat = 0             # motes absorbed (tracked but unused in the answer)
    stk = 0             # inserts not yet "paid off" by a subsequent absorb
    last = len(ss)      # fallback answer: delete every remaining mote
    while ss:
        if A == 1:
            # A size-1 mote can never grow (A += A-1 adds 0); deleting the
            # rest is the only option.
            return last
        if A > ss[0]:
            # Absorb the smallest remaining mote.
            A += ss.pop(0)
            eat += 1
            stk = max(stk - 1, 0)
            if not stk:
                # No outstanding inserts: "ops inserts + delete the rest"
                # is a candidate answer.
                last = ops + len(ss)
        else:
            # Cannot absorb: insert a mote of size A-1 and absorb it,
            # which nearly doubles A.
            ops += 1
            stk += 1
            A += A - 1
    return min(last, ops)
# Run the solver over every case in the file named on the command line.
test(func)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
f3fd5b33b3f748d30f2c5d9c8263f923560685a8 | 728e9c50fa2c1d71f1c3b40185646ef9fcafa632 | /Tests/Crawl.py | 26775c3c89265e86b899d60d2fe24ab1de8ee8c2 | [] | no_license | ChethanaSR/Python_SDK | 7d9d8bc16ae092f695cf79c43d8d5b12e37ba193 | 524b17a70c91c58a591fe9a70f81e3f8b513546e | refs/heads/master | 2020-07-28T00:46:35.691339 | 2019-09-18T08:39:36 | 2019-09-18T08:39:36 | 209,258,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | import os
import openpyxl
import time
import dicttoxml
from datetime import datetime
from xmldiff import main
import pandas
import csv
import xml.etree.ElementTree as et
from dateutil.parser import parse
def xml_csv_comp_filter_for_timepoint():
#csv_file = "C:\\Users\\rchethana\\Documents\\SDK_Automation\\XML-TestData\\Keyword_SeriesID\\386599657,210461702,117868108,384678537,118117308.csv"
xml_file = ":\\Users\\rchethana\\Documents\\SDK_Automation\\XML-ActualData\\Keyword_SeriesID\\2019-05-28,12-23-09 PM\\2019-05-28,12-23-09 PM.xml"
series_id = []
tree = et.parse( xml_file )
root = tree.getroot()
series = root.findall( ".//entity_id" )
for id in range (len(series)):
series_id.append(series[id].text)
print series_id
xml_csv_comp_filter_for_timepoint()
# verifyoption = "No. of Obs"
# series = root.findall( ".//entity_id" )
#
#
#
#
# series_date =[]
# series_value = []
# series_id_cvs = []
# I = 1
# for id in range (len(series)):
# filterdate = []
# filetrvalue = []
# date = root.findall( ".//item[" +str(I)+ "]/time_points/item/date" )
# value = root.findall( ".//item["+str(I)+"]/time_points/item/value" )
# year_in_date = datetime.strptime( date[id].text, '%Y-%m-%d' ).year
# series_id.append( series[id].text )
# with open(csv_file ) as csvfile:
# readCSV = csv.reader( csvfile, delimiter=',' )
# for row in readCSV:
# if row[0] == "Series ID":
# for num in range( 1, len( series ) + 1 ): series_id_cvs.append( row[num] )
#
# if row[0]== str(year_in_date) and series_id_cvs[id]== series[id].text:
# print value[id].text | [
"rchethana@shravas.com"
] | rchethana@shravas.com |
c45bf780b6a96b8e03e6aa3578470e50da49f78a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03062/s756793407.py | 4429176fdd3b72784590ffe51d12a6c3b81dbeca | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | N = int(input())
# Maximize the total after sign flips: the code computes sum(|A[i]|), and if
# an odd number of inputs is negative it sacrifices the element of smallest
# magnitude (subtracting it twice).  Presumably the allowed operation can
# negate elements in pairs, i.e. only an even number of flips -- confirm
# against the problem statement.  N was read one line earlier.
A = list(map(int,input().split()))
minus_cnt = 0               # how many inputs are negative
abs_sum = 0                 # running sum of |A[i]|
min_abs_val = 1000000000    # smallest |A[i]| seen so far (initial upper bound)
for i in range(N):
    val = A[i]
    abs_val = abs(val)
    abs_sum += abs_val
    if abs_val < min_abs_val:
        min_abs_val = abs_val
    if val < 0:
        minus_cnt += 1
B_max_sum = abs_sum
if minus_cnt % 2 != 0:
    # One value must remain negative; choose the smallest magnitude.
    B_max_sum -= (min_abs_val * 2)
print(B_max_sum)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c7469e01212ca09d05235f188a78d379f4e444ea | 14a77f28e43f078a0737b2dbaf682210739ba6c0 | /decisions/factories/event.py | 85bb70e33993b14609466b85a83255812030ddd6 | [] | no_license | tuomas777/paatos | f58e15ee6c5c7bd87d6e015f6bf9eae020aba77b | 79fe20768f6dd6c7ed8bae22eaf20961bbba8fa7 | refs/heads/master | 2020-04-05T22:43:21.254187 | 2016-11-22T07:40:58 | 2016-11-22T07:40:58 | 68,003,356 | 0 | 0 | null | 2016-11-24T14:01:26 | 2016-09-12T11:30:31 | Python | UTF-8 | Python | false | false | 315 | py | import factory
from faker import Faker

from decisions.models import Event

# Seeded Faker instance so generated fixture data is reproducible.
fake = Faker()
fake.seed(7)


class EventFactory(factory.django.DjangoModelFactory):
    """factory_boy factory producing Event model instances for tests."""

    class Meta:
        model = Event

    # NOTE(review): these fake.* calls run once at class-definition time, so
    # every Event built by this factory shares the same name, description and
    # start_date.  If per-instance variation is wanted, factory.Faker or
    # factory.LazyAttribute would be needed -- confirm which is intended.
    name = fake.text(max_nb_chars=20)
    description = fake.paragraph(nb_sentences=5)
    start_date = fake.date()
| [
"tuomas.haapala@anders.fi"
] | tuomas.haapala@anders.fi |
f7c1891ca323b5da4d72a7b2ea564c4f7f9cb834 | 6a44e772dfdec969f5e2af430f0bf3a35eb73c4e | /src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/usage_name.py | a1f3796d70a9efdf4d82846aaa69862e31effa9d | [
"MIT"
] | permissive | lurumad/autorest | ecc4b1de223e4b4cdd226a3cf922a6940dbddd34 | fef0c4c9e7fdb5c851bdb095d5a2ff93572d452e | refs/heads/master | 2021-01-12T11:07:39.298341 | 2016-11-04T03:12:08 | 2016-11-04T03:12:08 | 72,835,570 | 1 | 0 | null | 2016-11-04T09:58:50 | 2016-11-04T09:58:50 | null | UTF-8 | Python | false | false | 1,088 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: AutoRest-generated model (see the file header) -- manual edits will
# be lost when the client is regenerated.
class UsageName(Model):
    """The Usage Names.

    :param value: Gets a string describing the resource name.
    :type value: str
    :param localized_value: Gets a localized string describing the resource
     name.
    :type localized_value: str
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, value=None, localized_value=None):
        self.value = value
        self.localized_value = localized_value
| [
"noreply@github.com"
] | lurumad.noreply@github.com |
a9055249ac9757566ae617d69563511b808fd3eb | 8e2404c7bcfd28329bed789839192b2c4e85ea1b | /Hackerrank/QueuesUsingTwoStacks.py | 9b00afd7b1e775d9d4a9da9534e24dd79876baf2 | [] | no_license | Pabitra-26/Problem-Solved | 408bd51bbffc69f8c5e1def92797c2e6f027f91d | c27de1dd6c4ad14444fa5ee911a16186c200a7f9 | refs/heads/master | 2023-07-30T16:51:28.062349 | 2021-09-27T06:06:54 | 2021-09-27T06:06:54 | 269,935,039 | 2 | 0 | null | 2021-09-27T06:06:55 | 2020-06-06T09:39:33 | Python | UTF-8 | Python | false | false | 1,000 | py | # Problem name: Queues using two stacks
# Description: A queue is an abstract data type that maintains the order in which elements were added to it, allowing the oldest elements to be removed from the front and new elements to be added to the rear. This is called a First-In-First-Out (FIFO) data structure because the first element added to the queue
# (i.e., the one that has been waiting the longest) is always the first one to be removed.
# Strategy: we can use a single list instead
class Queue(object):
    """FIFO queue backed by a single Python list.

    (The exercise title says "two stacks", but one list is sufficient.)
    The front of the queue is index 0 of the backing list ``S1``.
    """

    def __init__(self):
        # Backing storage; oldest element first.
        self.S1 = []

    def enQueue(self, element):
        """Add *element* at the rear of the queue."""
        self.S1.append(element)

    def deQueue(self):
        """Discard the element currently at the front."""
        del self.S1[0]

    def print_q(self):
        """Print the element currently at the front."""
        print(self.S1[0])
if __name__=="__main__":
n=int(input())
que=Queue()
for i in range(n):
l=list(map(int,input().split()))
if(l[0]==1):
que.enQueue(l[1])
elif(l[0]==2):
que.deQueue()
else:
que.print_q()
| [
"noreply@github.com"
] | Pabitra-26.noreply@github.com |
1a1e4043f7335ec73f9c0f3e8eae6e2840b83d9e | 9af2ae16b962c5dbed8497df9513f4f39dc4a3c4 | /muke_class/chp2/a5_keras_dnn.py | 6b2757395bffe590dce2acc95d0470020230563b | [] | no_license | Freshield/LEARN_Tensorflow2 | 4170d2158c2e73485fcc7828b09ea96834efe838 | e2e139525aeac504949929330ef4b58cf91816c6 | refs/heads/master | 2021-07-04T05:38:47.709291 | 2020-11-02T09:18:19 | 2020-11-02T09:18:19 | 194,585,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,844 | py | #coding=utf-8
"""
@Author: Freshield
@License: (C) Copyright 2018, BEIJING LINKING MEDICAL TECHNOLOGY CO., LTD.
@Contact: yangyufresh@163.com
@File: a5_keras_dnn.py
@Time: 2019-04-07 15:17
@Last_update: 2019-04-07 15:17
@Desc: None
@==============================================@
@ _____ _ _ _ _ @
@ | __|___ ___ ___| |_|_|___| |_| | @
@ | __| _| -_|_ -| | | -_| | . | @
@ |__| |_| |___|___|_|_|_|___|_|___| @
@ Freshield @
@==============================================@
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from tensorflow.python import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
print(module.__name__, module.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]
print(x_valid.shape, y_valid.shape)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(
x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_valid_scaled = scaler.transform(
x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_test_scaled = scaler.transform(
x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=x_train_scaled.shape[1:]))
for i in range(20):
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
print(model.summary())
logdir = 'data/dnn-callbacks'
if not os.path.exists(logdir):
os.mkdir(logdir)
output_model_file = os.path.join(logdir, 'fashion_minst_model.h5')
callbacks = [
keras.callbacks.TensorBoard(logdir),
keras.callbacks.ModelCheckpoint(output_model_file,
save_best_only=True),
keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3)
]
history = model.fit(x_train_scaled, y_train, epochs=10,
validation_data=(x_valid_scaled, y_valid),
callbacks=callbacks)
def plot_learning_curves(history):
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 3)
plt.show()
plot_learning_curves(history)
model.evaluate(x_test_scaled, y_test)
| [
"zxdsw199182@gmail.com"
] | zxdsw199182@gmail.com |
32507525464d06c8ae7fcde041d434d4c05239d5 | 2154d0221e29a86850a1b83e4302f6e3e3f7fa5d | /mock_example/MySQLdb_fun.py | b9a062f6ef8f96908705df3b76dba64e7143ce26 | [] | no_license | aaqqxx/simple_for_life | 3b8805c6791da6a3a7f42c069dc1ee7d2b8d3649 | 9ad6d61a56216d04250cd89aeaeda63c11942d0a | refs/heads/master | 2020-04-04T09:18:59.396540 | 2015-04-28T11:22:55 | 2015-04-28T11:22:55 | 20,906,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | __author__ = 'XingHua'
import MySQLdb
def Foo():
conn = MySQLdb.connect(host='localhost',
user='root', passwd='abc123', db='test')
cursor = conn.cursor()
cursor.execute('SELECT * FROM people')
id, name = cursor.fetchone()
print id, name
# Manual smoke test: requires a reachable local MySQL server.
if __name__ == '__main__':
    Foo()
"aaqqxx1910@gmail.com"
] | aaqqxx1910@gmail.com |
7a362af386f8e7f440301bda660837fbb6449fd7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04000/s468061454.py | c59bfc6f1c8081389083fc960c22dd4c3ba08cda | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | import sys
# For an h x w grid with n painted cells, print for j = 0..9 how many 3x3
# sub-squares (fully inside the grid) contain exactly j painted cells.
input=sys.stdin.readline    # fast reader; deliberately shadows the builtin
h,w,n = map(int, input().split())
l=[list(map(int,input().split())) for i in range(n)]
from collections import defaultdict
d = defaultdict(int)        # 3x3-square center "x_y" -> painted-cell count
for tmp in l:
    y=tmp[0]-1
    x=tmp[1]-1
    # Each painted cell lies in up to nine 3x3 squares, identified here by
    # their center; a valid center stays at least one cell from each border.
    for i in [-1,0,1]:
        for j in [-1,0,1]:
            if 1<=x+i<w-1 and 1<=y+j <h-1:
                s = str(x+i) + "_" + str(y+j)
                d[s]+=1
import collections
lis = list(d.values())
c = collections.Counter(lis)    # painted count -> number of squares with it
ans=[0]*10
for itm in c.items():
    ans[itm[0]]=itm[1]
# Squares never touched by any painted cell contain zero painted cells.
ans[0]=(h-2)*(w-2)-sum(ans)
for a in ans:
    print(a)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
050fdaa4b74799a5ef129833fa9542b5f9cd5b3f | 479ad544171d18a4da1bd109a44fa5c42e075a17 | /fantom_util/fantom_util/graph_tools/named_entity_merging.py | 4906e466e823da456fda67718befeee0e58b34ec | [] | no_license | kth-social-robotics/fantombot | c329a7ec716dff4e74ad2420c46239705d285647 | 1fef1bc98a6aab2a6534e74f7d464b758162fe87 | refs/heads/master | 2020-06-03T00:29:36.239228 | 2019-08-27T13:41:51 | 2019-08-27T13:41:51 | 191,361,328 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | # want to look through utterances of the children of this node, match them with movies in named entity model and
# merge those that are in there and don't have any children
from fantom_util.database import db_session
from fantom_util.database.models import Node
from fantom_util.feature_extraction.named_entities import named_entities_model
from sqlalchemy.orm import joinedload
# For testing NODE_ID = 649613
nem = named_entities_model()
def named_entity_merge(node_id):
    """Return the child nodes of *node_id* that are leaves (no children) and
    whose utterance text mentions a known named entity (movie, musician or
    band) from the named-entities model.

    A node is selected at most once: the first matching utterance/entity is
    enough, after which scanning moves on to the next sibling.
    """
    nodes = (
        db_session.query(Node)
        .options(joinedload(Node.utterances), joinedload(Node.node_utterances))
        .filter(Node.parent_id == node_id)
        .all()
    )
    to_merge = []
    categories = ["movies", "musicians", "bands"]
    for node in nodes:
        if node.children:
            continue
        matched = False
        for utterance in node.utterances:
            utterance_text = utterance.utterance_text
            for category in categories:
                for item in nem[category]:
                    if f" {item.lower()} " in f" {utterance_text} ":
                        print(f"found {item} in {utterance_text}")
                        to_merge.append(node)
                        matched = True
                        # Fix: stop as soon as this node matches -- before,
                        # several matching items could append the same node
                        # more than once.
                        break
                if matched:
                    break
            if matched:
                break
        # Fix: the original broke out of the *nodes* loop here after the
        # first match, so at most one node was ever returned; we now keep
        # scanning the remaining siblings.
    return to_merge
| [
"pjjonell@kth.se"
] | pjjonell@kth.se |
5738a33d087f1c4d0d34dd5308dddb244d93680c | 7246faf9a222269ce2612613f58dc5ff19091f10 | /baekjoon/1000~2999/1764_듣보잡.py | 5de8248dba6fbb77fcee6925ba8bf1bb62089cef | [] | no_license | gusdn3477/Algorithm_Study | 87a2eb72a8488d9263a86db70dadc7944434d41d | 3fefe1dcb40122157845ffc542f41cb097711cc8 | refs/heads/main | 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | N, M = map(int, input().split())
# Read N names into one list and M names into another (N, M were parsed one
# line earlier), then print -- sorted -- the names present in both.
arr = []
arr2 = []
for i in range(N):
    arr.append(input())
for i in range(M):
    arr2.append(input())

# Set intersection keeps exactly the names appearing in both groups.
a = list(set(arr) & set(arr2))
a.sort()

print(len(a))
for i in a:
    print(i)
"gusdn3477@naver.com"
] | gusdn3477@naver.com |
482c535cae406f32ff74169addb0665a8ded5130 | 1c6866a37fddb455d5dd9a9db100415cd83b2429 | /sanic官方教程/请求数据/1-query_string.py | ea1c30bdd144eb58accc93598360f7fde417f8ef | [] | no_license | zb14755456464/sanic | e860bc140eab8725aa1410096edecd511163121d | 66462923b367c52edab15df6f33705b215f75174 | refs/heads/master | 2021-01-24T08:12:36.021336 | 2019-05-21T12:41:30 | 2019-05-21T12:41:30 | 122,973,882 | 0 | 1 | null | 2019-05-21T12:39:57 | 2018-02-26T13:29:12 | Python | UTF-8 | Python | false | false | 455 | py | from sanic.response import json
from sanic import Sanic

app = Sanic(__name__)


@app.route("/query_string")
def query_string(request):
    """Echo the parsed query arguments, the full URL and the raw query
    string of the incoming request as a JSON body."""
    # http://127.0.0.1:8000/query_string/?a=1&b=2&a=2
    # {"args":{"a":["1","2"],"b":["2"]},"url":"http:\/\/0.0.0.0:8000\/query_string\/?a=1&b=2","query_string":"a=1&b=2"}
    return json({"args": request.args, "url": request.url, "query_string": request.query_string})


if __name__ == '__main__':
    app.run()
| [
"1273844671@qq.com"
] | 1273844671@qq.com |
f3beb823bc4165ce7b933f2a83ac81d6221ac32d | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/Smetterleen/qual1.py | 654f790f055710cb8fefb7ee19fcf863c683bdcd | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 891 | py | '''
Created on Apr 9, 2016
@author: joep
'''
import os

# Which dataset is being solved; names the output file below.
ds_type = 'large'

BASE = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): .format(ds_type) on the input name is a no-op -- the
# 'A-large.in' literal has no placeholder, so only the large input is ever
# read regardless of ds_type.
inf = open(os.path.join(BASE, 'A-large.in'.format(ds_type)), 'r')
outf = open(os.path.join(BASE, '{}.out'.format(ds_type)), 'w+')

# Code Jam 2016 qualification problem A: keep accumulating the decimal
# digits of n, 2n, 3n, ... until all of 0-9 have appeared; the answer is the
# last multiple examined.  n == 0 never produces new digits -> "INSOMNIA".
cases = int(inf.readline())
for case in range(cases):
    n = int(inf.readline())
    if n == 0:
        c_n = 'INSOMNIA'
    else:
        i = 1
        digs = set()            # digits seen so far across all multiples
        while True:
            c_n = str(i * n)
            digs.update(set(c_n))
            done = True
            for dig in '0123456789':
                if dig not in digs:
                    done = False
                    break
            if done:
                break
            i += 1
    outf.write('Case #{}: {}\n'.format(case + 1, c_n))
    print('Finished {}'.format(case + 1))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
61c1eed9e08a33eaff443e4f2b977508ffb9b5d3 | f47ec2cffc71196679bb165d4c7d6de3b6884e33 | /src/pretalx/submission/forms/tag.py | 183749f9de658767e5f72ff6820a9051bcd32ec1 | [
"Apache-2.0"
] | permissive | toshywoshy/pretalx | 874bed725df48db47f118ff021340d0d34eca98a | 14619a4cb7d46df1434c8835abbac6f155c37626 | refs/heads/master | 2023-01-08T11:16:44.992557 | 2021-11-02T12:24:30 | 2021-11-02T12:24:30 | 179,450,372 | 0 | 0 | NOASSERTION | 2023-01-06T22:04:49 | 2019-04-04T08:00:38 | Python | UTF-8 | Python | false | false | 933 | py | from django import forms
from django.utils.translation import ugettext_lazy as _
from i18nfield.forms import I18nModelForm
from pretalx.common.mixins.forms import I18nHelpText, ReadOnlyFlag
from pretalx.submission.models import Tag
class TagForm(ReadOnlyFlag, I18nHelpText, I18nModelForm):
    """Create/edit form for a per-event submission Tag."""

    def __init__(self, *args, event=None, **kwargs):
        # The owning event; needed in clean_tag() for the uniqueness check.
        self.event = event
        super().__init__(*args, **kwargs)
        # Render the colour field with the JS colour-picker widget.
        self.fields["color"].widget.attrs["class"] = "colorpickerfield"

    def clean_tag(self):
        """Reject a tag name that already exists for this event."""
        tag = self.cleaned_data["tag"]
        qs = self.event.tags.all()
        if self.instance and self.instance.pk:
            # Editing an existing tag: don't compare it with itself.
            qs = qs.exclude(pk=self.instance.pk)
        # str() comparison because the tag field is translatable (i18n).
        if any(str(s.tag) == str(tag) for s in qs):
            raise forms.ValidationError(_("You already have a tag by this name!"))
        return tag

    class Meta:
        model = Tag
        fields = ("tag", "description", "color", "public")
"r@rixx.de"
] | r@rixx.de |
cfb3ff4f069ff67a31f55f2ba1d579cd20f195c9 | 1cf380b819a399c3f58a7ad13f5daeb5659cead3 | /wrf_management/real.py | 72fdb321a3d7e8e409ac03baf1527db36cdaafb4 | [] | no_license | daliagachc/wrf_management | dd88cf5d6279457f4e2b414acfa0d0cbaaad3873 | 4ee88c668ed0252e68713aa756b74344ecada615 | refs/heads/master | 2021-06-13T09:39:08.477315 | 2021-04-09T14:43:21 | 2021-04-09T14:43:21 | 171,271,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | # project name: wrf_management
# created by diego aliaga daliaga_at_chacaltaya.edu.bo
import glob
import os
import pathlib
from collections import OrderedDict
import wrf_management.project_global_constants as gc
import sqlite3 as sq
import pandas as pd
import wrf_management.run_utilities as ru
import f90nml
def skim_namelist_copy_real(
        input_path, output_path, *, date, duration_h
):
    """Copy namelist.input from *input_path* to *output_path*, rewriting the
    &time_control start/end entries to span *duration_h* hours from *date*.

    Each value is written four times -- presumably one per WRF domain
    (max_dom = 4); confirm against the project's namelists.  Returns the
    patched namelist dict as parsed by f90nml.
    """
    old_dic = f90nml.read(os.path.join(input_path, 'namelist.input'))
    s_dt = pd.to_datetime(date)
    e_dt = s_dt + pd.Timedelta(duration_h, 'h')
    # (key, value) pairs to overwrite inside the &time_control section.
    d_list = [
        ['start_year', s_dt.year],
        ['start_month', s_dt.month],
        ['start_day', s_dt.day],
        ['start_hour', s_dt.hour],
        ['end_year', e_dt.year],
        ['end_month', e_dt.month],
        ['end_day', e_dt.day],
        ['end_hour', e_dt.hour],
        # ['end_second', e_dt.second],
    ]
    for k, v in d_list:
        # print(k)
        # print(v)
        old_dic['time_control'][k] = 4*[v]
    f90nml.write(
        old_dic,
        os.path.join(output_path, 'namelist.input'),
        force=True
    )
    return old_dic
def get_met_files(*, job_path, met_pref):
    """Return the met_em.d* file paths found in the *met_pref* directory
    that sits next to *job_path* (i.e. ``<parent of job_path>/<met_pref>``).
    """
    met_dir = os.path.join(pathlib.Path(job_path).parent, met_pref)
    pattern = os.path.join(met_dir, 'met_em.d*')
    return glob.glob(pattern)
def link_met_files(
        *,
        job_path, met_pref
):
    """Link every met_em.d* file from the *met_pref* sibling directory into
    the job directory itself, keeping the original base names.

    Uses a throwaway DataFrame purely as a map/apply pipeline; presumably
    ru.relink replaces any pre-existing link -- confirm in run_utilities.
    """
    met_list = get_met_files(
        job_path=job_path,
        met_pref=met_pref)
    df = pd.DataFrame(met_list, columns=['source'])
    # Base file name, reused as the link name inside job_path.
    df['base_name'] = df['source'].apply(
        lambda p: os.path.basename(p)
    )
    df['dest'] = df['base_name'].apply(
        lambda bn: os.path.join(job_path, bn)
    )
    # One relink per row; executed for the side effect only.
    df.apply(
        lambda r: ru.relink(r['source'], r['dest']),
        axis=1
    )
| [
"diego.aliaga@helsinki.fi"
] | diego.aliaga@helsinki.fi |
8d938e45ee0ced99172d7b4a614d66203d5bb8f6 | 933ed73cdf117fc6c88c1ebba7a17b82807a16e8 | /docs/02.AI_ML/code-1905/day06/demo09_cv.py | c74d27eecf9adec52575b547693ae52533582b4a | [
"Apache-2.0"
] | permissive | wan230114/PythonNote | c4fff80f6f3849ed0b0346526d3c6197a4867d2c | f4989a44c03efdcea3f7aa3484e3470e7fd983eb | refs/heads/master | 2023-01-28T14:51:07.304066 | 2023-01-12T18:38:16 | 2023-01-12T18:38:16 | 242,367,069 | 5 | 6 | Apache-2.0 | 2021-01-05T23:35:10 | 2020-02-22T15:45:01 | JavaScript | UTF-8 | Python | false | false | 395 | py | """
demo09_cv.py 词袋模型
"""
import nltk.tokenize as tk
import sklearn.feature_extraction.text as ft

# Three-sentence sample document for the bag-of-words demo.
doc = 'The brown dog is running. ' \
      'The black dog is in the black room. ' \
      'Running in the room is forbidden.'
# Split doc into sentences.
sents = tk.sent_tokenize(doc)
# Build the bag-of-words model: one row per sentence, one column per term.
cv = ft.CountVectorizer()
bow = cv.fit_transform(sents)
print(bow.toarray())
"1170101471@qq.com"
] | 1170101471@qq.com |
648086fac69850c8b63b8de728580fbc618e210f | 6061ebee9fbce8eb5b48ed7ccd2aecb196156598 | /modulo03-estruturascondicionais/exercicios/exercicio11.py | 19004de489822a3a78156123ac1ad86c8d4f0533 | [] | no_license | DarioCampagnaCoutinho/logica-programacao-python | fdc64871849bea5f5bbf2c342db5fda15778110b | b494bb6ef226c89f4bcfc66f964987046aba692d | refs/heads/master | 2023-02-24T11:45:29.551278 | 2021-01-26T22:02:49 | 2021-01-26T22:02:49 | 271,899,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | horas = int(input('Horas:'))
# Greet according to the hour read just above:
#   0-11  -> "Bom Dia"  (good morning)
#   12-17 -> "Boa Tarde" (good afternoon)
#   18-23 -> "Boa Noite" (good night)
# anything else -> "Digite Novamente" (type again)
if horas >= 0 and horas <= 11:
    print('Bom Dia')
elif horas >= 12 and horas <= 17:
    print("Boa Tarde")
elif horas >= 18 and horas <= 23:
    print("Boa Noite")
else:
    print("Digite Novamente")
| [
"campagnacoutinho67@gmail.com"
] | campagnacoutinho67@gmail.com |
939bf81f702844980e7c9e5256af2fa6085d426a | 2c7b6ceffd09dae72d18a573a82d3a4c1d105e06 | /EXAMPLES/defaultdict_fruitnames.py | 14e449c1024ce3c9c3eee0cfc15d25f4b4c44901 | [] | no_license | feleHaile/20180813JPL | c23144a2139bc256e86a81a4402dc6ace0bb2791 | 09af77d98a9eeea193760aacff52b21fac8fc920 | refs/heads/master | 2020-05-15T22:31:14.922844 | 2018-08-16T21:59:40 | 2018-08-16T21:59:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | #!/usr/bin/env python
#
from collections import defaultdict
from pprint import pprint
fruits = ["pomegranate","cherry","apricot","date","apple","lemon","kiwi",
"orange","lime","watermelon","guava","papaya","fig","pear","banana",
"tamarind","persimmon","elderberry","peach","blueberry","lychee",
"grape" ]
fruits_by_first_letter = defaultdict(list) # <1>
for fruit in fruits:
first_letter = fruit[0] # <2>
fruits_by_first_letter[first_letter].append(fruit) # <3>
pprint(fruits_by_first_letter) # <4>
| [
"jstrickler@gmail.com"
] | jstrickler@gmail.com |
f897bc4ebb70621584bc23fe028dcaa3e6e152ec | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_6695.py | 6799ccfe76a1aae3be3037e8f110140980d21e5d | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # What is the proper way to track indexes in python?
# enumerate() yields (index, element) pairs, so no manual counter is needed.
# NOTE: `longList` must be defined by the surrounding code; Python 2 snippet
# (uses the `print` statement).
for index, entry in enumerate(longList):
    if entry == 'foo':
        print index
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
0a7c1810b71bc1d74a6427857b8fba73dcd596f5 | e9ed8174f0e2f52f858f0dd8b9206eb57388ece2 | /JssProject/JssProject/asgi.py | 54480fbeb22c73b1ed8de2d0c993cab07e905d18 | [] | no_license | Tedhoon/JasoseolProject | bb061dc1ed0cf0a0842a2c046c4434ccb80263a5 | 9500edabb35242f2974443a8b0fa43e5e3435484 | refs/heads/master | 2022-11-11T03:40:04.564877 | 2020-06-28T07:02:28 | 2020-06-28T07:02:28 | 275,432,188 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
ASGI config for JssProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project's settings before the app is built; setdefault
# keeps an explicitly exported DJANGO_SETTINGS_MODULE from being overridden.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'JssProject.settings')

# Module-level ASGI callable picked up by the ASGI server.
application = get_asgi_application()
| [
"gt0305@likelion.org"
] | gt0305@likelion.org |
62f6fefadf2e85580056e2d9ccbd8b06d40759b8 | 0203e5a6d7beb1e0f83113dac4c167b171756f24 | /lib/spdk-19.10/test/json_config/clear_config.py | 5328e6dc19c40496c6295697584810b4c20d3e50 | [
"BSD-3-Clause"
] | permissive | Wonchul08Lee/poseidonos | eaafe277fc56a0f5b5fcca3b70acc9bfe5d5d1ae | 6fe410cdf88f3243ad9210f763c2b5a2f7e8b46a | refs/heads/main | 2023-03-30T13:41:09.660647 | 2021-04-08T06:43:26 | 2021-04-08T06:43:26 | 355,819,746 | 0 | 0 | BSD-3-Clause | 2021-04-08T08:17:27 | 2021-04-08T08:17:26 | null | UTF-8 | Python | false | false | 7,593 | py | #!/usr/bin/env python3
import os
import sys
import argparse
import logging
sys.path.append(os.path.join(os.path.dirname(__file__), "../../scripts"))
import rpc # noqa
from rpc.client import print_dict, JSONRPCException # noqa
def get_bdev_name_key(bdev):
    """Return the RPC parameter key that names the bdev to delete.

    Split vbdevs are deleted by their base bdev; everything else by 'name'.
    """
    if bdev.get('method') == 'bdev_split_create':
        return "base_bdev"
    return 'name'
def get_bdev_name(bdev):
    """Extract the bdev name from a construction-RPC description.

    Looks at 'name', then 'base_name', then 'base_bdev' inside 'params';
    error-injection bdevs get the 'EE_' prefix their create RPC adds.
    Returns None when no name field is present.
    """
    name = None
    if 'params' in bdev:
        params = bdev['params']
        for key in ('name', 'base_name', 'base_bdev'):
            if key in params:
                name = params[key]
                break
    if bdev.get('method') == 'bdev_error_create':
        name = "EE_%s" % name
    return name
def get_bdev_delete_method(bdev):
    """Map a bdev construction RPC to its matching delete RPC.

    Returns None when the construction method is unknown or absent.
    """
    delete_by_create = {
        'bdev_malloc_create': "bdev_malloc_delete",
        'bdev_null_create': "bdev_null_delete",
        'bdev_rbd_create': "bdev_rbd_delete",
        'bdev_pmem_create': "bdev_pmem_delete",
        'bdev_aio_create': "bdev_aio_delete",
        'bdev_error_create': "bdev_error_delete",
        'construct_split_vbdev': "destruct_split_vbdev",
        'bdev_virtio_attach_controller': "remove_virtio_bdev",
        'bdev_crypto_create': "bdev_crypto_delete",
        'bdev_delay_create': "bdev_delay_delete",
        'bdev_passthru_create': "bdev_passthru_delete",
        'bdev_compress_create': 'bdev_compress_delete',
    }
    return delete_by_create.get(bdev.get('method'))
def clear_bdev_subsystem(args, bdev_config):
    """Delete every bdev described in *bdev_config*, detach all NVMe
    controllers, then disable hotplug, using the JSON-RPC client on *args*."""
    rpc_bdevs = args.client.call("bdev_get_bdevs")  # NOTE(review): result is unused
    for bdev in bdev_config:
        bdev_name_key = get_bdev_name_key(bdev)
        bdev_name = get_bdev_name(bdev)
        destroy_method = get_bdev_delete_method(bdev)
        if destroy_method:
            # Only constructions with a known delete RPC are torn down.
            args.client.call(destroy_method, {bdev_name_key: bdev_name})
    nvme_controllers = args.client.call("bdev_nvme_get_controllers")
    for ctrlr in nvme_controllers:
        args.client.call('bdev_nvme_detach_controller', {'name': ctrlr['name']})
    ''' Disable and reset hotplug '''
    rpc.bdev.bdev_nvme_set_hotplug(args.client, False)
def get_nvmf_destroy_method(nvmf):
    """Return the delete RPC for an NVMe-oF construction entry, or None for
    unknown methods / entries without a 'method' key."""
    if 'method' not in nvmf:
        return None
    return {'nvmf_create_subsystem': "nvmf_delete_subsystem"}.get(nvmf['method'])
def clear_nvmf_subsystem(args, nvmf_config):
    """Delete every NVMe-oF subsystem listed in *nvmf_config* by its NQN."""
    for nvmf in nvmf_config:
        destroy_method = get_nvmf_destroy_method(nvmf)
        if destroy_method:
            args.client.call(destroy_method, {'nqn': nvmf['params']['nqn']})
def get_iscsi_destroy_method(iscsi):
    """Return the delete RPC for an iSCSI construction entry.

    'iscsi_set_options' is configuration-only and deliberately maps to None;
    an unrecognised method raises KeyError, as the original mapping did.
    """
    method = iscsi['method']
    if method == 'iscsi_set_options':
        return None
    return {
        'iscsi_create_portal_group': "iscsi_delete_portal_group",
        'iscsi_create_initiator_group': "iscsi_delete_initiator_group",
        'iscsi_create_target_node': "iscsi_delete_target_node",
    }[method]
def get_iscsi_name(iscsi):
    """Return the identifying value for an iSCSI entry: its 'name' when
    present, otherwise its numeric 'tag'."""
    params = iscsi['params']
    return params['name'] if 'name' in params else params['tag']
def get_iscsi_name_key(iscsi):
    """Return the RPC parameter key naming the object: target nodes are
    addressed by 'name', portal/initiator groups by 'tag'."""
    return "name" if iscsi['method'] == 'iscsi_create_target_node' else 'tag'
def clear_iscsi_subsystem(args, iscsi_config):
    """Delete every iSCSI object (target nodes, portal and initiator groups)
    listed in *iscsi_config*; configuration-only entries are skipped."""
    for iscsi in iscsi_config:
        destroy_method = get_iscsi_destroy_method(iscsi)
        if destroy_method:
            args.client.call(destroy_method, {get_iscsi_name_key(iscsi): get_iscsi_name(iscsi)})
def get_nbd_destroy_method(nbd):
    """Return the stop RPC for an NBD start entry (KeyError on anything
    other than 'nbd_start_disk', matching the original lookup)."""
    return {'nbd_start_disk': "nbd_stop_disk"}[nbd['method']]
def clear_nbd_subsystem(args, nbd_config):
    """Stop every NBD disk started by the entries in *nbd_config*."""
    for nbd in nbd_config:
        destroy_method = get_nbd_destroy_method(nbd)
        if destroy_method:
            args.client.call(destroy_method, {'nbd_device': nbd['params']['nbd_device']})
def clear_net_framework_subsystem(args, net_framework_config):
    # Intentional no-op: the net_framework subsystem has nothing deletable.
    pass
def clear_copy_subsystem(args, copy_config):
    # Intentional no-op: the copy subsystem has nothing deletable.
    pass
def clear_interface_subsystem(args, interface_config):
    # Intentional no-op: the interface subsystem has nothing deletable.
    pass
def clear_vhost_subsystem(args, vhost_config):
    """Tear down vhost objects in reverse creation order: SCSI targets are
    removed before the controllers that own them are deleted."""
    for vhost in reversed(vhost_config):
        if 'method' in vhost:
            method = vhost['method']
            if method in ['vhost_scsi_controller_add_target']:
                args.client.call("vhost_scsi_controller_remove_target",
                                 {"ctrlr": vhost['params']['ctrlr'],
                                  "scsi_target_num": vhost['params']['scsi_target_num']})
            elif method in ['vhost_create_scsi_controller', 'vhost_create_blk_controller',
                            'vhost_create_nvme_controller']:
                args.client.call("vhost_delete_controller", {'ctrlr': vhost['params']['ctrlr']})
def call_test_cmd(func):
    """Decorator for CLI handlers: on JSONRPCException, print the server's
    error message and exit with status 1 instead of propagating."""
    def rpc_test_cmd(*cmd_args, **cmd_kwargs):
        try:
            func(*cmd_args, **cmd_kwargs)
        except JSONRPCException as ex:
            print((ex.message))
            exit(1)
    return rpc_test_cmd
if __name__ == "__main__":
    # CLI entry point: connection options plus one of two sub-commands
    # (clear_config / clear_subsystem), dispatched via set_defaults(func=...).
    parser = argparse.ArgumentParser(description='Clear config command')
    parser.add_argument('-s', dest='server_addr', default='/var/tmp/spdk.sock')
    parser.add_argument('-p', dest='port', default=5260, type=int)
    parser.add_argument('-t', dest='timeout', default=60.0, type=float)
    parser.add_argument('-v', dest='verbose', action='store_const', const="INFO",
                        help='Set verbose mode to INFO', default="ERROR")
    parser.add_argument('--verbose', dest='verbose', choices=['DEBUG', 'INFO', 'ERROR'],
                        help="""Set verbose level. """)
    subparsers = parser.add_subparsers(help='RPC methods')
    @call_test_cmd
    def clear_config(args):
        # Tear down every subsystem the target reports, in reverse order.
        for subsystem_item in reversed(args.client.call('framework_get_subsystems')):
            args.subsystem = subsystem_item['subsystem']
            clear_subsystem(args)
    p = subparsers.add_parser('clear_config', help="""Clear configuration of all SPDK subsystems and targets using JSON RPC""")
    p.set_defaults(func=clear_config)
    @call_test_cmd
    def clear_subsystem(args):
        # Dispatch to the matching module-level clear_<name>_subsystem helper.
        config = args.client.call('framework_get_config', {"name": args.subsystem})
        if config is None:
            return
        if args.verbose:
            print("Calling clear_%s_subsystem" % args.subsystem)
        globals()["clear_%s_subsystem" % args.subsystem](args, config)
    p = subparsers.add_parser('clear_subsystem', help="""Clear configuration of SPDK subsystem using JSON RPC""")
    p.add_argument('--subsystem', help="""Subsystem name""")
    p.set_defaults(func=clear_subsystem)
    args = parser.parse_args()
    # The JSON-RPC client is attached to the parsed args so the handlers can
    # reach it; exceptions here are reported the same way call_test_cmd does.
    with rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, log_level=getattr(logging, args.verbose.upper())) as client:
        try:
            args.client = client
            args.func(args)
        except JSONRPCException as ex:
            print((ex.message))
            exit(1)
| [
"poseidonos@samsung.net"
] | poseidonos@samsung.net |
a38336c56207fb0e1d51bcc216cb54f334e4f6c4 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/35ca52050200eb7a7d7dee4ba4da4e4a6c9da1320007_populate_computer.py | 35ca52050200eb7a7d7dee4ba4da4e4a6c9da132 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def initial_data(apps, schema_editor):
    """Seed the ShipComputer table with its six tiers and beam-attack values."""
    ShipComputer = apps.get_model('ship', 'ShipComputer')
    tiers = (
        ('None', 0),
        ('Electronic', 25),
        ('Optronic', 50),
        ('Positronic', 75),
        ('Cybertronic', 100),
        ('Moleculartronic', 125),
    )
    # Saved in ascending order, exactly as the original one-per-line calls did.
    for name, attack in tiers:
        ShipComputer(name=name, beamattack=attack).save()
class Migration(migrations.Migration):
    # Data migration: runs initial_data() after the preceding schema change.
    dependencies = [
        ('ship', '0006_auto_20141004_0839'),
    ]
    operations = [
        migrations.RunPython(initial_data),
    ]
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
463f2b8c0b5c8c46db28eb503109fe4db542aa86 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/common/dossiers2/custom/helpers.py | f05bfc017fe3b9514391e58a82f275ee50d3af3d | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,786 | py | # 2017.05.04 15:28:51 Střední Evropa (letní čas)
# Embedded file name: scripts/common/dossiers2/custom/helpers.py
from dossiers2.custom.records import RECORDS, RECORD_INDICES
from dossiers2.custom.cache import getCache
def getTankExpertRequirements(vehTypeFrags, nationID = -1):
    """For each 'tankExpert*' achievement, return the set of vehicle types
    still to be killed (an empty set means the achievement is earned).
    nationID == -1 checks every nation that has vehicles in a tech tree.
    """
    cache = getCache()
    killedVehTypes = set(vehTypeFrags.iterkeys())  # Python 2 dict API
    res = {'tankExpert': cache['vehiclesInTrees'] - killedVehTypes}
    if nationID == -1:
        nationIDs = cache['nationsWithVehiclesInTree']
    else:
        nationIDs = [nationID]
    vehiclesInTreesByNation = cache['vehiclesInTreesByNation']
    for nationIdx in nationIDs:
        # Per-nation records are keyed 'tankExpert<nationIdx>'.
        res[''.join(['tankExpert', str(nationIdx)])] = vehiclesInTreesByNation[nationIdx] - killedVehTypes
    return res
def getMechanicEngineerRequirements(defaultUnlocks, unlocks, nationID = -1):
    """For each 'mechanicEngineer*' achievement, return the set of vehicle
    types still to be unlocked (an empty set means the achievement is earned).
    nationID == -1 checks every nation that has vehicles in a tech tree.
    """
    cache = getCache()
    vehiclesInTreesByNation = cache['vehiclesInTreesByNation']
    res = {'mechanicEngineer': cache['vehiclesInTrees'] - defaultUnlocks - unlocks}
    if nationID == -1:
        nationIDs = cache['nationsWithVehiclesInTree']
    else:
        nationIDs = [nationID]
    for nationIdx in nationIDs:
        # Per-nation records are keyed 'mechanicEngineer<nationIdx>'.
        res[''.join(['mechanicEngineer', str(nationIdx)])] = vehiclesInTreesByNation[nationIdx] - defaultUnlocks - unlocks
    return res
def getRecordMaxValue(block, record):
    """Return the maximum storable value for a dossier record.

    Binary records ('b'/'bs') cap at 1; packed ('p') records carry their
    maximum in the packing descriptor.
    """
    recordPacking = RECORDS[RECORD_INDICES[block, record]]
    if recordPacking[2] == 'b' or recordPacking[2] == 'bs':
        return 1
    # BUGFIX: the decompiled source read `raise recordPacking[2] == 'p' or
    # AssertionError`, which raises on *every* packed record ('raise True' is
    # a TypeError).  The original bytecode was an assert guarding the return.
    assert recordPacking[2] == 'p', recordPacking[2]
    return recordPacking[4]
def updateTankExpert(dossierDescr, vehTypeFrags, nationID):
    """Grant every 'tankExpert*' achievement whose remaining-kills set is empty."""
    res = getTankExpertRequirements(vehTypeFrags, nationID)
    for record, value in res.iteritems():
        if len(value) == 0:
            dossierDescr['achievements'][record] = True
            dossierDescr.addPopUp('achievements', record, True)
def updateMechanicEngineer(dossierDescr, defaultUnlocks, unlocks, nationID):
    """Grant every 'mechanicEngineer*' achievement whose remaining-unlocks set is empty."""
    res = getMechanicEngineerRequirements(defaultUnlocks, unlocks, nationID)
    for record, value in res.iteritems():
        if len(value) == 0:
            dossierDescr['achievements'][record] = True
            dossierDescr.addPopUp('achievements', record, True)
def updateRareAchievements(dossierDescr, achievements):
    """Apply rare-achievement deltas to a dossier.

    Positive IDs are appended; negative IDs remove abs(ID) if present
    (best-effort); zero is ignored.
    """
    block = dossierDescr['rareAchievements']
    for achievement in achievements:
        if achievement > 0:
            block.append(achievement)
        elif achievement < 0:
            try:
                block.remove(abs(achievement))
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.  Removal stays best-effort:
                # a missing ID is simply ignored.
                pass
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\dossiers2\custom\helpers.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:28:52 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
cd04e4a74fe6f82b608519b7387f62b22d627744 | b643abbcfb5dc46a2d311f179f740cbe44f6a922 | /manage.py | 420d5ab4bec8ad6485ebd9ef3286367cc3685146 | [] | no_license | safwanvk/productivity | e7126d2ce77649e80ada365ab4616baa91b289ec | 141598632da0acd6c47ff34446ccbef9f7b980ac | refs/heads/main | 2023-03-10T19:06:57.589215 | 2021-03-01T14:41:24 | 2021-03-01T14:41:24 | 342,141,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default to this project's settings module unless the caller overrides it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ProductivityApp.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"safwanvalakundil@gmail.com"
] | safwanvalakundil@gmail.com |
7c22ed802147373cbca2af023716a1833bc12ada | 1adf769cf9234f9b6c619f808d2723b99451d679 | /rusentrel/classic_cv/ctx/ian_ef.py | 63e565c7b73f84faf7767733f0cbdada3e14c82b | [
"MIT"
] | permissive | DAVMARROS/attitude-extraction-with-attention-and-ds | 4e85fa154ead0cd9499aaedf5d752ac565f37b92 | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | refs/heads/master | 2023-02-09T04:56:24.090380 | 2020-12-30T10:09:34 | 2020-12-30T10:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
# Make the repository root importable before pulling in the experiment code.
sys.path.append('../../../')
from rusentrel.classic.ctx.ian_ef import run_testing_ian_ef
from rusentrel.classic_cv.common import CV_COUNT, \
    classic_cv_common_callback_modification_func, \
    CV_NAME_PREFIX
if __name__ == "__main__":
    # Run the IAN(ends-feature) experiment under the shared CV settings.
    run_testing_ian_ef(
        name_prefix=CV_NAME_PREFIX,
        cv_count=CV_COUNT,
        custom_callback_func=classic_cv_common_callback_modification_func)
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
aa37bb217f03cac2488f3606a6f1a5a26f41559f | 3d37f595a8aaaa7c5723ddbd6758ecac5147dce2 | /maximum-subarray/maximum-subarray.py | bf5f661dd48593b947036ae83c3e2e79e0d320f9 | [] | no_license | baggy2797/Leetcode | ec218b155ebb972cd793253f25c3e18117216703 | 469c1541579401768f7a1da55d504a9e8656b21e | refs/heads/main | 2023-06-24T17:03:42.708935 | 2021-07-16T22:31:24 | 2021-07-16T22:31:24 | 342,979,700 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
globalMax = nums[0]
currMax = nums[0]
for i in range(1,len(nums)):
currMax = max(currMax+nums[i],nums[i])
globalMax = max(globalMax,currMax)
return globalMax | [
"bhagwataditya226@gmail.com"
] | bhagwataditya226@gmail.com |
d2303bbedd1e575b6704b5320400a954d0ca9015 | a5718006e28b394633c4e84e75e7941cb4c11a08 | /TD 1 probleme 55 Arthur Lambert.py | 16a3dfdd20ce6ec98cfeb59b80e0126829370564 | [] | no_license | mines-nancy-tcss5ac-2018/td1-ArthurLambert1 | ab883b105e7a5341524032a5bf89c866861f5dd4 | 4e65a396b52e2fe5a7452d78ca0f739260beb854 | refs/heads/master | 2020-03-31T00:16:09.032475 | 2018-10-05T20:41:03 | 2018-10-05T20:41:03 | 151,733,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | def nombreMiroir(x): #Retourne le nombre à l'envers
chaine = str(x)
chaineRenversee = ''
for elt in chaine:
chaineRenversee = elt + chaineRenversee
return int(chaineRenversee)
def testPalindrome(x):
    """True when x equals its own digit-reversal (mirror helper inlined)."""
    return x == int(str(x)[::-1])
def testLychrel(n):
    """Return 1 if n reaches a palindrome within 50 reverse-and-add steps,
    else 0 (i.e. 0 marks a Lychrel candidate)."""
    for _ in range(50):
        n += int(str(n)[::-1])
        digits = str(n)
        if digits == digits[::-1]:
            return 1
    return 0
def solve55(n):
    """Count Lychrel candidates in [10, n]: numbers that never reach a
    palindrome within 50 reverse-and-add iterations (helpers inlined)."""
    count = 0
    for start in range(10, n + 1):
        value = start
        is_lychrel = True
        for _ in range(50):
            value += int(str(value)[::-1])
            digits = str(value)
            if digits == digits[::-1]:
                is_lychrel = False
                break
        if is_lychrel:
            count += 1
    return count
# Regression check from Project Euler #55 (249 Lychrel candidates below
# ten thousand).  NOTE: both lines run the full scan at import time.
assert solve55(10000) == 249
print(solve55(10000))
| [
"noreply@github.com"
] | mines-nancy-tcss5ac-2018.noreply@github.com |
594c3f8d10249ae15900bb0604641c8dd1f64ec3 | 933a4f98b3ab1df987bce525d20ca904b225140f | /scripts/slave/recipe_modules/buildbucket/tests/put.py | cd433ecae9b207c24dc57c1e52b530ea49713fe2 | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-build | 3881c489b4d9be2f113da755487808b3593f8156 | f8e42c70146c1b668421ee6358dc550a955770a3 | refs/heads/master | 2020-12-30T12:32:15.685191 | 2017-05-17T06:58:18 | 2017-05-17T06:58:18 | 91,419,271 | 0 | 2 | NOASSERTION | 2020-07-22T09:27:35 | 2017-05-16T05:52:45 | Python | UTF-8 | Python | false | false | 1,518 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe-module dependencies injected into the `api` object at runtime.
DEPS = [
  'buildbucket',
  'service_account',
  'recipe_engine/properties',
]
def RunSteps(api):
  # Exercises buildbucket.put by scheduling a single linux_perf_bisect build
  # with a representative bisect_config, overridden tags and a fixed
  # client_operation_id, authenticated via the named service account.
  example_bucket = 'master.user.username'
  build_parameters = {
      'builder_name': 'linux_perf_bisect',
      'properties': {
          'bisect_config': {
              'bad_revision': '351054',
              'bug_id': 537649,
              'command': ('src/tools/perf/run_benchmark -v '
                          '--browser=release --output-format=chartjson '
                          '--also-run-disabled-tests speedometer'),
              'good_revision': '351045',
              'gs_bucket': 'chrome-perf',
              'max_time_minutes': '20',
              'metric': 'Total/Total',
              'recipe_tester_name': 'linux_perf_bisect',
              'repeat_count': '10',
              'test_type': 'perf'
          },
      }
  }
  build_tags = {'master': 'overriden.master.url',
                'builder': 'overriden_builder'}
  service_account = api.service_account.get_json_path('username')
  api.buildbucket.put(
      [{'bucket': example_bucket,
        'parameters': build_parameters,
        'tags': build_tags,
        'client_operation_id': 'random_client_op_id'}],
      service_account)
def GenTests(api):
  # Single simulation case driving RunSteps with minimal build properties.
  yield (
      api.test('basic') +
      api.properties(buildername='example_builder', buildnumber=123)
  )
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
6a927d7f4c1bd6bbb8a99700a2b23ada0d6bd2ab | 91ff6fdf7b2ccc58869d6ad41842f230644952c1 | /requirements/venky_task/important/textwrap.py | 11f82c6c9c166647506b48126e9b1c74f97a3d77 | [] | no_license | KONASANI-0143/Dev | dd4564f54117f54ccfa003d1fcec4220e6cbe1f9 | 23d31fbeddcd303a7dc90ac9cfbe2c762d61c61e | refs/heads/master | 2023-08-14T15:59:59.012414 | 2021-10-13T14:54:49 | 2021-10-13T15:10:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | import textwrap
sample_text = '''
Python is a widely used high-level, general-purpose, interpreted,
dynamic programming language. Its design philosophy emphasizes
code readability, and its syntax allows programmers to express
concepts in fewer lines of code than possible in languages such
as C++ or Java.
'''
print(textwrap.fill(sample_text, width=50))
print(textwrap.dedent(sample_text))
| [
"harinadhareddypython@gmail.com"
] | harinadhareddypython@gmail.com |
37c0f4f4f38905db494572485aff0acda8d01ef6 | 21a15af3e5b38d1cb10a107c2b66b0f712b49753 | /final_project/combine_pred/clean_mgdb.py | 3c242cf452a390a095112304c7c42cb3ecf45c4a | [
"MIT"
] | permissive | dbirman/cs375 | 00793f07730e9d606e6c83125d1d16ad337f1a1c | 7aeac1ed57eff74cbecb3e1091b01f00d34629a8 | refs/heads/master | 2021-05-08T06:28:28.505935 | 2017-12-15T02:19:27 | 2017-12-15T02:19:27 | 106,620,325 | 0 | 2 | MIT | 2017-12-13T02:47:25 | 2017-10-11T23:33:22 | Jupyter Notebook | UTF-8 | Python | false | false | 2,111 | py | from __future__ import division, print_function, absolute_import
import os, sys
import numpy as np
import cPickle
import json
import copy
import argparse
import pymongo as pm
import gridfs
def get_parser():
    """Build the CLI argument parser for the mongodb cleanup script."""
    cli = argparse.ArgumentParser(
        description='The script to delete the models saved in mongodb')
    cli.add_argument(
        '--nport', default=27009, type=int, action='store',
        help='Port number of mongodb')
    cli.add_argument(
        '--expId', default="combinet_alexnet_ncp_new_2", type=str,
        action='store', help='Name of experiment id')
    cli.add_argument(
        '--dbname', default="combinet-test", type=str, action='store',
        help='Database name')
    cli.add_argument(
        '--collname', default="combinet", type=str, action='store',
        help='Collection name')
    return cli
def main():
    """Inspect (and, via the commented lines, optionally delete) saved model
    checkpoints stored in mongodb GridFS for one experiment id."""
    parser = get_parser()
    args = parser.parse_args()
    load_conn = pm.MongoClient(port=args.nport)
    collfs = gridfs.GridFS(load_conn[args.dbname], args.collname)
    # NOTE(review): reaches into a name-mangled private GridFS attribute.
    coll = collfs._GridFS__files
    query = {'exp_id': args.expId, 'saved_filters': True}
    count = collfs.find(query).count()
    count_gfs = coll.find(query).count()
    print(count, count_gfs)
    find_res = coll.find(query)
    print(find_res[0].keys())
    print(find_res[0]['chunkSize'])
    print(find_res[0]['filename'])
    print(find_res[0]['_id'])
    '''
    loading_from = coll
    fsbucket = gridfs.GridFSBucket(loading_from._Collection__database, bucket_name=loading_from.name.split('.')[0])
    filename = os.path.basename(find_res[0]['filename'])
    cache_filename = os.path.join('/home/chengxuz/.tfutils/tmp', filename)
    load_dest = open(cache_filename, "w+")
    load_dest.close()
    load_dest = open(cache_filename, 'rwb+')
    fsbucket.download_to_stream(find_res[0]['_id'], load_dest)
    load_dest.close()
    '''
    #collfs.delete(find_res[0]['_id'])
    loading_from = coll
    fsbucket = gridfs.GridFSBucket(loading_from._Collection__database, bucket_name=loading_from.name.split('.')[0])
    # The actual deletion is left commented out as a safety switch.
    #fsbucket.delete(find_res[0]['_id'])
| [
"ianeisenberg90@gmail.com"
] | ianeisenberg90@gmail.com |
3c10902f7900b2106e7a058e821f3ff2c015af73 | d0b52237c314fbae746a3922205c7f3c22c99498 | /server/runserver.py | bc997c42cae9f2a2a678909f3e02796ebcc9a5f0 | [] | no_license | kallebefelipe/spotify-albums | 5a7e9db6729351477e89ff5bdb7d7a10fd310393 | 44b16225a950f9a1580ede8c317af688b6d73dec | refs/heads/master | 2022-12-10T16:45:43.685587 | 2019-10-30T00:54:49 | 2019-10-30T00:54:49 | 217,175,675 | 0 | 0 | null | 2022-12-10T06:37:08 | 2019-10-24T00:08:25 | JavaScript | UTF-8 | Python | false | false | 618 | py | from flask import request, Flask
import requests
import json
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/token', methods=['GET'])
def login():
    """Fetch a Spotify client-credentials token and return the parsed JSON."""
    if request.method == 'GET':
        url = "https://accounts.spotify.com/api/token"
        payload = {
            'grant_type': "client_credentials"
        }
        headers = {
            # SECURITY(review): the base64 client id/secret is hard-coded in
            # source control; it should come from configuration instead.
            'Authorization': "Basic MjBiMzk3ZGU5YmEyNDE0Yjk2NGJmNTVjZmNlYzllYzM6ZmY0MDYzZGM1MDAxNDFkZTlhNjBiNjI2ZjY1YmNiMDg=",
        }
        response = requests.request("POST", url, data=payload, headers=headers)
        return json.loads(response.text)
| [
"kallebefelipe@gmail.com"
] | kallebefelipe@gmail.com |
35d34c6d612853fb568231e774c9d4634b8cd4de | 025f930f0d342d116604a185103d13826d7ac360 | /GenericApiViews2/api/views.py | a61030fbcd532dfad511ff531fd4e9a4cc35ca15 | [] | no_license | moinakmalkhan/Learn-Django-Rest-Framework | 196c30591ed43ef7722cb22dea600a5ddcc0b8cf | de2ce66779525647d582998450a47558b70376f9 | refs/heads/master | 2023-09-03T08:51:50.346047 | 2021-11-14T15:48:23 | 2021-11-14T15:48:23 | 426,378,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | from .serializers import StudentSerializer
from .models import Student
from rest_framework.generics import ListAPIView, CreateAPIView, RetrieveAPIView, UpdateAPIView, DestroyAPIView, ListCreateAPIView, RetrieveDestroyAPIView, RetrieveUpdateAPIView, RetrieveUpdateDestroyAPIView
class StudentList(ListAPIView):
    """GET: list all students."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentCreate(CreateAPIView):
    """POST: create a student."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentRetrieve(RetrieveAPIView):
    """GET: retrieve one student by pk."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentUpdate(UpdateAPIView):
    """PUT/PATCH: update one student by pk."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentDestroy(DestroyAPIView):
    """DELETE: remove one student by pk."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentListCreate(ListCreateAPIView):
    """GET: list students; POST: create one."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentRetrieveUpdate(RetrieveUpdateAPIView):
    """GET/PUT/PATCH on one student by pk."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentRetrieveDestroy(RetrieveDestroyAPIView):
    """GET/DELETE on one student by pk."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentRetrieveUpdateDestroy(RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE on one student by pk."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
| [
"moinakmalkhan@gmail.com"
] | moinakmalkhan@gmail.com |
61979b816a8ccba14cde24e7256aaea343707468 | ff23e5c890216a1a63278ecb40cd7ac79ab7a4cd | /clients/kratos/python/test/test_update_login_flow_with_totp_method.py | 1a1c70aa5b19aa111a665097c39a43f3b7ef81ef | [
"Apache-2.0"
] | permissive | ory/sdk | fcc212166a92de9d27b2dc8ff587dcd6919e53a0 | 7184e13464948d68964f9b605834e56e402ec78a | refs/heads/master | 2023-09-01T10:04:39.547228 | 2023-08-31T08:46:23 | 2023-08-31T08:46:23 | 230,928,630 | 130 | 85 | Apache-2.0 | 2023-08-14T11:09:31 | 2019-12-30T14:21:17 | C# | UTF-8 | Python | false | false | 1,061 | py | """
Ory Identities API
This is the API specification for Ory Identities with features such as registration, login, recovery, account verification, profile settings, password reset, identity management, session management, email and sms delivery, and more. # noqa: E501
The version of the OpenAPI document: v1.0.0
Contact: office@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_kratos_client
from ory_kratos_client.model.update_login_flow_with_totp_method import UpdateLoginFlowWithTotpMethod
class TestUpdateLoginFlowWithTotpMethod(unittest.TestCase):
    """UpdateLoginFlowWithTotpMethod unit test stubs"""
    # Generated by openapi-generator; the test body is a placeholder until a
    # model instance with mandatory attributes is constructed (see FIXME).
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testUpdateLoginFlowWithTotpMethod(self):
        """Test UpdateLoginFlowWithTotpMethod"""
        # FIXME: construct object with mandatory attributes with example values
        # model = UpdateLoginFlowWithTotpMethod() # noqa: E501
        pass
| [
"3372410+aeneasr@users.noreply.github.com"
] | 3372410+aeneasr@users.noreply.github.com |
50d95be7ef2ce5d4a134ccc436ebc29970f1a7f8 | afbae26b958b5ef20548402a65002dcc8e55b66a | /release/stubs.min/Autodesk/Revit/DB/__init___parts/AreaTag.py | 332d52a8e52d3635319402c9944936eaa97a724a | [
"MIT"
] | permissive | gtalarico/ironpython-stubs | d875cb8932c7644f807dc6fde9dd513d159e4f5c | c7f6a6cb197e3949e40a4880a0b2a44e72d0a940 | refs/heads/master | 2023-07-12T01:43:47.295560 | 2022-05-23T18:12:06 | 2022-05-23T18:12:06 | 95,340,553 | 235 | 88 | NOASSERTION | 2023-07-05T06:36:28 | 2017-06-25T05:30:46 | Python | UTF-8 | Python | false | false | 1,410 | py | class AreaTag(SpatialElementTag,IDisposable):
""" Provides access to the area topology in Autodesk Revit. """
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Area=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The area that the tag is associated with.
Get: Area(self: AreaTag) -> Area
"""
AreaTagType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The tag type.
Get: AreaTagType(self: AreaTag) -> AreaTagType
Set: AreaTagType(self: AreaTag)=value
"""
| [
"gtalarico@gmail.com"
] | gtalarico@gmail.com |
ec944233f08b6b31e0c6a20e89a857540fba1341 | 504efba4ab5ba1721ab3388144b16fa5f24833e7 | /05_Chroma_Scan_NoSC/02_04/simulation_parameters.py | 3e62fc436c63071f23e47fade4bde02984f16c0f | [
"MIT"
] | permissive | HaroonRafique/PS_Transfer | b568fe41c98357877c3bc63b2ca89f8724439da0 | 59ed8a0978ba4699f34c9f7a2500e0026759a2b6 | refs/heads/master | 2023-05-25T21:13:36.586605 | 2020-07-10T07:41:40 | 2020-07-10T07:41:40 | 213,405,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | import numpy as np
parameters = {}
dps = [-2.5E-3, -2E-3, -1.5E-3, -1E-3, -0.5E-3, 0, 0.5E-3, 1E-3, 1.5E-3, 2E-3, 2.5E-3]
parameters['dpp_rms'] = dps[3]
parameters['x_offset'] = 0.#50E-6 # 50 micron orbit offset to begin oscillation
parameters['n_macroparticles'] = int(5E4) # int(5E5)
# Include machine (PS), tunes, lattice start position (BWS65H) for bunch output file label
parameters['tunex'] = '6218'
parameters['tuney'] = '624'
parameters['machine'] = 'PS'
parameters['lattice_start'] = 'BSG52'
parameters['Optics'] = 'Op' #'ReM' #'Lattice', #,
parameters['bunch_label'] = parameters['machine'] + '_Lattice_Tune_' + parameters['tunex'] + '_' + parameters['tuney'] + '_' + parameters['lattice_start']
parameters['flat_file'] = '../../00_Lattice_Setup/Optimised_Lattice/PTC-PyORBIT_flat_file.flt'
parameters['tomo_file'] = 'PyORBIT_Tomo_file_BCMS_PreLIU.mat'
parameters['bunch_file'] = '../../01_Generate_Distn/Bunches/PyORBIT_Tomo_Bunch_Manual_Twiss_Nmp_' + str(parameters['n_macroparticles'])+'_PS_Lattice_Tune_6218_624_' + parameters['lattice_start']+'_'+parameters['Optics']+'.mat'
parameters['intensity'] = 65E+10
parameters['macrosize'] = parameters['intensity']/float(parameters['n_macroparticles'])
parameters['gamma'] = 2.49253731343
parameters['bunch_length'] = 140e-9
parameters['blength'] = 140e-9
parameters['epsn_x'] = 1E-6
parameters['epsn_y'] = 1E-6
parameters['LongitudinalJohoParameter'] = 1.2
parameters['LongitudinalCut'] = 2.4
parameters['TransverseCut'] = 5
parameters['rf_voltage'] = 0.0212942055190595723
# ~ parameters['rf_voltage'] = 0.0
parameters['circumference'] = 2*np.pi*100
parameters['phi_s'] = 0
parameters['macrosize'] = parameters['intensity']/float(parameters['n_macroparticles'])
# PS Injection 1.4 GeV
parameters['gamma'] = 2.49253731343
parameters['beta'] = np.sqrt(parameters['gamma']**2-1)/parameters['gamma']
c = 299792458
parameters['sig_z'] = (parameters['beta'] * c * parameters['blength'])/4.
parameters['turns_max'] = int(30)
parameters['turns_print'] = range(0, parameters['turns_max'])
parameters['turns_update'] = range(0, parameters['turns_max'])
switches = {
'CreateDistn': True,
'Space_Charge': False,
'GridSizeX': 64,
'GridSizeY': 64,
'GridSizeZ': 32
}
# PTC RF Table Parameters
harmonic_factors = [1] # this times the base harmonic defines the RF harmonics (for SPS = 4620, PS 10MHz 7, 8, or 9)
time = np.array([0,1,2])
ones = np.ones_like(time)
Ekin_GeV = 1.4*ones
RF_voltage_MV = np.array([0.0212942055190595723*ones]).T # in MV
# ~ RF_voltage_MV = np.array([0.0*ones]).T # in MV
RF_phase = np.array([np.pi*ones]).T
RFparameters = {
'harmonic_factors': harmonic_factors,
'time': time,
'Ekin_GeV': Ekin_GeV,
'voltage_MV': RF_voltage_MV,
'phase': RF_phase
}
| [
"haroon.rafique@protonmail.com"
] | haroon.rafique@protonmail.com |
46e9e80b3ef30dbb6efd906e9158852e265a5b91 | d125c002a6447c3f14022b786b07712a7f5b4974 | /tests/bugs/core_4811_test.py | 2bbaf2d77f1cba7d0e6a4e8ea3c6a6fc24a2c090 | [
"MIT"
] | permissive | FirebirdSQL/firebird-qa | 89d5b0035071f9f69d1c869997afff60c005fca9 | cae18186f8c31511a7f68248b20f03be2f0b97c6 | refs/heads/master | 2023-08-03T02:14:36.302876 | 2023-07-31T23:02:56 | 2023-07-31T23:02:56 | 295,681,819 | 3 | 2 | MIT | 2023-06-16T10:05:55 | 2020-09-15T09:41:22 | Python | UTF-8 | Python | false | false | 7,408 | py | #coding:utf-8
"""
ID: issue-5109
ISSUE: 5109
TITLE: Make user names behave according to SQL identifiers rules
DESCRIPTION:
JIRA: CORE-4811
FBTEST: bugs.core_4811
"""
import pytest
from firebird.qa import *
# Output normalization: strip echo noise and volatile line/column and
# exception numbers from the ISQL output before comparison.
substitutions = [('set echo.*', ''), ('Use CONNECT or CREATE DATABASE.*', ''),
                 ('Your user name and password.*', ''), ('line: [0-9]+, col: [0-9]+', ''),
                 ('exception [0-9]+', 'exception')]
# Fixtures: fresh database plus a temporary user and a role created as
# 'Boss' (mixed case, unquoted) -- the test exercises how both names match.
db = db_factory()
tmp_user = user_factory('db', name='tmp$c4811', password='1')
tmp_role = role_factory('db', name='Boss')
test_script = """
set wng off;
set list on;
create or alter procedure sp_check_actual_role as begin end;
commit;
recreate exception ex_have_no_role 'You''ve specified role: >@1< -- but your actual role is NONE.';
set term ^;
create or alter procedure sp_check_actual_role(
a_probe_role varchar(31)
) returns(
checking varchar(80),
result varchar(31)
) as
begin
if ( upper(current_role) = 'NONE' )
then
exception ex_have_no_role using ( a_probe_role );
checking = 'role: >' || a_probe_role || '< - '
|| trim(
iif( a_probe_role containing '''', 'in apostrophes',
iif( a_probe_role containing '"', 'in double quotes', 'without delimiters' )
)
)
|| ', ' || iif( upper(a_probe_role) = a_probe_role, 'UPPER case', 'CaMeL case' )
;
result = current_role;
suspend;
end
^
set term ;^
commit;
set bail on;
set echo on;
grant Boss to Tmp$c4811;
grant usage on exception ex_have_no_role to Tmp$c4811;
grant execute on procedure sp_check_actual_role to Tmp$c4811;
set echo off;
set bail off;
-- show grants;
commit;
-- set echo on;
-- checking for USER name:
connect '$(DSN)' user 'Tmp$c4811' password '1';
-- PASSES since http://sourceforge.net/p/firebird/code/62016 (2015-07-16 14:26), this was build = 31981
select 'user: >''Tmp$c4811''< - in apostrophes, CaMeL case' checking, current_user as result from rdb$database;
commit;
connect '$(DSN)' user 'TMP$C4811' password '1'; -- should PASS, checked on builds 31948, 31981
select 'user: >''TMP$C4811''< - in apostrophes, UPPER case' checking, current_user as result from rdb$database;
commit;
connect '$(DSN)' user Tmp$c4811 password '1'; -- should PASS, checked on builds 31948, 31981
select 'user: >Tmp$c4811< - without delimiters, CaMeL case' checking, current_user as result from rdb$database;
commit;
connect '$(DSN)' user TMP$C4811 password '1'; -- should PASS, checked on builds 31948, 31981
select 'user: >TMP$C4811< - without delimiters, UPPER case' checking, current_user as result from rdb$database;
commit;
connect '$(DSN)' user "Tmp$c4811" password '1'; -- should *** FAIL ***
select 'user: >"Tmp$c4811"< - in double quotes, CaMeL case' checking, current_user as result from rdb$database;
commit;
connect '$(DSN)' user "TMP$C4811" password '1'; -- should PASS, checked on builds 31948, 31981
select 'user: >"TMP$C4811" - in double quotes, UPPER case' checking, current_user as result from rdb$database;
commit;
-- checking for ROLE (actual role in all following cases will be: [BOSS], checked on builds 31948, 31981)
-- Statement that created role (see above):
-- create role Boss;
-- Enclosing role in apostrophes and specifying it exactly like it was in its creation sttm:
connect '$(DSN)' user 'TMP$C4811' password '1' role 'Boss';
select * from sp_check_actual_role( '''Boss''' ); --------------- should return: BOSS
commit;
-- Enclosing role in apostrophes and specifying it in UPPERCASE (i.e. differ than in its CREATE ROLE statement):
connect '$(DSN)' user 'TMP$C4811' password '1' role 'BOSS';
select * from sp_check_actual_role( '''BOSS''' ); --------------- should return: BOSS
commit;
-- do NOT enclosing role in any delimiters and change CaSe of its characters (i.e. differ than in its CREATE ROLE statement):
connect '$(DSN)' user 'TMP$C4811' password '1' role BosS;
select * from sp_check_actual_role( 'BosS' ); --------------- should return: BOSS
commit;
-- do NOT enclosing role in any delimiters and specifying it in UPPERCASE (i.e. differ than in its CREATE ROLE statement):
connect '$(DSN)' user 'TMP$C4811' password '1' role BOSS;
select * from sp_check_actual_role( 'BOSS' ); --------------- should return: BOSS
commit;
-- Enclosing role in double quotes and change CaSe of its characters (i.e. differ than in its CREATE ROLE statement):
connect '$(DSN)' user 'TMP$C4811' password '1' role "BoSs";
select * from sp_check_actual_role( '"BoSs"' ); --------------- should raise EX_HAVE_NO_ROLE, actual role will be 'NONE'
commit;
-- Enclosing role in double quotes and specifying it in UPPERCASE (i.e. differ than in its CREATE ROLE statement):
connect '$(DSN)' user 'TMP$C4811' password '1' role "BOSS";
select * from sp_check_actual_role( '"BOSS"' ); --------------- should return: BOSS
commit;
"""
# Bind the ISQL script and output substitutions to the database fixture.
act = isql_act('db', test_script, substitutions=substitutions)
expected_stdout = """
grant Boss to Tmp$c4811;
grant usage on exception ex_have_no_role to Tmp$c4811;
grant execute on procedure sp_check_actual_role to Tmp$c4811;
CHECKING user: >'Tmp$c4811'< - in apostrophes, CaMeL case
RESULT TMP$C4811
CHECKING user: >'TMP$C4811'< - in apostrophes, UPPER case
RESULT TMP$C4811
CHECKING user: >Tmp$c4811< - without delimiters, CaMeL case
RESULT TMP$C4811
CHECKING user: >TMP$C4811< - without delimiters, UPPER case
RESULT TMP$C4811
CHECKING user: >"TMP$C4811" - in double quotes, UPPER case
RESULT TMP$C4811
CHECKING role: >'Boss'< - in apostrophes, CaMeL case
RESULT BOSS
CHECKING role: >'BOSS'< - in apostrophes, UPPER case
RESULT BOSS
CHECKING role: >BosS< - without delimiters, CaMeL case
RESULT BOSS
CHECKING role: >BOSS< - without delimiters, UPPER case
RESULT BOSS
CHECKING role: >"BOSS"< - in double quotes, UPPER case
RESULT BOSS
"""
expected_stderr = """
Statement failed, SQLSTATE = 28000
Statement failed, SQLSTATE = HY000
exception 3
-EX_HAVE_NO_ROLE
-You've specified role: >"BoSs"< -- but your actual role is NONE.
-At procedure 'SP_CHECK_ACTUAL_ROLE'
"""
@pytest.mark.version('>=3.0')
def test_1(act: Action, tmp_user: User, tmp_role: Role):
    """Run the ISQL script and compare both output streams to the references."""
    act.expected_stdout = expected_stdout
    act.expected_stderr = expected_stderr
    act.execute()
    assert act.clean_stderr == act.clean_expected_stderr
    assert act.clean_stdout == act.clean_expected_stdout
| [
"pcisar@ibphoenix.cz"
] | pcisar@ibphoenix.cz |
efeb6964d4ee7986f15a1bfc4dc59835f24a8359 | af9268e1ead8cdb491868c14a2240d9e44fb3b56 | /last-minute-env/lib/python2.7/site-packages/django/core/management/sql.py | 5cc7aaf4f51755cba6b812d95cb2f39c8ea9b876 | [] | no_license | frosqh/Cousinade2017 | d5154c24c93ca8089eeba26b53c594e92cb6bd82 | c34d5707af02402bf2bb7405eddc91297da399ff | refs/heads/master | 2021-01-20T07:57:34.586476 | 2017-10-22T18:42:45 | 2017-10-22T18:42:45 | 90,074,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
    """
    Return the list of SQL statements needed to flush the database.

    When only_django is True, restrict the flush to tables that back Django
    models in INSTALLED_APPS. When reset_sequences is False, sequences are
    left untouched.
    """
    introspection = connection.introspection
    if only_django:
        table_names = introspection.django_table_names(only_existing=True, include_views=False)
    else:
        table_names = introspection.table_names(include_views=False)
    sequences = introspection.sequence_list() if reset_sequences else ()
    return connection.ops.sql_flush(style, table_names, sequences, allow_cascade)
def emit_pre_migrate_signal(verbosity, interactive, db, **kwargs):
    """Send the pre_migrate signal for every installed app that has models."""
    relevant_configs = (
        config for config in apps.get_app_configs()
        if config.models_module is not None
    )
    for app_config in relevant_configs:
        if verbosity >= 2:
            print("Running pre-migrate handlers for application %s" % app_config.label)
        models.signals.pre_migrate.send(
            sender=app_config,
            app_config=app_config,
            verbosity=verbosity,
            interactive=interactive,
            using=db,
            **kwargs
        )
def emit_post_migrate_signal(verbosity, interactive, db, **kwargs):
    """Send the post_migrate signal for every installed app that has models."""
    relevant_configs = (
        config for config in apps.get_app_configs()
        if config.models_module is not None
    )
    for app_config in relevant_configs:
        if verbosity >= 2:
            print("Running post-migrate handlers for application %s" % app_config.label)
        models.signals.post_migrate.send(
            sender=app_config,
            app_config=app_config,
            verbosity=verbosity,
            interactive=interactive,
            using=db,
            **kwargs
        )
| [
"frosqh@gmail.com"
] | frosqh@gmail.com |
35239d3462f35f0aad5fdfef0333f99b1a39b0e5 | 473d3edf1cc1fda57f7da875c16dc93a4ebbdb23 | /blog/migrations/0001_initial.py | 56ca95d90d3ca7a5f27d1d98cd9b39041a7ceb8e | [] | no_license | nelliejellie/blog | df9a61bc40f8589252591528ed238b8010a17e53 | b629c8d3e2dd20b64c960f6f1f6f8bc7c62f95a4 | refs/heads/master | 2022-06-24T01:35:21.104705 | 2020-05-08T20:07:13 | 2020-05-08T20:07:13 | 261,609,317 | 0 | 0 | null | 2020-05-08T20:07:14 | 2020-05-05T23:57:54 | JavaScript | UTF-8 | Python | false | false | 1,365 | py | # Generated by Django 3.0.2 on 2020-04-29 09:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the blog app: creates the ``Post`` model."""
    # Auto-generated by Django (see header comment); normally left untouched.
    initial = True
    dependencies = [
        # Post.author points at the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                # Slug only needs to be unique per publish date.
                ('slug', models.SlugField(max_length=100, unique_for_date='publish')),
                ('body', models.TextField()),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_post', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ('-publish',),
            },
        ),
    ]
| [
"emekaewelike@gmail.com"
] | emekaewelike@gmail.com |
06e39ae81eeb08e88f62ee72fdeb30b8088d12d1 | 4d8f7abf3ec6ff049815663f1c3c0f85926caab9 | /SF2D/Compute_renorm_HT_4tops.py | cbe5a6bbc3a44497c365ea653ac9440806ccf4c5 | [] | no_license | daniel-sunyou-li/ChargedHiggs | 07e160d92ae628ed950a1e13e9bbe41aabfa69d1 | 282139b9cee9788a2fc6c536c86cc4731e4f7de7 | refs/heads/master | 2023-08-06T06:27:44.969889 | 2021-08-06T14:42:00 | 2021-08-06T14:42:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | import ROOT
import sys
import numpy
import argparse
import array
from ROOT import TFile, TTree
# CLI: input ROOT file holding the 'ljmet' analysis tree, plus a label used
# to build the output file name.
parser = argparse.ArgumentParser(description="compute the renormalization factors for charged Higgs analysis")
parser.add_argument("-f", "--file", default="", help="The path to the analysis tree")
parser.add_argument("-l", "--label", default="", help="The name of the output file")
args = parser.parse_args()
filename = args.file
tfile = TFile.Open(filename)
fout = ROOT.TFile("Fourtops_Weights_"+args.label+"_extended_HT_cuts.root", "RECREATE")
# 2D histograms in (njets, HT): 6 jet bins on [4, 10), 40 HT bins on
# [150, 4000). Sumw2() tracks per-bin sums of squared weights for errors.
h2D_origin = ROOT.TH2F("h2D_origin", "h2D_origin", 6, 4, 10, 40, 150, 4000)
h2D_origin.Sumw2()
h2D_weight_dcsv = ROOT.TH2F("h2D_weight_dcsv", "h2D_weight_dcsv", 6, 4, 10, 40, 150, 4000)
h2D_weight_dcsv.Sumw2()
h2D_weight_djet = ROOT.TH2F("h2D_weight_djet", "h2D_weight_djet", 6, 4, 10, 40, 150, 4000)
h2D_weight_djet.Sumw2()
ttree = tfile.Get("ljmet")
# Disable all branches, then re-enable only the ones read in the event loop
# (big speedup when iterating large trees).
ttree.SetBranchStatus("*", 0)
ttree.SetBranchStatus("NJets_JetSubCalc*", 1)
ttree.SetBranchStatus("theJetPt_JetSubCalc_PtOrdered*", 1)
ttree.SetBranchStatus("AK4HT*", 1)
ttree.SetBranchStatus("btagCSVWeight*", 1)
ttree.SetBranchStatus("btagDeepJetWeight*", 1)
ttree.SetBranchStatus("leptonPt_MultiLepCalc*", 1)
ttree.SetBranchStatus("isElectron*", 1)
ttree.SetBranchStatus("isMuon*", 1)
ttree.SetBranchStatus("corr_met_MultiLepCalc*", 1)
ttree.SetBranchStatus("MCPastTrigger*", 1)
nevents = ttree.GetEntries()
for iev in range(nevents):
    if iev%1000==1:
        print(iev)
    ttree.GetEntry(iev)
    njet = ttree.NJets_JetSubCalc
    # Single-lepton selection: electron pT > 35 or muon pT > 30, MET > 30,
    # and the event must have passed the MC trigger.
    if not ((ttree.leptonPt_MultiLepCalc > 35 and ttree.isElectron) or (ttree.leptonPt_MultiLepCalc > 30 and ttree.isMuon)): continue
    if not (ttree.corr_met_MultiLepCalc > 30): continue
    if not (ttree.MCPastTrigger): continue
    HT = ttree.AK4HT
    if njet>9: njet=9  # overflow: events with >9 jets go into the last bin
    #for ijet in range(njet):
    #    if ttree.theJetPt_JetSubCalc_PtOrdered.at(ijet)>120:
    #        n_fastjet+=1
    #    elif (ttree.theJetPt_JetSubCalc_PtOrdered.at(ijet)<=120 and ttree.theJetPt_JetSubCalc_PtOrdered.at(ijet)>40):
    #        n_slowjet+=1
    #if n_fastjet>5: n_fastjet=5
    #if n_slowjet>5: n_slowjet=5
    h2D_origin.Fill(njet, HT)
    h2D_weight_dcsv.Fill(njet, HT, ttree.btagCSVWeight)
    h2D_weight_djet.Fill(njet, HT, ttree.btagDeepJetWeight)
# Renormalization factor per bin = unweighted yield / b-tag-weighted yield.
h2D_scale_dcsv = h2D_origin.Clone()
h2D_scale_dcsv.SetTitle("h2D_scale_dcsv")
h2D_scale_dcsv.Divide(h2D_weight_dcsv)
h2D_scale_djet = h2D_origin.Clone()
# Bug fix: the DeepJet scale histogram was mistakenly titled "h2D_scale_dcsv"
# (copy-paste from the DeepCSV block above).
h2D_scale_djet.SetTitle("h2D_scale_djet")
h2D_scale_djet.Divide(h2D_weight_djet)
fout.WriteTObject(h2D_origin, "h2D_origin")
fout.WriteTObject(h2D_weight_dcsv, "h2D_weight_dcsv")
fout.WriteTObject(h2D_scale_dcsv, "h2D_scale_dcsv")
fout.WriteTObject(h2D_weight_djet, "h2D_weight_djet")
fout.WriteTObject(h2D_scale_djet, "h2D_scale_djet")
fout.Close()
| [
"jingyu.luo@cern.ch"
] | jingyu.luo@cern.ch |
c78eff018f1221cf0e9bbbe56bc0e7d2cb566ff7 | fca3644a3ab3c83bba33fb7a9a3bd94538a4dd5c | /drive/web/front/forms.py | a046ffe6bf4ba9cdacc31105def77467977003e6 | [] | no_license | enixdark/pyra-structures | 1a8327cf7de5c7b6ab552900e43d83001011cf15 | fb8df7bdbc7a256381d42c501bf55c54ebf7dae6 | refs/heads/master | 2023-01-22T01:39:38.648567 | 2020-10-02T07:25:52 | 2020-10-02T07:25:52 | 98,119,242 | 0 | 0 | null | 2023-01-12T13:10:39 | 2017-07-23T19:05:22 | CSS | UTF-8 | Python | false | false | 5,104 | py | from marshmallow import (
fields,
pre_load,
Schema,
validate,
validates,
validates_schema,
ValidationError,
)
import re
from drive.utils.email_address import EmailAddress
from drive.utils.i18n import _
from datetime import date
def IsText(field):
    """Build a validator that rejects values containing control characters."""
    forbidden = re.compile(r'[\x00-\x1f\x7f-\x9f]')
    def text_validator(value):
        if forbidden.search(value):
            raise ValidationError(
                _(
                    'Invalid character(s) in ${field}.',
                    mapping={'field': field},
                )
            )
    return text_validator
def IsMultiLineText(field):
    """
    Build a validator that rejects control characters but allows
    newline (\\x0a) and carriage return (\\x0d), so multi-line text passes.
    """
    forbidden = re.compile(r'[\x00-\x09\x0b-\x0c\x0e-\x1f\x7f-\x9f]')
    def text_validator(value):
        if forbidden.search(value):
            raise ValidationError(
                _(
                    'Invalid character(s) in ${field}.',
                    mapping={'field': field},
                )
            )
    return text_validator
def IsIdentifier(field):
    """Build a validator allowing only ASCII letters, digits, '_' and '-'."""
    forbidden = re.compile(r'[^A-Za-z0-9_-]')
    def text_validator(value):
        if forbidden.search(value):
            raise ValidationError(
                _(
                    'Invalid character(s) in ${field}.',
                    mapping={'field': field},
                )
            )
    return text_validator
def my_required(value, field_name=''):
    """
    Fail validation when ``value`` is empty.

    Bug fix: the original condition was inverted (``len(value) > 0``), so it
    raised "${field} is required." exactly when the field WAS filled in and
    silently accepted empty values.
    """
    if len(value) == 0:
        raise ValidationError(
            _(
                '${field} is required.',
                mapping={'field': field_name}
            )
        )
class Form(Schema):
    """
    Thin wrapper around a marshmallow ``Schema`` that keeps the raw input
    data and collects validation errors on the instance.

    # data      -- the raw input mapping handed to the constructor
    # has_error -- True once validation failed or an error was added manually
    # errors    -- mapping of field name -> list of error messages
    """
    def __init__(self, data, *args, **kwargs):
        super(Form, self).__init__(*args, **kwargs)
        self.data = data
        self.has_error = False
        self.errors = {}
    def validate(self):
        """Run schema validation on ``self.data``; return True when it passed."""
        errors = super().validate(data=self.data)
        if bool(errors):
            self.has_error = True
            self.errors = errors
            return False
        return True
    def value(self, name):
        """Return the submitted value for ``name`` (None when absent or None)."""
        # .get() avoids the KeyError the old code raised for fields that
        # were not submitted at all.
        return self.data.get(name)
    def add_error(self, name='', message=""):
        """Record ``message`` under ``name`` and flag the form as failed."""
        self.has_error = True
        # setdefault fixes a bug: the old code did self.errors[name] on a
        # non-empty error dict, raising KeyError whenever other fields
        # already had errors but ``name`` did not.
        self.errors.setdefault(name, []).append(message)
        return self.errors
    def error_message(self):
        """Return the first recorded error message, or '' when there is none."""
        # (The original had an unreachable ``break`` after ``return``.)
        for messages in self.errors.values():
            if len(messages) > 0:
                return messages[0]
        return ''
class UserForm(Form):
    """Validation schema for creating/updating a user profile."""
    first_name = fields.String(
        required=True,
        validate=[validate.Length(min=1, max=255)],
        error_messages={'required': 'user first name is required'},
    )
    last_name = fields.String(
        required=True,
        validate=[validate.Length(min=1, max=255)],
        error_messages={'required': 'user last name is required'},
    )
    # NOTE(review): message style differs from the name fields above
    # ('Email required' vs 'user ... is required') -- intentional?
    email = fields.Email(
        required=True,
        validate=[validate.Length(min=1, max=255)],
        error_messages={'required': 'Email required'}
    )
class SignupForm(Form):
    """Validation schema for account signup."""
    first_name = fields.String(
        required=True,
        validate=[validate.Length(min=1, max=255)],
        error_messages={'required': 'First name required'}
    )
    last_name = fields.String(
        required=True,
        validate=[validate.Length(min=1, max=255)],
        error_messages={'required': 'Last name required'}
    )
    email = fields.Email(
        required=True,
        validate=[validate.Length(min=1, max=255)],
        error_messages={'required': 'Email required'}
    )
    password = fields.String(
        required=True,
        validate=validate.Length(min=8),
        error_messages={'required': 'Password required'}
    )
    # NOTE(review): equality of password and retype_password is not checked
    # by this schema -- presumably enforced by the view; confirm.
    retype_password = fields.String(
        required=True,
        validate=validate.Length(min=8),
        error_messages={'required': 'Retype Password required'}
    )
class LoginForm(Form):
    """Validation schema for login credentials (e-mail + password)."""
    email = fields.Email(
        required=True,
        validate=[validate.Length(min=1, max=255)],
        error_messages={'required': 'Email required'}
    )
    password = fields.String(
        required=True,
        validate=[validate.Length(min=8)],
        error_messages={'required': 'Password required'}
    )
class ChangePasswordForm(Form):
    """Validation schema for changing an account password."""
    old_password = fields.String(
        required=True,
        validate=[validate.Length(min=8)],
        error_messages={'required': 'Password required'}
    )
    new_password = fields.String(
        required=True,
        validate=[validate.Length(min=8)],
        error_messages={'required': 'Password required'}
    )
    # NOTE(review): new_password == confirm_password is not validated here --
    # presumably checked by the caller; confirm.
    confirm_password = fields.String(
        required=True,
        validate=[validate.Length(min=8)],
        error_messages={'required': 'Password required'}
    )
class ForgotPasswordForm(Form):
    """Validation schema for requesting a password reset (e-mail only)."""
    email = fields.Email(
        required=True,
        default="",
        validate=[validate.Length(min=1, max=255)],
        error_messages={'required': 'Email required'},
    )
"cqshinn92@gmail.com"
] | cqshinn92@gmail.com |
17d1add048a7e5db1e09574e9d1fe27e3d3112e2 | 548fbb3bf6648e76e711ee398148cae9ee10a0d2 | /running_sum_array.py | 14687c688612f9dc8f01a1f9715982cc4b68a444 | [] | no_license | KonstantinSKY/LeetCode | 34cce8eda7182aa6a1616b3471b0cfe9310fe1d4 | 1570122134b962412b0530c3850eb37f1c8c585e | refs/heads/master | 2023-04-16T17:03:23.753146 | 2023-04-03T18:16:21 | 2023-04-03T18:16:21 | 310,714,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | """ """
import time
from typing import List
class Solution:
    def runningSum(self, nums: List[int]) -> List[int]:
        """Overwrite each entry of ``nums`` with its prefix sum; return the list."""
        total = 0
        for index, value in enumerate(nums):
            total += value
            nums[index] = total
        return nums
if __name__ == "__main__":
    # Smoke-test the prefix-sum implementation and time the four calls.
    start_time = time.time()
    print(Solution().runningSum([1, 2, 3, 4]))
    print(Solution().runningSum([1, 1, 1, 1, 1]))
    print(Solution().runningSum([3, 1, 2, 10, 1]))
    print(Solution().runningSum([3]))
    print("--- %s seconds ---" % (time.time() - start_time))
| [
"sky012877@gmail.com"
] | sky012877@gmail.com |
544d0d079ce7d07e8a91f5a310818a244f1b8764 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5744014401732608_1/Python/izubr/gcj.py | fd75bd5e4d6cbfd5653b75455e59fbedb1601060 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py |
def form_result(possibility, matrix, case):
    # Print one Code Jam answer (1-based case number); the matrix is printed
    # only for "POSSIBLE" results. Python 2 print statements.
    if possibility == "POSSIBLE":
        print "Case #%s: %s" % (case + 1, possibility)
        print matrix.rstrip()
    else:
        print "Case #%s: %s" % (case + 1, possibility)
# Python 2 Code Jam solution: for each case, build a B x B upper-triangular
# adjacency matrix whose path count from node 0 to node B-1 equals M.
t = int(raw_input(""))
for case in range(t):
    b, m = raw_input("").split(" ")
    b, m = int(b), int(m)
    # Start from the full DAG: edge i -> j for every i < j.
    matrix = [[1] * b for i in range(b)]
    for i in range(b):
        for j in range(b):
            if i >= j:
                matrix[i][j] = 0
    # in_point[i]: number of paths from node 0 into node i in the full DAG
    # (2**(i-1) for i > 0); from_point[i] is the mirrored count to the sink.
    in_point = [0] * b
    from_point = [0] * b
    in_point[0] = 1
    cur_sum = 1
    for i in range(1, b):
        in_point[i] = 2 ** (i - 1)
    for i in range(b):
        from_point[i] = in_point[b - 1 - i]
    paths = [[0] * b for i in range(b)]
    for i in range(b):
        for j in range(b):
            if i < j:
                paths[i][j] = in_point[i] * from_point[j]
    # The full DAG gives the maximum of 2**(b-2) paths; more is impossible.
    if m > in_point[b-1]:
        # print in_point[b-1]
        form_result("IMPOSSIBLE", [], case)
        continue
    # Encode M in binary and keep only the outgoing edges of node 0 that
    # correspond to set bits, so the path counts sum to exactly M.
    bin_digits = bin(m)[2:] + '0'
    bin_digits = '0' * (b - len(bin_digits)) + bin_digits
    # print bin_digits
    all_zeros = True
    for j in range(1, b):
        if bin_digits[j] == '0':
            matrix[0][j] = 0
        else:
            all_zeros = False
    if all_zeros:
        for j in range(1, b):
            matrix[0][j] = 1
    # Serialize the matrix as B rows of 0/1 characters.
    res_matrix = ""
    for i in range(b):
        res = ""
        for j in range(b):
            res += str(matrix[i][j])
        res_matrix += res + "\n"
    form_result("POSSIBLE", res_matrix, case)
    # print in_point
    # print from_point
    # for i in range(b):
    #     res = ""
    #     for j in range(b):
    #         res += str(paths[i][j]) + " "
    #     print res
    # form_result(best_code, best_jam, case)
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
0664ffff7d89af5ac8e3b22e1e91030ca4ea3943 | 547695aff7b19ec2fe3f6b6ab7447d7b65c89322 | /FlaskWebProject1/test/myapi/commons/apispec.py | c346f60a6453fb5111bffe93ad7447311ce92cce | [] | no_license | yuchanmo/FlaskWebProject1 | d914454fd71f226e83cf909268ae297e2edbf6db | ae862f950146ceb6638e2b1a25beb2ad8c1207d9 | refs/heads/master | 2023-01-24T01:03:23.167143 | 2019-08-21T15:06:51 | 2019-08-21T15:06:51 | 203,608,914 | 0 | 0 | null | 2022-12-27T15:34:13 | 2019-08-21T15:06:56 | Python | UTF-8 | Python | false | false | 2,391 | py | from flask import jsonify, render_template, Blueprint
from apispec import APISpec
from apispec.exceptions import APISpecError
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
class FlaskRestfulPlugin(FlaskPlugin):
"""Small plugin override to handle flask-restful resources
"""
@staticmethod
def _rule_for_view(view, app=None):
view_funcs = app.view_functions
endpoint = None
for ept, view_func in view_funcs.items():
if hasattr(view_func, "view_class"):
view_func = view_func.view_class
if view_func == view:
endpoint = ept
if not endpoint:
raise APISpecError('Could not find endpoint for view {0}'.format(view))
# WARNING: Assume 1 rule per view function for now
rule = app.url_map._rules_by_endpoint[endpoint][0]
return rule
class APISpecExt:
"""Very simple and small extension to use apispec with this API as a flask extension
"""
def __init__(self, app=None):
self.spec = None
if app is not None:
self.init_app(app)
def init_app(self, app):
app.config.setdefault("APISPEC_TITLE", "restful_api")
app.config.setdefault("APISPEC_VERSION", "1.0.0")
app.config.setdefault("OPENAPI_VERSION", "3.0.2")
app.config.setdefault("SWAGGER_JSON_URL", "/swagger.json")
app.config.setdefault("SWAGGER_UI_URL", "/swagger-ui")
app.config.setdefault("SWAGGER_URL_PREFIX", None)
self.spec = APISpec(
title=app.config["APISPEC_TITLE"],
version=app.config["APISPEC_VERSION"],
openapi_version=app.config["OPENAPI_VERSION"],
plugins=[MarshmallowPlugin(), FlaskRestfulPlugin()],
)
blueprint = Blueprint(
"swagger",
__name__,
template_folder="./templates",
url_prefix=app.config["SWAGGER_URL_PREFIX"],
)
blueprint.add_url_rule(app.config["SWAGGER_JSON_URL"], "swagger_json", self.swagger_json)
blueprint.add_url_rule(app.config["SWAGGER_UI_URL"], "swagger_ui", self.swagger_ui)
app.register_blueprint(blueprint)
def swagger_json(self):
return jsonify(self.spec.to_dict())
def swagger_ui(self):
return render_template("swagger.j2")
| [
"mojjijji@gmail.com"
] | mojjijji@gmail.com |
52fedae218df70fb543cf1dd2cd22af4b39b6488 | 3f53e38076713ab49fd03a54c7c9d3e21de5eb14 | /Pyrado/pyrado/tasks/predefined.py | 50d88df14b227c763e6a5a33ef902b36a254c00d | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | arlene-kuehn/SimuRLacra | 4510473789d1c8927c8d5969a9606238523d5dd7 | 15901f70f0538bce19acdda2a0018984f67cc0fe | refs/heads/master | 2023-01-28T13:10:05.607575 | 2020-12-04T14:47:01 | 2020-12-04T14:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,810 | py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import numpy as np
import pyrado
from pyrado.spaces.empty import EmptySpace
from pyrado.tasks.desired_state import DesStateTask
from pyrado.tasks.final_reward import FinalRewTask, FinalRewMode
from pyrado.tasks.masked import MaskedTask
from pyrado.tasks.reward_functions import RewFcn, ZeroPerStepRewFcn, AbsErrRewFcn
from pyrado.tasks.utils import proximity_succeeded, never_succeeded
from pyrado.utils.data_types import EnvSpec
def create_goal_dist_task(env_spec: EnvSpec,
                          ds_index: int,
                          rew_fcn: RewFcn,
                          succ_thold: float = 0.01) -> MaskedTask:
    """
    Create a task that is solved once the goal distance of one dynamical
    system is close enough to zero.

    :param env_spec: environment specification
    :param ds_index: index of the dynamical system, selects the observation
    :param rew_fcn: reward function
    :param succ_thold: proximity threshold for counting the task as solved
    :return: masked task operating only on the selected goal distance
    """
    # Label must match the observation name in RcsPySim.
    obs_labels = [f'GD_DS{ds_index}']
    if env_spec.state_space is not EmptySpace:
        masked_state_space = env_spec.state_space.subspace(env_spec.state_space.create_mask(obs_labels))
    else:
        masked_state_space = EmptySpace
    masked_spec = EnvSpec(env_spec.obs_space, env_spec.act_space, masked_state_space)
    # Desired state is [0, 0]; success when close enough to it.
    success_fcn = functools.partial(proximity_succeeded, thold_dist=succ_thold)
    sub_task = DesStateTask(masked_spec, np.zeros(2), rew_fcn, success_fcn)
    return MaskedTask(env_spec, sub_task, obs_labels)
def create_goal_dist_distvel_task(env_spec: EnvSpec,
                                  ds_index: int,
                                  rew_fcn: RewFcn,
                                  succ_thold: float = 0.01) -> MaskedTask:
    """
    Create a task on the goal distance and the goal distance velocity of one
    dynamical system, solved once both are close enough to zero.

    :param env_spec: environment specification
    :param ds_index: index of the dynamical system, selects the observations
    :param rew_fcn: reward function
    :param succ_thold: proximity threshold for counting the task as solved
    :return: masked task operating only on the selected observations
    """
    # Labels must match the observation names in RcsPySim.
    obs_labels = [f'GD_DS{ds_index}', f'GD_DS{ds_index}d']
    if env_spec.state_space is not EmptySpace:
        masked_state_space = env_spec.state_space.subspace(env_spec.state_space.create_mask(obs_labels))
    else:
        masked_state_space = EmptySpace
    masked_spec = EnvSpec(env_spec.obs_space, env_spec.act_space, masked_state_space)
    # Desired state is [0, 0]; success when close enough to it.
    success_fcn = functools.partial(proximity_succeeded, thold_dist=succ_thold)
    sub_task = DesStateTask(masked_spec, np.zeros(2), rew_fcn, success_fcn)
    return MaskedTask(env_spec, sub_task, obs_labels)
def create_check_all_boundaries_task(env_spec: EnvSpec, penalty: float) -> FinalRewTask:
    """
    Create a task that checks the complete state space for limit violations
    (not just a subspace, as could happen with a MaskedTask) and yields a
    negative final reward scaled by ``penalty``.

    :param env_spec: environment specification
    :param penalty: scaling factor of the (always negative) final reward
    :return: final-reward task that never succeeds and gives no step reward
    """
    never_ending_task = DesStateTask(
        env_spec, np.zeros(env_spec.state_space.shape), ZeroPerStepRewFcn(), never_succeeded
    )
    return FinalRewTask(never_ending_task, FinalRewMode(always_negative=True), factor=penalty)
def create_task_space_discrepancy_task(env_spec: EnvSpec, rew_fcn: RewFcn) -> MaskedTask:
    """
    Create a task which punishes the discrepancy between the actual and the
    commanded state of the observed body. The observed body is specified in
    the associated experiment configuration file in RcsPySim.
    This task only looks at the X and Z coordinates.

    :param env_spec: environment specification
    :param rew_fcn: reward function
    :return: masked task
    """
    # Labels must match the observation names in RcsPySim.
    discrep_labels = [lbl for lbl in env_spec.state_space.labels if 'DiscrepTS' in lbl]
    if not discrep_labels:
        raise pyrado.ValueErr(msg="No state space labels found that contain 'DiscrepTS'")
    masked_spec = EnvSpec(
        env_spec.obs_space,
        env_spec.act_space,
        env_spec.state_space.subspace(env_spec.state_space.create_mask(discrep_labels)),
    )
    # Zero discrepancy is desired; the task never ends on success.
    sub_task = DesStateTask(masked_spec, np.zeros(masked_spec.state_space.shape), rew_fcn, never_succeeded)
    return MaskedTask(env_spec, sub_task, discrep_labels)
def create_collision_task(env_spec: EnvSpec, factor: float) -> MaskedTask:
    """
    Create a task which punishes collision costs given a collision model with
    pairs of bodies. This task only looks at the instantaneous collision cost.

    :param env_spec: environment specification
    :param factor: cost / reward function scaling factor (must be >= 0)
    :return: masked task
    """
    # `not factor >= 0` (instead of `factor < 0`) also rejects NaN.
    if not factor >= 0:
        raise pyrado.ValueErr(given=factor, ge_constraint='0')
    # Label must match the observation name in RcsPySim.
    coll_labels = ['CollCost']
    masked_spec = EnvSpec(
        env_spec.obs_space,
        env_spec.act_space,
        env_spec.state_space.subspace(env_spec.state_space.create_mask(coll_labels)),
    )
    cost_fcn = AbsErrRewFcn(q=np.array([factor]), r=np.zeros(masked_spec.act_space.shape))
    # Zero collision cost is desired; the task never ends on success.
    sub_task = DesStateTask(masked_spec, np.zeros(masked_spec.state_space.shape), cost_fcn, never_succeeded)
    return MaskedTask(env_spec, sub_task, coll_labels)
| [
"fabio.muratore@famura.net"
] | fabio.muratore@famura.net |
4c85ede7212f8220b8d5663534c28dcba7c46309 | 639b371a36aa7bc346375fb0c63fe4357a7fa928 | /isnobal/lib.py | d320666b29d383f6ff407741928bf5e110c31b16 | [] | no_license | rogerlew/isnobal | baf9dccc19a8c898d6c5eb5c6554e61c0c42b541 | 887e02cb6361e8f35f3cbcb2aaeba35b62a65d67 | refs/heads/master | 2016-09-05T21:47:37.365412 | 2015-01-09T18:03:02 | 2015-01-09T18:03:02 | 28,944,436 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py |
import os
import shutil
import time
def clean(path, isdir=None):
    """
    Remove whatever lives at ``path``.

    A file is deleted outright. A directory tree is deleted and the (now
    empty) root directory is recreated. A non-existent path is created as a
    directory when ``isdir`` is truthy, and left alone otherwise.

    Parameters
    ----------
    path : string
        path to be cleaned
    isdir : bool
        if path does not currently exist and isdir is true,
        a new directory is created
    """
    if not os.path.exists(path):
        if isdir:
            os.mkdir(path)
        return
    if os.path.isdir(path):
        shutil.rmtree(path)
        # brief pause so the OS releases the tree before it is recreated
        time.sleep(1)
        os.mkdir(path)
    else:
        os.remove(path)
def identifyStep(fname):
    """
    identifies the simulation step from a filename

    Parameters
    ----------
    fname : string
        only the basename is used for determining step

    Returns
    -------
    step : int
        integer representing the simulation step
    """
    basename = os.path.basename(fname)
    # dem/mask inputs are static, so they belong to step 0
    if 'dem' in basename:
        return 0
    elif 'mask' in basename:
        return 0
    elif 'init' in basename:
        # e.g. "init100.ipw" -> 100; a bare "init.ipw" has no digits
        try:
            return int(''.join([a for a in basename if a in '0123456789']))
        except ValueError:
            return 0
    # e.g. "em_0012.ipw" -> 12 (digits after the first underscore)
    try:
        return int(''.join([a for a in basename.split('_')[1]
                            if a in '0123456789']))
    except (IndexError, ValueError):
        # e.g. "snow.100" -> 100 (second dot-separated token)
        try:
            return int(basename.split('.')[1])
        except (IndexError, ValueError):
            # Bug fix: `warnings` was used without being imported anywhere
            # in this module, so this fallback raised NameError instead of
            # warning. Also narrowed the bare `except:` clauses above.
            import warnings
            warnings.warn('Could not identify step for "%s", '
                          'returning 0'% basename)
            return 0
| [
"rogerlew@gmail.com"
] | rogerlew@gmail.com |
f430f08749ef992d69507cf068f4f2d1e99c4b86 | dc7c62f22e5b7da4691d2bdf9a1da2f3ba9edd75 | /sketch_181106c/stack.py | e0a7adc48f20d65c672224ee5626dd4d30872a45 | [] | no_license | xiaohaiguicc/CS5001 | 563c17637f06f0074ccb743db4f0bdd2a326f978 | 51698ba8bfc2201639e6f4d358e0fc531780d2fc | refs/heads/master | 2020-04-06T17:32:55.046301 | 2018-12-20T23:53:05 | 2018-12-20T23:53:05 | 157,664,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | class Stack:
def _init_(self):
self.content = []
    def push(self, item):
        # Top of the stack is the end of the backing list.
        self.content.append(item)
def pop(self):
if len(self.content) > 0:
return self.content.pop()
| [
"xiaohaiguicc@gmail.com"
] | xiaohaiguicc@gmail.com |
86988fc60904cf6ad10ea91b11b7a4d5313cdc80 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_121/ch118_2020_09_30_02_00_29_265968.py | 81e75271029f2a468b29c741aee70ec9ceea4b9c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | import math
def reflexao_total_interna(n1, n2, θ2):
    """Return True when light crossing from the medium with index n2 into
    the medium with index n1 at angle θ2 (radians, per the original
    math.sin usage) undergoes total internal reflection.

    The original crashed in exactly the TIR case: math.asin raises
    ValueError when its argument exceeds 1, and the follow-up test used an
    undefined name `sin`. Snell's law gives sin(θ1) = sin(θ2) * n2 / n1,
    so TIR is simply that quantity exceeding 1 — no asin needed.
    """
    return math.sin(θ2) * n2 / n1 > 1
"you@example.com"
] | you@example.com |
bf540d38a0dea62068f14ad5ec55fae7d96a6b0e | d6d20681f41102df3feb2b438ef80569bd73730f | /.history/Uge5-pandas/handinExercises_20200311123505.py | f29264f126048debd63967bdff8beb92c1762646 | [] | no_license | MukHansen/pythonAfleveringer | d0ad2629da5ba2b6011c9e92212949e385443789 | 4107c3c378f757733961812dd124efc99623ff2e | refs/heads/master | 2020-12-22T13:27:19.135138 | 2020-05-22T11:35:52 | 2020-05-22T11:35:52 | 236,796,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,354 | py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
dataA = pd.read_csv('https://api.statbank.dk/v1/data/FOLK1A/CSV?delimiter=Semicolon&Tid=2008K1%2C2020K1&CIVILSTAND=G%2CF', delimiter=';')
dataB1 = pd.read_csv('https://api.statbank.dk/v1/data/FOLK1A/CSV?delimiter=Semicolon&OMR%C3%85DE=*&CIVILSTAND=TOT', delimiter=';')
# dataB = pd.read_csv('https://api.statbank.dk/v1/data/FOLK1A/CSV?delimiter=Semicolon&CIVILSTAND(Head)=U%2CTOT&OMR%C3%85DE=*&Tid=2020K1', delimiter=';')
dataB2 = pd.read_csv('https://api.statbank.dk/v1/data/FOLK1A/CSV?delimiter=Semicolon&OMR%C3%85DE=*&CIVILSTAND=U', delimiter=';')
dataC1 = pd.read_csv('https://api.statbank.dk/v1/data/FOLK1A/CSV?delimiter=Semicolon&OMR%C3%85DE=101&CIVILSTAND=*&Tid=*', delimiter=';')
test = np.genfromtxt('https://api.statbank.dk/v1/data/FOLK1A/CSV?delimiter=Semicolon&OMR%C3%85DE=101&CIVILSTAND=*&Tid=*', delimiter=';', dtype=np.uint, skip_header=1)
pct08 = dataA['INDHOLD'][1] / dataA['INDHOLD'][0] * 100
pct20 = dataA['INDHOLD'][3] / dataA['INDHOLD'][2] * 100
pctchange = pct20 - pct08
print('5.A')
print('------------------------------------------')
print("Divorced procent in '08",pct08)
print("Divorced procent in '20",pct20)
print('Change in procent',pctchange)
print('------------------------------------------')
# 1:6 to skip "hele landet" at index 0
largestCities = dataB1.sort_values('INDHOLD', ascending=False)[1:6]
# ls = largestCities.head(2)
def highestPctOfNeverMarried():
    """Print the area with the highest share of never-married ('Ugifte')
    citizens among five hard-coded row indices.

    Reads the module-level DataFrames dataB2 (never-married counts) and
    dataB1 (total counts); rows are assumed aligned by index — TODO confirm.
    """
    pctList = {}
    lst = [1, 2, 32, 50, 73]  # presumably the five largest cities' rows
    for number in lst:
        area = dataB2['OMRÅDE'][number]
        val = dataB2['INDHOLD'][number] / dataB1['INDHOLD'][number] * 100
        pctList.update({area: val})
    # Fixed the misspelled newline escape ('/n' -> '\n') in the report line.
    print('Highest --------------------------- \n', max(zip(pctList.values(), pctList.keys())))
def printMeYo():
    """Print the summed 'INDHOLD' (count) over rows whose 'CIVILSTAND'
    (marital status) equals 'I alt' (Danish for 'total').

    Reads the module-level DataFrame dataC1. The original used NumPy-style
    positional indexing (dataC1[:, 1]), which raises on a pandas DataFrame;
    the label-based selection below implements the intent shown by the
    commented-out drafts that accompanied it.
    """
    mask = dataC1['CIVILSTAND'] == 'I alt'
    print(dataC1.loc[mask, 'INDHOLD'].sum())
# def changesInPctPlot():
# yearsToDisp = []
# eastpopulation = []
# westpopulation = []
# for key, value in years.items():
# yearsToDisp.append(key)
# for key, value in east.items():
# eastpopulation.append(value)
# for key, value in west.items():
# westpopulation.append(value)
# plt.plot(yearsToDisp, eastpopulation, linewidth=2)
# plt.plot(yearsToDisp, westpopulation, linewidth=2)
# plt.title("Number of poeple in %", fontsize=18)
# plt.xlabel("Year", fontsize=10)
# plt.xticks(yearsToDisp, rotation=65)
# plt.tick_params(axis='both', labelsize=10)
# plt.show()
regionH = dataB2['INDHOLD'][1] / dataB1['INDHOLD'][1] * 100
# regionM = dataB2['INDHOLD'][73] / dataB1['INDHOLD'][73] * 100
# regionSD = dataB2['INDHOLD'][50] / dataB1['INDHOLD'][50] * 100
# regionS = dataB2['INDHOLD'][32] / dataB1['INDHOLD'][32] * 100
# kbh = dataB2['INDHOLD'][2] / dataB1['INDHOLD'][2] * 100
print('5.B')
# print(largestCities)
# print('------------------------------')
# print('Region H procent',regionH)
highestPctOfNeverMarried()
# print(dataB2.loc[0])
# print(yo)
# print(dataB)
print('5.C')
# print(dataC1)
# printMeYo()
# print(test)
# plt.bar(ages, no_citicens, width=0.5, linewidth=0, align='center') # first plot: danes
# plt.ticklabel_format(useOffset=False)
# # plt.axis([0, max(ages) + 10, 0, 17000])
# title = 'Distribution of CPH Citizens in {}'.format(2015)
# plt.title(title, fontsize=12)
# plt.xlabel("Year", fontsize=10)
# plt.ylabel("Number of poeple in %", fontsize=10)
# plt.tick_params(axis='both', which='major', labelsize=10)
# p1 = plt.bar(ages, no_citicens, width=0.5, linewidth=0, align='center', color='red')
# p2 = plt.bar(ages_f, no_citicens_f, width=0.5, linewidth=0, align='center', color='yellow')
# plt.legend([p1,p2],['danish','foreigners'],loc=1)
# plt.show()
| [
"cph-mh752@cphbusiness.dk"
] | cph-mh752@cphbusiness.dk |
39ef6807088c40eb988f2dd2e8540cb782c446cf | 62dc63713e8c8ce8622c70117595bae857618107 | /BlackFlyCameraClass.py | 152951b5f96ad3d211963330218e880700e8a66b | [] | no_license | LiamDroog/BaldrControlSuite | ad7544d5e92b5e27537e7f20c7cf3ddc78b36769 | 2ca76c4c97c334b6bd5924b00cbcb8e6f687f495 | refs/heads/master | 2023-07-04T07:57:46.115829 | 2021-08-03T17:36:49 | 2021-08-03T17:36:49 | 371,804,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,708 | py | import os
import matplotlib.pyplot as plt
import numpy as np
import time
from SimpleFLIR import Camera, GetBFSCameras
class CameraNotConnected(Exception):
    """Raised when a camera with the requested serial number cannot be opened."""
    pass
class ListCameras:
    """Snapshot of the attached BFS cameras, as reported by SimpleFLIR."""
    def __init__(self):
        # Queried once at construction; build a new instance for a fresh list.
        self.camlist = GetBFSCameras().getCameras()
    def get(self):
        """Return the camera list captured at construction time."""
        return self.camlist
class RunBlackFlyCamera:
    # Todo: loop upon instantiation to get commands from queue (as input when creating)
    # Handle the inputs for shutdown
    # Handle how to implement hardware trigger
    # Move this to new file bc it'll get specialized quick
    """
    Per-camera instance of this is required. Deals with a hardware trigger (currently software as no hardware triggers
    have been configured) and writes data to specified file directory so that the file daemon can transfer it to the
    queue for writing to shot hdf5 file
    """
    def __init__(self, camserial, filenum):
        """
        Initalizes camera from input serial number and starting filename
        :param camserial: Camera's serial number
        :param filenum: number to start numbering files at
        :raises CameraNotConnected: when the camera cannot be opened
        """
        try:
            self.cam = Camera(camserial)
        except:  # NOTE(review): bare except hides the SDK's real error — consider narrowing
            raise CameraNotConnected('Camera ' + camserial + ' not connected.')
        self.camserial = camserial
        self.camName = self.cam.getDeviceName()
        self.filenum = filenum
        # File names are zero-padded to four digits: "<serial>_shotdata_0001.npy".
        self.datafilename = self.camserial + '_shotdata_' + '0' * (4 - len(str(self.filenum))) + str(filenum) + '.npy'
        # self.metadatafilename = self.camserial + '_shotmetadata_' + '0' * (4 - len(str(self.filenum))) + str(
        #     filenum) + '.npy'
        # set file directory (created on demand, one subfolder per serial)
        self.filepath = 'TempDataFiles/' + self.camserial + '/'
        if not os.path.exists(self.filepath):
            os.makedirs(self.filepath)
        # initialize camera
        self.cam.init()
        # todo: run trigger watch loop here?
    def adjust(self, target, value):
        """Set camera attribute `target` to `value` (delegates to SimpleFLIR)."""
        self.cam.setattr(target, value)
    def get(self, attr):
        """Return the camera's current value for parameter `attr`."""
        return self.cam.getDeviceParams(attr)
    def wait_for_trigger(self):
        # Placeholder: hardware triggers are not configured yet (see class todos).
        pass
    def handleTrigger(self):
        """Advance the shot counter and save the next captured frame."""
        self.filenum += 1
        self.__saveData(self.cam.get_array())
    def get_image_array(self):
        """Return the latest frame from the camera as an array."""
        return self.cam.get_array()
    def __getShotMetadata(self):
        # Full device-parameter dump recorded alongside every shot.
        return self.cam.getDeviceParams()
    def __saveData(self, data):
        """Bundle `data` with device metadata and write it as a .npy shot file."""
        # Rebuild the name so it tracks the just-incremented self.filenum.
        self.datafilename = self.camserial + '_shotdata_' + '0' * (4 - len(str(self.filenum))) + str(
            self.filenum) + '.npy'
        returndict = {}
        returndict['diagnosticname'] = self.camName + ' Serial: ' + self.camserial
        returndict['targetfile'] = self.filenum
        returndict['data'] = data
        returndict['metadata'] = self.__getShotMetadata()
        np.save(self.filepath + self.datafilename, returndict)
        print('Saved image ' + self.datafilename)
    def start(self):
        """Start camera acquisition."""
        self.cam.start()
    def stop(self):
        """Stop camera acquisition."""
        self.cam.stop()
    def close(self):
        """Release the camera handle."""
        self.cam.close()
    def printInfo(self):
        """Delegate to the driver's document() — presumably prints device info."""
        self.cam.document()
    def liveView(self):
        """Blocking matplotlib preview; runs until the figure window is closed."""
        self.isLiveOut = True
        self.cam.configliveout()
        self.cam.start()
        fig = plt.figure(1)
        # Closing the window flips self.isLiveOut via __closeLiveView.
        fig.canvas.mpl_connect('close_event', self.__closeLiveView)
        while self.isLiveOut:
            image = self.cam.get_array()
            plt.imshow(image, cmap='bone')
            plt.colorbar()
            plt.pause(0.001)
            plt.clf()
    def __closeLiveView(self, event):
        # matplotlib close_event callback: ends the liveView loop.
        self.isLiveOut = False
if __name__ == '__main__':
camera = RunBlackFlyCamera('19129388', 1)
#camera.start()
#camera.stop()
camera.close()
| [
"droog@ualberta.ca"
] | droog@ualberta.ca |
1802e16b7d00226435e6164b100705ff00463f91 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/interactions/utils/destruction_liability.py | 3cfd2f7aae1a2fe038831b0aeb4ff3d4fc657c5b | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\utils\destruction_liability.py
# Compiled at: 2020-06-20 03:55:42
# Size of source mod 2**32: 1064 bytes
from interactions.liability import SharedLiability
DELETE_OBJECT_LIABILITY = 'DeleteObjectLiability'
class DeleteObjectLiability(SharedLiability):
    """Liability that schedules its tracked objects for destruction on release."""
    def __init__(self, obj_list, source_liability=None):
        super().__init__(source_liability=source_liability)
        # Held by reference: merge()/create_new_liability() share this list.
        self._delete_objects = obj_list
    def shared_release(self):
        """Schedule every tracked object for destruction, then forget them."""
        for obj in self._delete_objects:
            obj.schedule_destroy_asap()
        self._delete_objects.clear()
    def merge(self, interaction, key, new_liability):
        # Fold our pending deletions into the liability that replaces us.
        new_liability._delete_objects.extend(self._delete_objects)
        return new_liability
    def create_new_liability(self, interaction):
        # New liability shares the same object list and records us as source.
        return self.__class__((self._delete_objects), source_liability=self)
"cristina.caballero2406@gmail.com"
] | cristina.caballero2406@gmail.com |
cada85a3a534bd5162de90bc54c1f74691921397 | 215fd5c4f9893d9f38e4e48199ea16d7d6ef9430 | /2.Binary_Search/2.10_62_Search_In_Rotated_Sorted_Array.py | f96b0cbc87e0ec031279dc7c1b691c17e7d12dcc | [] | no_license | fztest/Classified | fd01622c097ca21b2e20285b06997ff0e9792dd1 | b046d94657c0d04f3803ca15437dfe9a6f6f3252 | refs/heads/master | 2020-03-25T06:34:07.885108 | 2017-05-04T17:22:36 | 2017-05-04T17:22:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,606 | py | """
Description
_____________
Suppose a sorted array is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
Example
____________
For [4, 5, 1, 2, 3] and target=1, return 2.
For [4, 5, 1, 2, 3] and target=0, return -1.
Approach
____________
Binary Search
++++++++++++++
Constraint: A[mid] == target
Goal: find target
Strategy: The goal is to ditch half of the target each iteration
There are five situations we need to perform correspondingly(actions might be
duplicative but nice to distinquish between them)
1. if find the target return it
if A[mid] == target:
return mid
2. mid land at index 3 of [9,1,2,3,4,5,6]
elif A[mid] < A[end]:
now there are two sections, at right, it's a normal sorted array, at left
it's a rotated sorted array again. We can both essentially recursively handle
them and ditch the other half after knowing where target lands
2(a). target is in the right section (sorted array)
if target >= A[mid] and target <= A[end]:
start = mid
2(b). target is in the left section (rotated array)
else:
end = mid
3. mid land at index 3 of [3,4,5,6,7,1,2]
else:
Now similarly, there are two sections
3(b). left sections of sorted array
if target >= A[start] and target <= A[mid]:
end = mid
3(c). right section of rotated arrays
else:
start = mid
Complexity
______________
Time - O(Lg(N))
Space - O(1)
"""
class Solution:
    """
    Binary search in a rotated sorted array (no duplicates).

    @param A : a list of integers
    @param target : an integer to be searched
    @return : the index of target in A, or -1 when absent
    """
    def search(self, A, target):
        # Empty/missing input can never contain the target.
        if A is None or len(A) == 0 or target is None:
            return -1
        start, end = 0, len(A) - 1
        while start + 1 < end:
            # `//` keeps mid an int; the original's `/` produces a float
            # on Python 3 and makes A[mid] raise TypeError.
            mid = start + (end - start) // 2
            if A[mid] == target:
                return mid
            elif A[mid] < A[end]:
                # Right half [mid..end] is sorted.
                if target >= A[mid] and target <= A[end]:
                    start = mid
                else:
                    end = mid
            else:
                # Left half [start..mid] is sorted.
                if target >= A[start] and target <= A[mid]:
                    end = mid
                else:
                    start = mid
        if A[start] == target:
            return start
        if A[end] == target:
            return end
        return -1
| [
"cdzengpeiyun@gmail.com"
] | cdzengpeiyun@gmail.com |
56a0f90fa491c88e80bdabddde8c0dfdbfd2f47c | 5ea53027a9353f70e6a54f1211521bacbd5a46e2 | /src/goodtoknow/views.py | e1565eb86038e94fe2ef8761b992c07db748e1b8 | [] | no_license | Covee/Pillme_django | 380d4f696e503ed4f9278c44770d1840f66ec10b | 7d00581af09ae2ebe6610600083b11ab7ed29540 | refs/heads/master | 2022-12-16T05:50:49.685119 | 2018-07-25T11:22:58 | 2018-07-25T11:22:58 | 135,764,707 | 0 | 0 | null | 2022-11-22T02:30:45 | 2018-06-01T21:24:26 | Python | UTF-8 | Python | false | false | 1,242 | py | from django.shortcuts import render, get_object_or_404
from hitcount.views import HitCountDetailView
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.http import HttpResponse
from django.views.generic import DetailView, ListView
from .models import gPost
import json
class gPostListView(ListView):
    """List all gPost objects, rendered with goodtoknow/post_list.html."""
    model = gPost
    template_name = 'goodtoknow/post_list.html'
class gPostCountHitDetailView(HitCountDetailView):
    """Detail view that records a hit on the gPost each time it is shown."""
    model = gPost
    count_hit = True  # record a hit every time this detail page is served
# class gPostDetailView(DetailView):
# model = gPost
# template_name = 'goodtoknow/gpost_detail.html'
@login_required
@require_POST # accept POST requests only
def gpost_like(request):
    """Toggle the logged-in user's 'like' on a gPost and return a JSON body.

    Expects the post's pk in request.POST; responds with the new like
    count, a Korean status message, and the acting username.
    """
    pk = request.POST.get('pk', None)
    gpost = get_object_or_404(gPost, pk=pk)
    # get_or_create: created == False means the like already existed.
    gpost_like, gpost_like_created = gpost.likegpost_set.get_or_create(user=request.user)
    if not gpost_like_created:
        # Second click removes the like ("좋아요 취소" = "like cancelled").
        gpost_like.delete()
        message = "좋아요 취소"
    else:
        message = "좋아요"  # "like"
    context = {
        'like_count': gpost.like_count,
        'message': message,
        'username': request.user.username
    }
    return HttpResponse(json.dumps(context))
| [
"uniqtop91@gmail.com"
] | uniqtop91@gmail.com |
299de9361c771a3ef2f202cfcdc387d919e1fb73 | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /pacbiolib/thirdparty/pythonpkgs/networkx/networkx_1.9.1/lib/python2.7/site-packages/networkx/algorithms/tests/test_euler.py | c9936ea9a316115e662048c757e53837be943115 | [
"BSD-2-Clause"
] | permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | py | #! python
# run with nose: nosetests -v test_euler.py
from nose.tools import *
import networkx as nx
from networkx import is_eulerian,eulerian_circuit
class TestEuler:
    """Nose tests for networkx's is_eulerian() and eulerian_circuit()."""
    def test_is_eulerian(self):
        # Eulerian iff connected and every vertex has even degree.
        assert_true(is_eulerian(nx.complete_graph(5)))
        assert_true(is_eulerian(nx.complete_graph(7)))
        assert_true(is_eulerian(nx.hypercube_graph(4)))
        assert_true(is_eulerian(nx.hypercube_graph(6)))
        assert_false(is_eulerian(nx.complete_graph(4)))
        assert_false(is_eulerian(nx.complete_graph(6)))
        assert_false(is_eulerian(nx.hypercube_graph(3)))
        assert_false(is_eulerian(nx.hypercube_graph(5)))
        assert_false(is_eulerian(nx.petersen_graph()))
        assert_false(is_eulerian(nx.path_graph(4)))
    def test_is_eulerian2(self):
        # not connected
        G = nx.Graph()
        G.add_nodes_from([1,2,3])
        assert_false(is_eulerian(G))
        # not strongly connected
        G = nx.DiGraph()
        G.add_nodes_from([1,2,3])
        assert_false(is_eulerian(G))
        # strongly connected, but in/out degrees are unbalanced at 2 and 3
        G = nx.MultiDiGraph()
        G.add_edge(1,2)
        G.add_edge(2,3)
        G.add_edge(2,3)
        G.add_edge(3,1)
        assert_false(is_eulerian(G))
    def test_eulerian_circuit_cycle(self):
        # Undirected cycles/completes: the exact edge order is pinned.
        G=nx.cycle_graph(4)
        edges=list(eulerian_circuit(G,source=0))
        nodes=[u for u,v in edges]
        assert_equal(nodes,[0,3,2,1])
        assert_equal(edges,[(0,3),(3,2),(2,1),(1,0)])
        edges=list(eulerian_circuit(G,source=1))
        nodes=[u for u,v in edges]
        assert_equal(nodes,[1,2,3,0])
        assert_equal(edges,[(1,2),(2,3),(3,0),(0,1)])
        G=nx.complete_graph(3)
        edges=list(eulerian_circuit(G,source=0))
        nodes=[u for u,v in edges]
        assert_equal(nodes,[0,2,1])
        assert_equal(edges,[(0,2),(2,1),(1,0)])
        edges=list(eulerian_circuit(G,source=1))
        nodes=[u for u,v in edges]
        assert_equal(nodes,[1,2,0])
        assert_equal(edges,[(1,2),(2,0),(0,1)])
    def test_eulerian_circuit_digraph(self):
        G=nx.DiGraph()
        G.add_cycle([0,1,2,3])
        edges=list(eulerian_circuit(G,source=0))
        nodes=[u for u,v in edges]
        assert_equal(nodes,[0,1,2,3])
        assert_equal(edges,[(0,1),(1,2),(2,3),(3,0)])
        edges=list(eulerian_circuit(G,source=1))
        nodes=[u for u,v in edges]
        assert_equal(nodes,[1,2,3,0])
        assert_equal(edges,[(1,2),(2,3),(3,0),(0,1)])
    def test_eulerian_circuit_multigraph(self):
        # The parallel (1,2) edges must each be traversed exactly once.
        G=nx.MultiGraph()
        G.add_cycle([0,1,2,3])
        G.add_edge(1,2)
        G.add_edge(1,2)
        edges=list(eulerian_circuit(G,source=0))
        nodes=[u for u,v in edges]
        assert_equal(nodes,[0,3,2,1,2,1])
        assert_equal(edges,[(0,3),(3,2),(2,1),(1,2),(2,1),(1,0)])
    @raises(nx.NetworkXError)
    def test_not_eulerian(self):
        # complete_graph(4) is not Eulerian, so the generator must raise.
        f=list(eulerian_circuit(nx.complete_graph(4)))
| [
"409511038@qq.com"
] | 409511038@qq.com |
a293bd444aa2724b6df8d537890f9990a47b15c1 | 139af68b78734a6bc53bd942ffa05476baf3d71d | /Python Fundamentals 2020 - 2021/MID - Exams/01. Counter Strike.py | 2d8040229eed4fc397fcd553deec104baade8d55 | [] | no_license | MiroVatov/Python-SoftUni | 7fe3fc0a3928848c5317fb120f789c773bfc117e | 0d0d6f116281b4de8c413d254386e27d992d047b | refs/heads/main | 2023-08-24T09:44:31.261137 | 2021-10-18T14:04:03 | 2021-10-18T14:04:03 | 317,510,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | battles_won = 0
# Read starting energy, then resolve battles until "End of battle" arrives.
initial_energy = int(input())
while True:
    command = input()
    if command == "End of battle":
        print(f"Won battles: {battles_won}. Energy left: {initial_energy}")
        break
    cost = int(command)
    if initial_energy < cost:
        print(f"Not enough energy! Game ends with {battles_won} won battles and {initial_energy} energy")
        break
    battles_won += 1
    initial_energy -= cost
    # Every third victory refunds energy equal to the running win count.
    if battles_won % 3 == 0:
        initial_energy += battles_won
| [
"noreply@github.com"
] | MiroVatov.noreply@github.com |
a7a9b0d01df39a7334dbb96bcabf78d7796a7cde | 3e1f833a1362fde69ea5deb5636b6600c3734a54 | /beutifullmatrix.py | d598c31e05a02807cf2b5e7c441112a5ce392476 | [] | no_license | pawan-1997/portfolio | 75549baf691189ec4230e6b8e3b5d6a2061f170f | 955a4daa312b11d45b91347cfc19d82142ee5906 | refs/heads/main | 2022-12-28T20:23:52.233594 | 2020-10-03T16:21:50 | 2020-10-03T16:21:50 | 300,917,042 | 0 | 1 | null | 2020-10-03T16:21:52 | 2020-10-03T15:48:06 | JavaScript | UTF-8 | Python | false | false | 1,136 | py | matrix = []
for i in range(5):
m = input()
matrix.append(m)
index_i = 1
pos_i = 0
pos_y = 0
for i in matrix:
if("1" in i):
pos_i = index_i
# print(pos_i)
# print(i.find("1"))
pos_y = i.find("1")
# print(pos_y)
index_i = index_i + 1
# print(pos_i)
moves = 0
def calc_i():
    """Vertical moves needed to bring the row holding the '1' (global
    pos_i, 1-based) to the centre row; 0 when pos_i is out of range."""
    row_distance = {1: 2, 2: 1, 3: 0, 4: 1, 5: 2}
    return row_distance.get(pos_i, 0)
def calc_y():
    """Horizontal moves needed to centre the '1': global pos_y is its
    0-based offset inside the space-separated row string, so valid
    columns sit at offsets 0, 2, 4, 6 and 8 (two offsets per column).

    The original fell through to `pass` (returning None) for any other
    offset, which made the later `moves_i + moves_j` raise TypeError;
    the distance formula below covers every offset and keeps the exact
    values for the five valid ones.
    """
    return abs(pos_y - 4) // 2
moves_i = calc_i()
moves_j = calc_y()
# print(moves_i)
# print(moves_j)
moves = moves_i + moves_j
print(moves)
# print(matrix)
| [
"rcashok0@gmail.com"
] | rcashok0@gmail.com |
eabf5a12f02c113b17e02b3868d4257e2b22e4d9 | 9e271a3bc1bf388d82bc5a01d275d910c00f315c | /user/migrations/0016_history.py | db099076f07e14f65b384b61c7664c39e3115ac3 | [
"MIT"
] | permissive | kthaisociety/website | 36f11b704f9c38414e0999b55db4513444b53f9e | 4c4efb8a93218ae128d203b15c4340f90fe9f6a6 | refs/heads/master | 2023-08-09T19:44:16.968356 | 2023-05-20T20:33:05 | 2023-05-20T20:33:05 | 218,593,606 | 2 | 3 | MIT | 2023-05-20T20:33:06 | 2019-10-30T18:17:10 | Python | UTF-8 | Python | false | false | 998 | py | # Generated by Django 2.2.18 on 2021-03-28 21:49
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated: create the History model (UUID pk, plural 'histories')."""
    dependencies = [("user", "0015_user_slack_picture_hash")]
    operations = [
        migrations.CreateModel(
            name="History",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("title", models.CharField(max_length=255)),
                ("body", models.TextField(max_length=5000)),
                ("time", models.DateField()),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
            ],
            options={"verbose_name_plural": "histories"},
        )
    ]
| [
"oriol.closa@est.fib.upc.edu"
] | oriol.closa@est.fib.upc.edu |
b683462d7673efeb1ba027e98d544e1841f91e03 | 5b70fbd53b534306c146ffb98a0f99d2343a948f | /src/Python/Problem74.py | 341431d1b4675c9eded21d48205b99cf477cd670 | [] | no_license | aniruddhamurali/Project-Euler | 1f4ff3aa1e9c4efbc2a85026821e19a28b5edf90 | 408b3098fbc98ff3954679602c0468ddb56ea0ac | refs/heads/master | 2020-03-20T23:07:22.178103 | 2018-07-27T01:40:46 | 2018-07-27T01:40:46 | 137,830,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | import math
import time
def digitFactorialSum(n):
    """Return the sum of the factorials of the decimal digits of n."""
    return sum(math.factorial(int(digit)) for digit in str(n))
def main():
    """Project Euler 74: count starting values below one million whose
    digit-factorial chain contains exactly sixty non-repeating terms."""
    start = time.time()
    count = 0
    for i in range(0,1000000):
        chain = set()
        n = i
        # Walk the chain until a value repeats (the inner break) or the
        # chain produces the start value i itself (the while condition).
        while i not in chain:
            nSum = digitFactorialSum(n)
            if nSum in chain:
                break
            chain.add(nSum)
            n = nSum
        chain.add(i)
        # The set now holds the distinct terms of i's chain, i included.
        if len(chain) == 60:
            count += 1
            #print(chain)
    print(time.time()-start)  # elapsed seconds, printed for profiling
    return count
# Answer: 402
| [
"aniruddha.murali@gmail.com"
] | aniruddha.murali@gmail.com |
bc4f9b46cbfb936585c30642734af6d52f04b823 | 7cf00f09f9a46175a08993196da0db7b3a48a992 | /arrays/is_monotonic.py | 90fc581517fcf3a0346b47cf67b74f76d3070754 | [] | no_license | newsteinking/algorithms3 | 4fdde66f2f40ce53346752173493265391307ccd | c090674898d97fc2564ac688dc2347a5d0c33dfb | refs/heads/master | 2022-04-21T10:02:47.091503 | 2020-04-21T06:32:07 | 2020-04-21T06:32:07 | 257,497,952 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | '''
An array is monotonic if it is either monotone increasing or monotone decreasing.
An array A is monotone increasing if for all i <= j, A[i] <= A[j]. An array A is monotone
decreasing if for all i <= j, A[i] >= A[j].
Return true if and only if the given array A is monotonic.
Example 1:
Input: [1,2,2,3]
Output: true
Example 2:
Input: [6,5,4,4]
Output: true
Example 3:
Input: [1,3,2]
Output: false
Example 4:
Input: [1,2,4,5]
Output: true
Example 5:
Input: [1,1,1]
Output: true
'''
# def monotonic_array(A):
# if len(A) == 1: return True
# down, up = False, False
#
# for i in range(len(A) - 1):
# if A[i] > A[i + 1]:
# if up: return False
# down = True
# elif A[i] < A[i + 1]:
# if down: return False
# up = True
#
# return False if up and down else True
def is_monotonic(A):
    """Return True when A is entirely non-decreasing or non-increasing.

    Empty and single-element lists count as monotonic, matching the
    original's behaviour. Expressed with all()/zip() over adjacent pairs
    instead of the original two-flag early-return scan.
    """
    pairs = list(zip(A, A[1:]))
    non_decreasing = all(a <= b for a, b in pairs)
    non_increasing = all(a >= b for a, b in pairs)
    return non_decreasing or non_increasing
| [
"newstein33@gmail.com"
] | newstein33@gmail.com |
c8d40e1b41a5aa6ba1ecbbad3fd6ded3ce1e72cb | bd08d0532f20b7285b437c9bf620de1bbcd5b9ea | /aalh_iit_natreghis_001/debug-convert-dates.py | 01dd4897c756affd40535d0c2248df0dd5aaf371 | [
"Unlicense"
] | permissive | johndewees/iitmigration | a9e8a31ba6ceb541ce12c22fd612596cc243dbca | 4dadfbecda719d6e7d60af076a231aedec3c862f | refs/heads/main | 2023-03-14T17:06:58.777683 | 2021-03-27T20:44:58 | 2021-03-27T20:44:58 | 320,086,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,850 | py | from openpyxl import load_workbook
import re
# Normalise the date column (col 31) of the 'Metadata Template' sheet from
# slash formats (M/D/YYYY, or an embedded YYYY/MM/DD) to ISO YYYY-MM-DD,
# editing the workbook in place. Fixes from the original: `is None` instead
# of `== None`, a raw regex string (the bare '\d' escapes warn on modern
# Python), removal of unreachable code after `continue`, and the two
# identical length-check branches merged into one.
filename = 'aalh_iit_natreghis_001.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 31
maximumcol = 31
minimumrow = 7
maximumrow = 194
iterationrow = 7
targetcol = 31
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
    # First pass: rewrite any slash-formatted date as ISO 8601.
    for cell in row:
        testvar = ws.cell(row=iterationrow, column=targetcol).value
        if testvar is None:
            continue  # no date digitized
        if testvar.find('/') != -1:
            match = re.search(r'\d\d\d\d/\d\d/\d\d', testvar)
            if match:
                # Already year-first: just swap the separators.
                isodate = match[0].replace('/', '-')
            else:
                # Assume US-style month/day/year; zero-pad month and day.
                parts = testvar.split('/')
                year = parts[2].strip()
                month = '%02d' % int(parts[0].strip())
                day = '%02d' % int(parts[1].strip())
                isodate = year + '-' + month + '-' + day
            ws.cell(row=iterationrow, column=targetcol).value = isodate
            print(iterationrow, '|', testvar, '|', ws.cell(row=iterationrow, column=targetcol).value)
    # Second pass: flag any dashed date that is not exactly 10 characters.
    for cell in row:
        testvar2 = ws.cell(row=iterationrow, column=targetcol).value
        if testvar2 is None:
            continue
        if testvar2.find('-') != -1 and len(testvar2) != 10:
            print('***CHECK THIS LINE FOR INCORRECT FORMATTING***')
    iterationrow = iterationrow + 1
wb.save('aalh_iit_natreghis_001.xlsx')
"noreply@github.com"
] | johndewees.noreply@github.com |
d8dcc43724528dd231ed378820c811ee17da5ad7 | 67325192c1e528a39d457f11e61b480d68826708 | /__main__.py | 82f897e592bf6b74763104fad108b9230f332b9a | [
"MIT"
] | permissive | vashistaarav1611/mcpython-a-minecraft-clone-in-python | 5851b377b54fd2b28c106112c7b18f397b71ab50 | c16cd66f319efdeec4130e1a43f5a857caf1ea13 | refs/heads/master | 2023-02-01T22:48:51.787106 | 2020-12-21T15:02:25 | 2020-12-21T15:02:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | print("MCPYTHON ModLoader")
# `globals` here is the project's globals.py module, not the builtin function.
import sys, os, globals as G
# Mod directory: first CLI argument, defaulting to the working directory.
dir = sys.argv[1] if len(sys.argv) > 1 else "./"  # NOTE(review): shadows builtin `dir`
G.local = dir
# Append this file's directory to argv — presumably read back by ModLoader; TODO confirm.
sys.argv.append(os.path.dirname(os.path.realpath(__file__)))
import ModLoader
ModLoader.load(dir)
| [
"baulukas1301@googlemail.com"
] | baulukas1301@googlemail.com |
60b231062ee9be5296c037658e8fe6e337909004 | 43bac293a3ee710140f3869937ef4b37345bac2a | /ex37.py | f6dfdccbd4c692a1335b88844333df307e5eb468 | [] | no_license | DikranHachikyan/python-PLDA-20191011 | 4e176f7eaa65627a4670acd75f470016bfed4f8e | 80cc4740039fcc473cdf436499b0c602a9ab48e0 | refs/heads/master | 2020-08-11T15:54:02.413645 | 2019-11-05T09:39:17 | 2019-11-05T09:39:17 | 214,591,265 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/home/wizard/anaconda3/bin/python
# import time
from time import time,sleep
def foo(sleep_time: float = 0.3) -> None:
    '''Function foo sleeps sleep_time seconds'''
    sleep(sleep_time)
def measure(func):
    """Decorator: time each call to `func`, print the elapsed seconds and
    the wrapped function's docstring, and pass the return value through.

    The original wrapper discarded func's return value and clobbered its
    __name__/__doc__ (the final `print(f.__name__)` showed 'wrapper').
    """
    from functools import wraps

    @wraps(func)  # keep func's __name__/__doc__ visible on the wrapper
    def wrapper(*args, **kwargs):
        t = time()
        result = func(*args, **kwargs)
        print(f'{func.__name__} : {time() - t:.4f}')
        print(f'{func.__doc__}')
        return result
    return wrapper
f = measure(foo)
f(0.5)
print(f'{f.__name__}:{f.__doc__}')
| [
"dhachikian@expert-bg.org"
] | dhachikian@expert-bg.org |
c39386cd8e78c8663735bc64da95f42972ef91f9 | 1033906372e48d2f53b907848b86dec2eab635f4 | /old/midify.py | dc63760c0b5b28e1d2f23765396f8c3eee5b2a8c | [] | no_license | csv/ddpy | b0a29fbc094f728b8cbfafa4d5a301c7a35006c8 | 2c1fccdff907f0504b5f514cfd67199a2e77514e | refs/heads/master | 2016-09-05T17:44:25.506356 | 2013-11-08T18:57:25 | 2013-11-08T18:57:25 | 12,258,676 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | from midiutil.MidiFileGenerator import MidiFileGenerator, MidiTrack
from midiutil.Scales import *
from math import ceil
from random import sample
import json
from defaults import note_lookup, root_lookup
def note_to_midi(n):
    """Map a note name (via defaults.note_lookup) or a ready MIDI int to int.

    NOTE(review): `basestring` only exists on Python 2; under Python 3 a
    string argument raises NameError. Unhandled types fall through to None.
    """
    if isinstance(n, basestring):
        return note_lookup[n]
    elif isinstance(n, int):
        return n
def root_to_midi(n):
    """Map a root name (via defaults.root_lookup) or a ready MIDI int to int.

    NOTE(review): `basestring` only exists on Python 2; under Python 3 a
    string argument raises NameError. Unhandled types fall through to None.
    """
    if isinstance(n, basestring):
        return root_lookup[n]
    elif isinstance(n, int):
        return n
def bpm_time(bpm=120, count=0.25):
    """Seconds occupied by `count` bars at `bpm` (one bar = four beats)."""
    whole_bar = float((60.0 / float(bpm)) * 4.0)
    return whole_bar * float(count)
def scale_vec(vec, low, high):
    """Linearly map the values of `vec` onto the range [low, high].

    Note: raises ZeroDivisionError when every element of `vec` is equal
    (min == max), exactly as the original did.
    """
    lo_val = min(vec)
    hi_val = max(vec)
    span = hi_val - lo_val
    return [int(ceil(v - lo_val)) * (high - low) / span for v in vec]
def midify(
    vec,
    out_file,
    key = "C",
    scale=MAJOR,
    bpm=120,
    count=0.25,
    channel=1,
    min_note="C-1",
    max_note="G9"
    ):
    """Map `vec` onto notes of `scale` rooted at `key` and write a MIDI
    file to `out_file`; each value becomes one note lasting `count` bars
    at fixed velocity 100."""
    # transform key and min/max note names into MIDI numbers
    key = root_to_midi(key)
    min_note = note_to_midi(min_note)
    max_note = note_to_midi(max_note)
    # select the candidate notes within [min_note, max_note]
    notes = build_scale(key, scale, min_note, max_note)
    # scale vec values onto indexes into that note list
    note_indexes = scale_vec(vec, low=0, high=(len(notes)-1))
    # determine note length in seconds
    beat = bpm_time(bpm, count)
    # generate midi file, one note per input value, back to back
    m = MidiFileGenerator()
    track = MidiTrack(channel=channel, tempo=bpm)
    t = 0
    for i in note_indexes:
        n = notes[i]
        track.add_note(time=t, duration=beat, note=n, velocity=100)
        t += beat
    m.tracks.append(track)
    m.writeToFile(out_file)
if __name__ == '__main__':
vec = sample(range(1,10000), 32)
midify(vec, bpm=130, count= 0.125, out_file="random.mid", scale=CHROMATIC, min_note="C2", max_note="D#3")
vec = sample(range(1,10000), 32)
midify(vec, bpm=130, count= 0.125, out_file="bass.mid", key = "E", scale=MAJOR, min_note="E2", max_note="G#4")
vec = sample(range(1,10000), 32)
midify(vec, bpm=130, count= 0.125, out_file="arp.mid", key = "E", scale=MAJOR, min_note="B5", max_note="G#7") | [
"brianabelson@gmail.com"
] | brianabelson@gmail.com |
803e427fe7e8e3ed41280faf81f3782e5db35337 | f0a624fc75db12a105e096391d354d5847a7afa5 | /day08/demo03.py | 4998c37c3149e7644e42bac91846e763e07c3c91 | [] | no_license | Linkin-1995/test_code1 | e50399e929bdf23ac7b82f54dd9ff63a64223d6a | 7d0b41516751538a967aa5d42161195ac49fc842 | refs/heads/master | 2022-12-06T18:47:06.858474 | 2020-08-21T10:40:31 | 2020-08-21T10:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | """
函数 - 功能
参数: 用法 向 做法 传递的信息
制作
def 函数名称(变量1,变量2):
函数体
使用
函数名(数据1,数据2)
练习:exercise01~03
"""
# 做法
# 形式参数:表面的数据(真实数据的代表)
def attack(count):
    """Print the three-strike combo `count` times, one strike per line."""
    for _ in range(count):
        print("摆拳")
        print("临门一脚")
        print("勾拳")
# 实际参数:具有真实数据(客观存在)
# 用法 10次
attack(10) # 调试如果希望审查函数体代码,按F7
attack(3)
| [
"1105377455@qq.com"
] | 1105377455@qq.com |
49ac1ff9278d3e2219128273a10b912b104b6472 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/algorithms-master/algorithms/backtrack/letter_combination.py | 5bece7303612a8a562505e1618ed24a503bc5002 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 791 | py | """
Given a digit string, return all possible letter
combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below:
2: "abc"
3: "def"
4: "ghi"
5: "jkl"
6: "mno"
7: "pqrs"
8: "tuv"
9: "wxyz"
Input:Digit string "23"
Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
"""
def letter_combinations(digits):
if digits == "":
return []
kmaps = {
"2": "abc",
"3": "def",
"4": "ghi",
"5": "jkl",
"6": "mno",
"7": "pqrs",
"8": "tuv",
"9": "wxyz",
}
ans = [""]
for num in digits:
tmp = []
for an in ans:
for char in kmaps[num]:
tmp.append(an + char)
ans = tmp
return ans
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
a70fa1ef04a8545a6a13b33878d492afa39584b7 | 5b3bd326998606188b45a7870852643eda024a97 | /utils/dataset_util_test.py | 7b293e9d4759ba5e20c82eb256f1963d613d98af | [] | no_license | KuznetsovIllya/clearml_od_toy | 31556d0726d15a054c1c18317c361d97801381a4 | 92f15f04a023d4e0e165a250fddc3129144913d0 | refs/heads/main | 2023-04-11T05:55:56.248478 | 2021-04-14T15:59:40 | 2021-04-14T15:59:40 | 357,827,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:5b987aa43023f49104601b5e44e6096dd219539d07712d377a93b0716555ebfc
size 1416
| [
"illia.kuznietsov@modern-expo.com"
] | illia.kuznietsov@modern-expo.com |
ad84bdb8f2de5e3741f4d7ffefeadb7518f4c055 | 48894ae68f0234e263d325470178d67ab313c73e | /sa/profiles/Dell/Powerconnect55xx/get_config.py | cb916fc0aa0de2175f60138f19dba501f00cb408 | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 671 | py | # -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## Dell.Powerconnect55xx.get_config
##----------------------------------------------------------------------
## Copyright (C) 2007-2013 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
## NOC modules
from noc.sa.script import Script as NOCScript
from noc.sa.interfaces import IGetConfig
class Script(NOCScript):
name = "Dell.Powerconnect55xx.get_config"
implements = [IGetConfig]
def execute(self):
config = self.cli("show running-config")
return self.cleaned_config(config)
| [
"dmitryluhtionov@gmail.com"
] | dmitryluhtionov@gmail.com |
e0060a932e35c03b8ddfa5590407f24a3b89b43b | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/compareVersions_20200909132443.py | e3af193a67a236c06299befe3c97f2948195dae3 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | def compare(version1,version2):
# split where there are ,
# then loop through both of them
# if v1 > v2 return 1
# if v1 < v2 return -1
# otherwise return 0
v1 = version1.split(".")
v2 = version2.split(".")
v1 = [int(i) for i in v1]
v2= [int(i) for i in v2]
if len(v1) > len(v2):
length = len(v1)
else:
length = len(v2)
for i in range(length):
if v1[i] > v2[i]:
return 1
elif v1[i] < v2[i]:
return -1
for a,b in zip(v1,v2):
print('a',a,'b',b)
if a > b or (a is not None and b is None):
return 1
elif a < b or (b is not None and a is None):
return -1
return 0
print(compare("1","1.1"))
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
ddfca99d193430dadb415a4fc865f00805199317 | 464850ba426263b17084fc71363ca14b8278b15e | /08.py | 44f4c2b9d406b330216766b8bf809b7a7f07f5da | [] | no_license | eng-arvind/python | 8442c30ec10f979f913b354458b4f910539d8728 | 249f5f35f245a3f1742b10310de37ca6c6023af2 | refs/heads/master | 2020-12-23T06:40:16.911269 | 2020-02-02T18:42:01 | 2020-02-02T18:42:01 | 237,069,973 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def pat(n):
k = 1
for i in range(n):
for j in range(i + 1):
print(k, end=" ")
k += 1
print()
pat(5)
| [
"noreply@github.com"
] | eng-arvind.noreply@github.com |
54f0a4e994fa3dc317186e28957003463120f74a | fc2b8c646223acd17819d631ce57146cd1725456 | /collect_dataset/main_collect.py | 9482d6222d69a652ec7a04fa6d5909b487bf616f | [] | no_license | peachman05/RGB_action_recognition | bd4ac4a60097ac70795b0a9cbdf6d332cd85d764 | 21b47f1d1c0f1712d0dc22bb57c52db3c31b47ed | refs/heads/master | 2020-08-31T13:35:38.699161 | 2020-04-12T09:11:45 | 2020-04-12T09:11:45 | 218,701,781 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | from collect_dataset import run_collect
# Define
run_time = 60 # second
action_select = 3 # 0=dribble, 1=shoot, 2=pass, 3=stand
path_dataset = 'F:\\Master Project\\Dataset\\BasketBall-RGB\\' # folder path
show_FPS = False
action_list = ['dribble','shoot','pass','stand']
action = action_list[action_select]
path_save = path_dataset +'\\'+action+'\\'+action
run_collect(path_save, run_time, show_FPS)
print("finish main")
| [
"you@example.com"
] | you@example.com |
ab25a0d7423a94f6e815e35c92274f70bf90ad71 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2887/60717/245850.py | ee87114ecb204b742c07e205a3999ba39009bdb5 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | n=int(input())
fLive=0
fDead=0
sLive=0
sDead=0
for i in range(0,n):
list2=input().split()
for j in range(0,3):
list2[j]=int(list2[j])
if list2[0]==1:
fLive+=list2[1]
fDead+=list2[2]
else:
sLive+=list2[1]
sDead+=list2[2]
if fLive>=fDead:
print('LIVE')
else:
print('DEAD')
if sLive>=sDead:
print('LIVE')
else:
print('DEAD')
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
f7c6f5795fda746e447912d435cc4d8d0ee17c71 | fa8dc42cfcf99de58f76807accc5c3566ddae6e4 | /tests/test_verify.py | 408a92f3380584be91aae4234fe172385484b523 | [
"MIT"
] | permissive | thusoy/porridge | 1124cc99cd77f672e6fec5e3d87396c72938a944 | f332b67f29bcbc19b7bb7da2f68ad3af35a9cd4d | refs/heads/master | 2021-01-23T03:53:04.352136 | 2018-05-17T16:28:41 | 2018-05-17T16:28:41 | 86,129,217 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,526 | py | # coding: utf-8
from __future__ import unicode_literals
import pytest
from hypothesis import given, assume
from hypothesis.strategies import integers, text
from porridge import Porridge, MissingKeyError, EncodedPasswordError
from porridge.utils import ensure_bytes
@pytest.mark.parametrize('test_password', (
"pässword".encode("latin-1"),
"pässword",
))
def test_verify(test_password):
"""
Verification works with unicode and bytes.
"""
porridge = Porridge('keyid1:key1', encoding='latin1')
encoded = ( # handrolled test vector lifted from argon2_cffi
"$argon2i$m=8,t=1,p=1$"
"bL/lLsegFKTuR+5vVyA8tA$VKz5CHavCtFOL1N5TIXWSA"
)
assert porridge.verify(test_password, encoded)
@given(text())
def test_verify_self(porridge, given_password):
assert porridge.verify(given_password, porridge.boil(given_password))
@given(
time_cost=integers(1, 5),
memory_cost=integers(0, 513),
parallelism=integers(1, 5),
)
def test_verify_custom_parameters(password, time_cost, memory_cost, parallelism):
assume(parallelism * 8 <= memory_cost)
porridge = Porridge('key:secret', time_cost=time_cost, memory_cost=memory_cost,
parallelism=parallelism)
assert porridge.verify(password, porridge.boil(password))
def test_verify_self_default_parameters(password):
porridge = Porridge('key:secret')
assert porridge.verify(password, porridge.boil(password))
def test_invalid_password(porridge):
assert porridge.verify('pass1', porridge.boil('pass2')) == False
def test_attacker_cant_verify_without_secret(password):
our_porridge = Porridge('id1:key1')
attacker_porridge = Porridge('otherid:otherkey')
encoded_password = our_porridge.boil(password)
with pytest.raises(MissingKeyError):
attacker_porridge.verify(password, encoded_password)
def test_verify_invalid_password_type(porridge):
with pytest.raises(TypeError) as exception:
porridge.verify(1, '')
assert exception.value.args[0].startswith("'password' must be a str")
@pytest.mark.parametrize('encoded', (
# these are all encoded versions of 'password'
'$argon2i$v=19$m=512,t=2,p=2$Vr7zN80DmEZdRQcMGeV2lA$/fcYY5wcLE9YR4ttKuwshw',
'$argon2i$v=16$m=8,t=1,p=1$bXlzYWx0eXNhbHQ$nz8csvIXGASHCkUia+K4Zg',
'$argon2i$m=8,t=1,p=1$bXlzYWx0eXNhbHQ$nz8csvIXGASHCkUia+K4Zg',
))
def test_verify_legacy_passwords_without_secret(encoded):
# Set high enough parameters to avoid triggering the safety check
porridge = Porridge('key1:secret1', memory_cost=256, time_cost=1, parallelism=2)
assert porridge.verify('password', encoded)
@pytest.mark.parametrize('encoded', (
'definitely not a valid',
'$argon2i$m=8,t=1,p=1$bXlzYWx0eXNhbHQ$nz8csvIXGASHCkUia+K4Zg' + 'a' * 207,
))
def test_verify_invalid_encode(porridge, encoded):
with pytest.raises(EncodedPasswordError):
porridge.verify('password', encoded)
@pytest.mark.parametrize('parameter', ('time_cost', 'memory_cost', 'parallelism'))
def test_verify_bails_on_values_higher_than_configured(porridge, parameter):
parameters = {
'time_cost': porridge.time_cost,
'memory_cost': porridge.memory_cost,
'parallelism': porridge.parallelism,
}
parameters[parameter] *= porridge.parameter_threshold + 1
encoded = get_encoded_password_with_parameters(parameters)
with pytest.raises(EncodedPasswordError):
porridge.verify('password', encoded)
@pytest.mark.parametrize('parameter', ('time_cost', 'memory_cost', 'parallelism'))
@given(threshold=integers(1, 8))
def test_verify_doesnt_bail_on_values_equal_to_threshold(parameter, threshold):
# Create an instance where memory_cost is at least the highest parallelism*8
porridge = Porridge('key1:secret1', memory_cost=64, time_cost=1, parallelism=1,
parameter_threshold=threshold)
parameters = {
'time_cost': porridge.time_cost,
'memory_cost': porridge.memory_cost,
'parallelism': porridge.parallelism,
}
parameters[parameter] *= porridge.parameter_threshold
encoded = get_encoded_password_with_parameters(parameters)
# Since the parameters are wrong the password should not be valid
assert porridge.verify('password', encoded) == False
def get_encoded_password_with_parameters(parameters):
template = '$argon2i$v=19$m={memory_cost},t={time_cost},p={parallelism}{tail}'
tail = ',keyid=key1$AhkxHIhp4o4KOuYBCbduUg$vXvsYVvrrzRdOMpVLXgs4w'
return template.format(tail=tail, **parameters)
| [
"git@thusoy.com"
] | git@thusoy.com |
59763f5bcd4fd02e277f6764e628fd7a08f72889 | a0e63dcefb114d024b2c56ae00a3525caebb8f31 | /shutit_threads.py | 2a0b1cf8198e5f85307393d8ec3630162227bfa6 | [
"MIT"
] | permissive | andrzejsydor/shutit | 2eec3d66c3a3f973ee93e64b87c313fda9f5ea3b | 5f3fbb7236b7c18806c4156910de4425591c197a | refs/heads/master | 2020-03-22T13:54:22.854701 | 2018-07-07T13:32:37 | 2018-07-07T13:32:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,330 | py | import curtsies
from curtsies.events import PasteEvent
from curtsies.input import Input
import itertools
import time
import threading
import traceback
import sys
import os
# There are two threads running in ShutIt. The 'main' one, which drives the
# automation, and the 'watcher' one, which manages either the different view
# panes, or outputs a stack trace of the main thread if 'nothing happens' on it.
PY3 = sys.version_info[0] >= 3
# TODO: reject tmux sessions - it does not seem to play nice
# TODO: keep a time counter after the line
# TODO: show context of line (ie lines around)
# TODO: put the lines into an array of objects and mark the lines as inverted/not
def gather_module_paths():
import shutit_global
shutit_global_object = shutit_global.shutit_global_object
owd = shutit_global_object.owd
shutit_module_paths = set()
for shutit_object in shutit_global.shutit_global_object.shutit_objects:
shutit_module_paths = shutit_module_paths.union(set(shutit_object.host['shutit_module_path']))
if '.' in shutit_module_paths:
shutit_module_paths.remove('.')
shutit_module_paths.add(owd)
for path in shutit_module_paths:
if path[0] != '/':
shutit_module_paths.remove(path)
shutit_module_paths.add(owd + '/' + path)
return shutit_module_paths
def managing_thread_main():
import shutit_global
from shutit_global import SessionPaneLine
shutit_global.shutit_global_object.global_thread_lock.acquire()
shutit_module_paths = gather_module_paths()
shutit_global.shutit_global_object.global_thread_lock.release()
shutit_global.shutit_global_object.stacktrace_lines_arr = [SessionPaneLine('',time.time(),'log'),]
last_code = []
draw_type = 'default'
zoom_state = None
while True:
# We have acquired the lock, so read in input
with Input() as input_generator:
input_char = input_generator.send(0.001)
if input_char == 'r':
# Rotate sessions at the bottom
shutit_global.shutit_global_object.lower_pane_rotate_count += 1
elif input_char == '1':
if zoom_state == 1:
draw_type = 'default'
zoom_state = None
else:
draw_type = 'zoomed1'
zoom_state = 1
elif input_char == '2':
if zoom_state == 2:
draw_type = 'default'
zoom_state = None
else:
draw_type = 'zoomed2'
zoom_state = 2
elif input_char == '3':
if zoom_state == 3:
draw_type = 'default'
zoom_state = None
else:
draw_type = 'zoomed3'
zoom_state = 3
elif input_char == '4':
if zoom_state == 4:
draw_type = 'default'
zoom_state = None
else:
draw_type = 'zoomed4'
zoom_state = 4
elif input_char == 'q':
draw_type = 'clearscreen'
shutit_global.shutit_global_object.pane_manager.draw_screen(draw_type=draw_type)
os.system('reset')
os._exit(1)
# Acquire lock to write screen. Prevents nasty race conditions.
# Different depending PY2/3
if PY3:
if not shutit_global.shutit_global_object.global_thread_lock.acquire(blocking=False):
time.sleep(0.01)
continue
else:
if not shutit_global.shutit_global_object.global_thread_lock.acquire(False):
time.sleep(0.01)
continue
code = []
for thread_id, stack in sys._current_frames().items():
# ignore own thread:
if thread_id == threading.current_thread().ident:
continue
for filename, lineno, name, line in traceback.extract_stack(stack):
# if the file is in the same folder or subfolder as a folder in: self.host['shutit_module_path']
# then show that context
for shutit_module_path in shutit_module_paths:
if filename.find(shutit_module_path) == 0:
if len(shutit_global.shutit_global_object.stacktrace_lines_arr) == 0 or shutit_global.shutit_global_object.stacktrace_lines_arr[-1] != line:
linearrow = '===> ' + str(line)
code.append('_' * 80)
code.append('=> %s:%d:%s' % (filename, lineno, name))
code.append('%s' % (linearrow,))
from_lineno = lineno - 5
if from_lineno < 0:
from_lineno = 0
to_lineno = 10
else:
to_lineno = lineno + 5
lineno_count = from_lineno
with open(filename, "r") as f:
for line in itertools.islice(f, from_lineno, to_lineno):
line = line.replace('\t',' ')
lineno_count += 1
if lineno_count == lineno:
code.append('***' + str(lineno_count) + '> ' + line.rstrip())
else:
code.append('===' + str(lineno_count) + '> ' + line.rstrip())
code.append('_' * 80)
if code != last_code:
for line in code:
shutit_global.shutit_global_object.stacktrace_lines_arr.append(SessionPaneLine(line,time.time(),'log'))
last_code = code
shutit_global.shutit_global_object.pane_manager.draw_screen(draw_type=draw_type)
shutit_global.shutit_global_object.global_thread_lock.release()
def track_main_thread():
t = threading.Thread(target=managing_thread_main)
t.daemon = True
t.start()
def managing_thread_main_simple():
"""Simpler thread to track whether main thread has been quiet for long enough
that a thread dump should be printed.
"""
import shutit_global
last_msg = ''
while True:
printed_anything = False
if shutit_global.shutit_global_object.log_trace_when_idle and time.time() - shutit_global.shutit_global_object.last_log_time > 10:
this_msg = ''
for thread_id, stack in sys._current_frames().items():
# ignore own thread:
if thread_id == threading.current_thread().ident:
continue
printed_thread_started = False
for filename, lineno, name, line in traceback.extract_stack(stack):
if not printed_anything:
printed_anything = True
this_msg += '='*80 + '\n'
this_msg += 'STACK TRACES PRINTED ON IDLE: THREAD_ID: ' + str(thread_id) + ' at ' + time.strftime('%c') + '\n'
this_msg += '='*80 + '\n'
if not printed_thread_started:
printed_thread_started = True
this_msg += '%s:%d:%s' % (filename, lineno, name) + '\n'
if line:
this_msg += ' %s' % (line,) + '\n'
if printed_anything:
this_msg += '='*80 + '\n'
this_msg += 'STACK TRACES DONE\n'
this_msg += '='*80 + '\n'
if this_msg != last_msg:
print(this_msg)
last_msg = this_msg
time.sleep(5)
def track_main_thread_simple():
t = threading.Thread(target=managing_thread_main_simple)
t.daemon = True
t.start()
| [
"ian.miell@gmail.com"
] | ian.miell@gmail.com |
1820e9bea144a350bba9d060cb30654b92aefb91 | 1f1048624ee8d71101ae1127c9aa9c9dc81b857f | /tests/test_bug_fixes.py | bb5c7e5d42f69d5ddf7e161a607b0a2909c18b24 | [
"BSD-3-Clause"
] | permissive | netpastor/pyexcel | 434983a942de4d70549bcec578854d3da241c576 | 0c126b9e4c650c6735665c79e616546149f2b717 | refs/heads/master | 2021-01-19T10:40:19.107503 | 2017-04-10T07:04:50 | 2017-04-10T15:13:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,968 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from textwrap import dedent
import pyexcel as pe
from datetime import datetime
from _compact import StringIO, OrderedDict
from nose.tools import eq_
def test_bug_01():
"""
if first row of csv is shorter than the rest of the rows,
the csv will be truncated by first row. This is a bug
"a,d,e,f" <- this will be 1
'1',2,3,4 <- 4
'2',3,4,5
'b' <- give '' for missing cells
"""
r = pe.Reader(os.path.join("tests", "fixtures", "bug_01.csv"))
assert len(r.row[0]) == 4
# test "" is append for empty cells
assert r[0, 1] == ""
assert r[3, 1] == ""
def test_issue_03():
file_prefix = "issue_03_test"
csv_file = "%s.csv" % file_prefix
xls_file = "%s.xls" % file_prefix
my_sheet_name = "mysheetname"
data = [[1, 1]]
sheet = pe.Sheet(data, name=my_sheet_name)
sheet.save_as(csv_file)
assert(os.path.exists(csv_file))
sheet.save_as(xls_file)
book = pe.load_book(xls_file)
assert book.sheet_names()[0] == my_sheet_name
os.unlink(csv_file)
os.unlink(xls_file)
def test_issue_06():
import logging
logger = logging.getLogger("test")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
output = StringIO()
book = pe.Book({'hoja1': [['datos', 'de', 'prueba'], [1, 2, 3]], })
book.save_to_memory('csv', output)
logger.debug(output.getvalue())
def test_issue_09():
pe.book.LOCAL_UUID = 0
merged = pe.Book()
sheet1 = pe.Sheet(sheet=[[1, 2]])
sheet2 = pe.Sheet(sheet=[[1, 2]])
merged += sheet1
merged += sheet2
eq_(merged[1].name, "pyexcel sheet_1")
def test_issue_10():
thedict = OrderedDict()
thedict.update({"Column 1": [1, 2, 3]})
thedict.update({"Column 2": [1, 2, 3]})
thedict.update({"Column 3": [1, 2, 3]})
pe.save_as(adict=thedict, dest_file_name="issue10.xls")
newdict = pe.get_dict(file_name="issue10.xls")
assert isinstance(newdict, OrderedDict) is True
assert thedict == newdict
os.unlink("issue10.xls")
def test_issue_29():
a = [
# error case
['2016-03-31 10:59', '0123', 'XS360_EU', '04566651561653122'],
# python types
[datetime(2016, 4, 15, 17, 52, 11), 123, False, 456193284757]
]
s = pe.get_sheet(array=a)
content = dedent("""
pyexcel_sheet1:
+------------------+------+----------+-------------------+
| 2016-03-31 10:59 | 0123 | XS360_EU | 04566651561653122 |
+------------------+------+----------+-------------------+
| 15/04/16 | 123 | false | 456193284757 |
+------------------+------+----------+-------------------+""")
eq_(str(s), content.strip('\n'))
def test_issue_29_nominablesheet():
a = [
['date', 'number', 'misc', 'long number'],
# error case
['2016-03-31 10:59', '0123', 'XS360_EU', '04566651561653122'],
# python types
[datetime(2016, 4, 15, 17, 52, 11), 123, False, 456193284757]
]
s = pe.get_sheet(array=a)
s.name_columns_by_row(0)
content = dedent("""
pyexcel_sheet1:
+------------------+--------+----------+-------------------+
| date | number | misc | long number |
+==================+========+==========+===================+
| 2016-03-31 10:59 | 0123 | XS360_EU | 04566651561653122 |
+------------------+--------+----------+-------------------+
| 15/04/16 | 123 | false | 456193284757 |
+------------------+--------+----------+-------------------+""")
eq_(str(s), content.strip('\n'))
def test_issue_51_orderred_dict_in_records():
from pyexcel.plugins.sources.pydata import RecordsReader
records = []
orderred_dict = OrderedDict()
orderred_dict.update({"Zebra": 10})
orderred_dict.update({"Hippo": 9})
orderred_dict.update({"Monkey": 8})
records.append(orderred_dict)
orderred_dict2 = OrderedDict()
orderred_dict2.update({"Zebra": 1})
orderred_dict2.update({"Hippo": 2})
orderred_dict2.update({"Monkey": 3})
records.append(orderred_dict2)
records_reader = RecordsReader(records)
array = list(records_reader.to_array())
expected = [['Zebra', 'Hippo', 'Monkey'], [10, 9, 8], [1, 2, 3]]
eq_(array, expected)
def test_issue_51_normal_dict_in_records():
from pyexcel.plugins.sources.pydata import RecordsReader
records = []
orderred_dict = {}
orderred_dict.update({"Zebra": 10})
orderred_dict.update({"Hippo": 9})
orderred_dict.update({"Monkey": 8})
records.append(orderred_dict)
orderred_dict2 = {}
orderred_dict2.update({"Zebra": 1})
orderred_dict2.update({"Hippo": 2})
orderred_dict2.update({"Monkey": 3})
records.append(orderred_dict2)
records_reader = RecordsReader(records)
array = list(records_reader.to_array())
expected = [['Hippo', 'Monkey', 'Zebra'], [9, 8, 10], [2, 3, 1]]
eq_(array, expected)
def test_issue_55_unicode_in_headers():
headers = [u'Äkkilähdöt', u'Matkakirjoituksia', u'Matkatoimistot']
content = [headers, [1, 2, 3]]
sheet = pe.Sheet(content)
sheet.name_columns_by_row(0)
eq_(sheet.colnames, headers)
def test_issue_60_chinese_text_in_python_2_stdout():
import sys
data = [['这', '是', '中', '文'], ['这', '是', '中', '文']]
sheet = pe.Sheet(data)
sys.stdout.write(repr(sheet))
def test_issue_60_chinese_text_in_python_2_stdout_on_book():
import sys
adict = {"Sheet 1": [['这', '是', '中', '文'], ['这', '是', '中', '文']]}
book = pe.Book()
book.bookdict = adict
sys.stdout.write(repr(book))
def test_issue_63_empty_array_crash_texttable_renderer():
sheet = pe.Sheet([])
print(sheet)
def test_xls_issue_11():
data = [[1, 2]]
sheet = pe.Sheet(data)
sheet2 = pe.get_sheet(file_content=sheet.xls, file_type='XLS')
eq_(sheet.array, sheet2.array)
test_file = 'xls_issue_11.JSON'
sheet2.save_as(test_file)
os.unlink(test_file)
def test_issue_68():
data = [[1]]
sheet = pe.Sheet(data)
stream = sheet.save_to_memory('csv')
eq_(stream.read(), '1\r\n')
data = {"sheet": [[1]]}
book = pe.Book(data)
stream = book.save_to_memory('csv')
eq_(stream.read(), '1\r\n')
def test_issue_74():
from decimal import Decimal
data = [[Decimal("1.1")]]
sheet = pe.Sheet(data)
table = sheet.texttable
expected = 'pyexcel sheet:\n+-----+\n| 1.1 |\n+-----+'
eq_(table, expected)
def test_issue_76():
from pyexcel._compact import StringIO
tsv_stream = StringIO()
tsv_stream.write('1\t2\t3\t4\n')
tsv_stream.write('1\t2\t3\t4\n')
tsv_stream.seek(0)
sheet = pe.get_sheet(file_stream=tsv_stream, file_type='csv',
delimiter='\t')
data = [
[1, 2, 3, 4],
[1, 2, 3, 4]
]
eq_(sheet.array, data)
| [
"wangc_2011@hotmail.com"
] | wangc_2011@hotmail.com |
69869813ffb36d5a2c382d673696ec7c7fd2fbe9 | 084e35c598426b1137f9cd502e1b5e7f09cdf034 | /leetcode_weekly_competition/226周赛/1.py | 09da9b65e006ed6ec0c0456861c0ba4c9e9e6cf0 | [] | no_license | sakurasakura1996/Leetcode | 3a941dadd198ee2f54b69057ae3bbed99941974c | 78f239959af98dd3bd987fb17a3544010e54ae34 | refs/heads/master | 2021-09-11T05:07:44.987616 | 2021-09-07T05:39:34 | 2021-09-07T05:39:34 | 240,848,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | class Solution:
def countBalls(self, lowLimit: int, highLimit: int) -> int:
ans = [0] * 46
for i in range(lowLimit, highLimit+1):
tmp = 0
cur = i
while cur:
tmp += int(cur%10)
cur = int(cur/10)
ans[tmp] += 1
return max(ans)
if __name__ == '__main__':
solu = Solution()
lowLimit = 52603
highLimit = 87295
ans = solu.countBalls(lowLimit, highLimit)
print(ans) | [
"2470375551@qq.com"
] | 2470375551@qq.com |
491e8253f9d4c90d5822a1fc62bb284b383d4408 | cc595296b60913bfd6e718c60aaa68d9a5008781 | /profiler/weather_plot.py | 74168fba17d23322089332a6a9ef6e24b4c0039b | [] | no_license | JBlaschke/divelite | f04e24afe1b160702a95878586210d9739141222 | e9a54c67ab8c201003783e50da3a9a46acf24507 | refs/heads/master | 2021-05-21T05:40:25.118724 | 2020-10-11T01:02:56 | 2020-10-11T01:02:56 | 252,570,721 | 0 | 0 | null | 2020-04-02T21:41:56 | 2020-04-02T21:41:55 | null | UTF-8 | Python | false | false | 939 | py | import sys
import numpy as np
import matplotlib.pyplot as plt
log_fname = sys.argv[1]
size = int(sys.argv[2])
data = {}
with open(log_fname) as f:
for line in f:
cols = line.split()
if len(cols) == 2:
rank = int(cols[0])
ts = float(cols[1])
if rank not in data:
data[rank] = [ts]
else:
data[rank].append(ts)
deltas = []
for i in range(1, size):
if i in data:
plt.scatter([i]*len(data[i]), data[i], s=2, marker='o')
# calculate delta
ts_list = data[i]
for j in range(len(ts_list)-1):
deltas.append(ts_list[j+1] - ts_list[j])
plt.show()
# plot histogram of deltas
deltas = np.asarray(deltas)
plt.hist(deltas, bins=100)
plt.title("No. points %d More than 0.5s %d Min %f Max %f Mean %f"%(deltas.size, deltas[deltas>0.5].size, np.min(deltas), np.max(deltas), np.average(deltas)))
plt.show()
| [
"monarin@gmail.com"
] | monarin@gmail.com |
ba0e2b9495f7c37519b04935cc2e4a99f79786e5 | 4fc1c45a7e570cc1204d4b5f21150f0771d34ea5 | /quan_table/read_data/read_data_test.py | 7f8abcee5c4e3a5e9366bbd3813bec0c96321901 | [] | no_license | CN1Ember/feathernet_mine | 77d29576e4ecb4f85626b94e6ff5884216af3098 | ac0351f59a1ed30abecd1088a46c7af01afa29d5 | refs/heads/main | 2023-05-28T17:19:06.624448 | 2021-06-17T04:39:09 | 2021-06-17T04:39:09 | 374,603,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | from PIL import Image
import numpy as np
import os
from torch.utils.data import Dataset
import math
import cv2
import torchvision
import torch
import random
class CASIA(Dataset):
def __init__(self,data_flag = None, transform=None, phase_train=True, data_dir=None,phase_test=False,add_mask = True):
self.phase_train = phase_train
self.phase_test = phase_test
self.transform = transform
self.add_mask = add_mask
self.mask_np = None
# for val
val_file = os.getcwd() +'/data/test_file_list/%s_val.txt'%data_flag
label_val_file = os.getcwd() + '/data/test_file_list/%s_val_label.txt'%data_flag
self.mask_np = np.fromfile('./data/mask_file/mask_for_nir.bin', np.uint8).reshape((112,112))
try:
with open(val_file, 'r') as f:
self.depth_dir_val = f.read().splitlines()
with open(label_val_file, 'r') as f:
self.label_dir_val = f.read().splitlines()
except:
print('can not open files, may be filelist is not exist')
exit()
def __len__(self):
if self.phase_train:
return len(self.depth_dir_train)
else:
if self.phase_test:
return len(self.depth_dir_test)
else:
return len(self.depth_dir_val)
def __getitem__(self, idx):
depth_dir = self.depth_dir_val
label_dir = self.label_dir_val
label = int(label_dir[idx])
label = np.array(label)
depth = Image.open(depth_dir[idx])
# depth = depth.convert('RGB')
depth = depth.convert('L')
# '''filp left and right randonly and add mask'''
# if random.randint(0,9) < 5:
# depth = depth.transpose(Image.FLIP_LEFT_RIGHT) #水平翻转
'''transform'''
if self.transform:
depth = self.transform(depth)
if self.phase_train:
return depth,label
else:
return depth,label,depth_dir[idx]
| [
"chenguo@gpu017.scut-smil.cn"
] | chenguo@gpu017.scut-smil.cn |
9415639e3ab0499619b3e5d066463c3db20866ca | 2a07f85d91050192f5eaa8d5c72fc2e0fbdfb015 | /bmi/pages.py | 32dfdd5024793394903a3fd024d64f2e7af4104a | [] | no_license | chapkovski/hse-otree-workshop | b95c9e18fc49908a820f15666dc56ffce9e39c49 | 6a83b6b3543c3079408f50c7c3e5a22179862446 | refs/heads/master | 2020-04-20T08:37:27.161917 | 2019-02-01T19:22:22 | 2019-02-01T19:22:22 | 168,744,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class Input(Page):
form_model = 'player'
form_fields = ['height', 'weight']
class ResultsWaitPage(WaitPage):
def is_displayed(self):
# if this is interpersonal treatment (in other words 'individual' is False in settings then
# they should wait for the partner. It is not necessary if we are in individual treatment
return not self.session.config.get('individual')
def after_all_players_arrive(self):
pass
class Results(Page):
pass
page_sequence = [
Input,
ResultsWaitPage,
Results
]
| [
"chapkovski@gmail.com"
] | chapkovski@gmail.com |
acec31ec61ba2685785c55fce5948b4cca5d910f | 7e326ba379d8e46fbf597938c1efcb99afb0d7e8 | /server/models.py | 6aaa295aa567f34255d38ca1326ffb51cf140983 | [] | no_license | obulpathi/miner | 393f532e3901bbb3885155d6c8ff3ea363634e50 | 53e8221d6545cb9da2f166bfa771eceb11f730f6 | refs/heads/master | 2020-04-16T01:14:37.607057 | 2014-06-14T01:02:37 | 2014-06-14T01:02:37 | 12,418,001 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bitified Image Entry Model and helper functions."""
import cgi
from google.appengine.ext import db
# Datetime string format
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
class Miner(db.Model):
"""Main Miner model."""
user = db.StringProperty(required=True)
timestamp = db.DateTimeProperty(auto_now_add=True)
miner = db.StringProperty(required=True)
speed = db.StringProperty(required=True)
@property
def timestamp_strsafe(self):
if self.timestamp:
return self.timestamp.strftime(DATETIME_FORMAT)
return None
| [
"obulpathi@gmail.com"
] | obulpathi@gmail.com |
4aa9a980e8e71317eb2171d8d36ed74801927c53 | ef7f73ffe943eb6b2a4456d374de3fd49bc5d3bb | /src/kvt/augmentation/__init__.py | 88afc26d0a9e31864017394769d435a987ab2b6a | [
"BSD-2-Clause"
] | permissive | Ynakatsuka/birdclef-2021 | 11acf0a7c2a6463e574df7a058fbf8cc6ab782b6 | d7cf7b39e3164a75547ee50cc9a29bd5ed4c29bd | refs/heads/main | 2023-05-12T01:00:24.612945 | 2021-06-02T01:37:16 | 2021-06-02T01:37:16 | 358,929,160 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | from .audio import (
CosineVolume,
LowFrequencyMask,
OneOf,
PinkNoise,
RandomVolume,
SpecifiedNoise,
SpeedTuning,
StretchAudio,
TimeShift,
or_mixup,
)
from .augmix import RandomAugMix
from .autoaugment import ImageNetPolicy
from .block_fade import BlockFade
from .grid_mask import GridMask
from .histogram import HistogramNormalize
from .line import Line
from .mix import cutmix, mixup
from .morphological import RandomMorph
from .needless import NeedleAugmentation
from .random_erasing import RandomErasing
from .spec_augmentation import SpecAugmentationPlusPlus
from .sprinkle import Sprinkle
| [
"nk.tsssa@gmail.com"
] | nk.tsssa@gmail.com |
fb945c11cc4e07aba01f67c2de8cdc6fd748421c | 186f694b65b43cd56e746ce8538e4f1edad6129e | /1on1/Two_Pointer/lint-386.py | 7a6611cdb2072bd584bf7e854b88dde5e704967e | [] | no_license | zionhjs/algorithm_repo | 287486e0173e68cfa9e535490004c952192a54db | 26b4a770d5335abd738ae26c68d91f6af7b13749 | refs/heads/master | 2022-12-17T15:59:17.932490 | 2020-09-23T04:12:38 | 2020-09-23T04:12:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | class Solution:
"""
@param s: A string
@param k: An integer
@return: An integer
"""
def lengthOfLongestSubstringKDistinct(self, s, k):
if not s or k == 0:
return 0
alpha_counts = [0 for _ in range(256)]
count = 0
max_len = 0
i, j = 0, 0
while j < len(s):
char = ord(s[j])
alpha_counts[char] += 1
if alpha_counts[char] == 1:
count += 1
while i < j and count > k:
char = ord(s[i])
alpha_counts[char] -= 1
if alpha_counts[char] == 0:
count -= 1
i += 1
max_len = max(max_len, j-i+1)
j += 1
return max_len
| [
"hjszion@gmail.com"
] | hjszion@gmail.com |
d3f66cfc2fc33594b799867d55558df1743f6729 | 892a46487e056458270a774cda5b1b6752f25a84 | /1/run.py | e9e81aaa3f3325d052c1eee58ca189292dc8fdf7 | [] | no_license | taojy123/MyPhones | 249a980ccff475cf844ae858dae514ca967cc39e | 93291080dd7dc6c22054aa3d5a24a15ca1532912 | refs/heads/master | 2020-12-24T14:00:46.372187 | 2015-09-28T05:07:24 | 2015-09-28T05:07:24 | 22,827,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,439 | py |
#!/usr/bin/env python
import os
import sys
import webbrowser
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myphones.settings")
#these pertain to your application
import myphones.wsgi
import myphones.urls
import myphones.settings
import myphones.models
import myphones.views
import django.contrib.auth
import django.contrib.contenttypes
import django.contrib.sessions
import django.contrib.sites
import django.contrib.admin
import django.db.models.sql.compiler
from django.contrib.auth.backends import *
#from django.conf.urls.defaults import *
#these are django imports
import django.template.loaders.filesystem
import django.template.loaders.app_directories
import django.middleware.common
import django.contrib.sessions.middleware
import django.contrib.auth.middleware
import django.middleware.doc
import django.contrib.messages
import django.contrib.staticfiles
import django.contrib.messages.middleware
import django.contrib.sessions.backends.db
#import django.contrib.messages.storage.user_messages
import django.contrib.messages.storage.fallback
import django.db.backends.sqlite3.base
import django.db.backends.sqlite3.introspection
import django.db.backends.sqlite3.creation
import django.db.backends.sqlite3.client
import django.contrib.auth.context_processors
from django.core.context_processors import *
import django.contrib.messages.context_processors
import django.contrib.auth.models
import django.contrib.contenttypes.models
import django.contrib.sessions.models
import django.contrib.sites.models
import django.contrib.messages.models
import django.contrib.staticfiles.models
import django.contrib.admin.models
import django.template.defaulttags
import django.template.defaultfilters
import django.template.loader_tags
#dont need to import these pkgs
#need to know how to exclude them
import email.mime.audio
import email.mime.base
import email.mime.image
import email.mime.message
import email.mime.multipart
import email.mime.nonmultipart
import email.mime.text
import email.charset
import email.encoders
import email.errors
import email.feedparser
import email.generator
import email.header
import email.iterators
import email.message
import email.parser
import email.utils
import email.base64mime
import email.quoprimime
import django.core.cache.backends.locmem
import django.templatetags.i18n
import django.templatetags.future
import django.views.i18n
import django.core.context_processors
import django.template.defaulttags
import django.template.defaultfilters
import django.template.loader_tags
#from django.conf.urls.defaults import *
import django.contrib.admin.views.main
import django.core.context_processors
import django.contrib.auth.views
import django.contrib.auth.backends
import django.views.static
import django.contrib.admin.templatetags.log
#import django.contrib.admin.templatetags.adminmedia
import django.conf.urls.shortcut
import django.views.defaults
from django.core.handlers.wsgi import WSGIHandler
#from django.core.servers.basehttp import AdminMediaHandler
from django.conf import settings
from django.utils import translation
import django.contrib.staticfiles.urls
if __name__ == "__main__":
    # Launcher for the bundled Django project (Python 2 — note the print
    # statement below). With no CLI arguments, default to
    # `runserver 0.0.0.0:8000`; otherwise assume a server command was given
    # and open the local site in the default browser.
    if len(sys.argv)==1:
        sys.argv.append("runserver")
        sys.argv.append("0.0.0.0:8000")
    else:
        webbrowser.open_new_tab('http://127.0.0.1:8000')
    print sys.argv
    # Delegate to Django's management-command machinery (settings module was
    # configured via os.environ.setdefault at the top of this file).
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"taojy123@163.com"
] | taojy123@163.com |
b4bd715fdfbb95ac52bb29757e9e40e5b5bcfe71 | d57996fd2c6f92fdb457e8a35f6eb03a54dcf147 | /code_window/ScreenGeometry.py | d04fcc0d6da4137a249e55da885c47d80a1c34a9 | [] | no_license | ActonMartin/UI | 43aab17874d0e24bc35989b847bbe6d54c825e31 | 025d181dd6557822c28cd49af84ab9ffd3a0f274 | refs/heads/master | 2020-12-10T22:30:22.910870 | 2020-01-14T01:30:04 | 2020-01-14T01:30:04 | 233,730,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | import sys
from PyQt5.QtWidgets import QApplication,QMainWindow,QWidget,QPushButton
def onclick_button():
    """Slot for the button's clicked signal: print the module-level widget's
    position and size as reported by three Qt APIs — the direct accessors,
    geometry() and frameGeometry() — so their differences can be compared."""
    win = widget  # module-level QWidget under inspection
    print('1')
    print("widget.x()= %d" % win.x())
    print("widget.y()=%d" % win.y())
    print('widget.width()=%d' % win.width())
    print('widget.height()=%d' % win.height())
    print('2')
    geo = win.geometry()
    print("widget.geometry().x()=%d" % geo.x())
    print("widget.geometry().y()=%d" % geo.y())
    print("widget.geometry().width()=%d" % geo.width())
    print("widget.geometry().height()=%d" % geo.height())
    print('3')
    frame = win.frameGeometry()
    print("widget.frameGeometry().x()=%d" % frame.x())
    print("widget.frameGeometry().y()=%d" % frame.y())
    print("widget.frameGeometry().width()=%d" % frame.width())
    print("widget.frameGeometry().height()=%d" % frame.height())
app = QApplication(sys.argv)
widget = QWidget()
bth = QPushButton(widget)  # button parented to the widget
bth.setText('按钮')
bth.clicked.connect(onclick_button)
bth.move(24,100)  # button position inside its parent widget
widget.resize(300,200) # size of the client (work) area
widget.move(250,200)  # window position on the screen
widget.setWindowTitle('屏幕坐标系')
widget.show()
sys.exit(app.exec_())  # enter the Qt event loop; exit with its return code
| [
"wszcla@outlook.com"
] | wszcla@outlook.com |
cf95ac327ce7cd47aa6c9d323ed27a1f6cb0762e | 0c36a11f1a8659b7691f3f612030b63e40e65869 | /chan/html_净值曲线_opt.py | 6bea437aae3389736500f304f7902ecba6388317 | [] | no_license | webclinic017/joinquantNew | 93af36b540b20cc1cb27f97b2ede11f88ec1f9e2 | 0cbadfeba4d0629471773304f6541981957965e9 | refs/heads/master | 2023-01-12T00:42:19.419870 | 2020-11-16T09:19:53 | 2020-11-16T09:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,332 | py | # -*- coding: utf-8 -*-
# @Time : 2020/6/30 10:15
# @Author : zhangfang
import pandas as pd
# import html5lib
from trading_future.future_singleton import Future
from backtest_func import yearsharpRatio, maxRetrace, annROR, annROR_signal
import matplotlib.pyplot as plt
import numpy as np
def PowerSetsRecursive(items):
    """Return the power set of `items` as a list of lists.

    Starts from the empty subset and, for each element in turn, appends a
    copy of every subset built so far with that element added, so subsets
    appear in generation order (e.g. [], [a], [b], [a, b], ...).
    """
    subsets = [[]]
    for element in items:
        with_element = [existing + [element] for existing in subsets]
        subsets += with_element
    return subsets
if __name__ == "__main__":
    # Backtest aggregation: for each (leverage, date range, bar period) load
    # the per-symbol HTML trade reports, turn trades into daily return
    # series, combine them into an equal-weight portfolio, plot the equity
    # curve and write a summary spreadsheet.
    level_lst = [i for i in range(1, 6)]
    level_lst = [5]  # NOTE: overrides the line above -- only leverage 5 is run
    date_lst = [('2015-01-01', '2020-01-01'), ('2015-01-01', '2020-07-01')]
    # date_lst = [('2015-01-01', '2020-01-01')]
    method = 'sum' # simple interest: 'sum'; compound interest: 'muti'
    fee = np.float(0.00015)  # NOTE(review): np.float is deprecated in newer NumPy -- plain float is equivalent
    fold_ini_path = 'c://g//缠论//回测报告//'
    # fold_ini_path = 'G://缠论//回测报告//'
    porfolio = Future()
    mode = '蓝线笔_蓝线反转确认_蓝线反转平仓_200627'
    # Per-period symbol short-lists; per the original notes each list holds
    # the symbols whose Sharpe ratio was > 0 at that bar period.
    code_lst_5 = ['ap', 'j', 'rb', 'i', 'fu', 'sm', 'if', 'v', 'zn', 'pp', 'ni', 'pb'] # all 5-minute symbols with Sharpe > 0
    code_lst_15 = ['v', 'sm', 'sf', 'ap', 'ni', 'j', 'i', 'if', 'hc', 'cu', 'al', 'pp', 'zc', 'rb', 'c', 'zn',
                   'ag', 'pb', 'sc', 'sr', 'fu'] # all 15-minute symbols with Sharpe > 0
    code_lst_30 = ['zc', 'v', 'ap', 'sm', 'if', 'al', 'rb', 'j', 'sc', 'fu', 'i', 'ta', 'sf', 'hc', 'pp'] # all 30-minute symbols with Sharpe > 0
    code_lst_60 = ['ap', 'hc', 'j', 'rb', 'sc', 'al', 'ni', 'sf', 'fu', 'ta', 'zc', 'v',
                   'bu', 'i', 'sm', 'm', 'ma', 'tf', 'zn'] # all 60-minute symbols with Sharpe > 0
    code_lst_240 = ['al', 'cu', 'v', 'i', 'ma', 'j', 'zn', 'jm', 'fu', 'bu', 'rb',
                    'sm', 'ta', 'p', 'zc', 'hc', 'c', 'pp', 'if', 'ru', 'm', 'pb'] # all 4-hour symbols with Sharpe > 0
    code_lst_1440 = ['v', 'ma', 'fu', 'cu', 'j', 'au', 'cf', 'c', 'ta', 'pp', 'sf', 'ag', 'jm', 'sr', 'oi', 'tf', 'if', 'hc',
                     'bu', 'zn', 'sm'] # all daily-bar symbols with Sharpe > 0
    code_dict = {}
    code_dict['5'] = code_lst_5
    code_dict['15'] = code_lst_15
    code_dict['30'] = code_lst_30
    code_dict['60'] = code_lst_60
    code_dict['240'] = code_lst_240
    code_dict['1440'] = code_lst_1440
    # code_lst = ['ma', 'ta', 'c', 'bu', 'sf', 'v', 'sm', 'hc', 'rb', 'pp', 'p', 'zc', 'ag', 'al', 'i',
    #             'pb', 'ap', 'zn'] # 18 symbols with margin < 10000
    # code_lst = ['ma', 'ta', 'c', 'bu', 'sf', 'v', 'sm', 'hc', 'rb', 'pp', 'p'] # 11 symbols with margin < 5000
    # Accumulator for per-symbol statistics (keys mirror the report columns);
    # populated elsewhere -- kept for compatibility.
    ret = {}
    ret['symbol'] = []
    ret['tm'] = []
    ret['start_time'] = []
    ret['end_time'] = []
    ret['复盘模型'] = []
    ret['K线数量'] = []
    ret['盈利比'] = []
    ret['trading_times'] = []
    ret['盈利次数'] = []
    ret['平均盈利'] = []
    ret['平均亏损'] = []
    ret['点差'] = []
    ret['sharp'] = []
    ret['ann_return'] = []
    ret['max_drawdown'] = []
    ret['level'] = []
    porfolio_lst = []
    for level in level_lst:
        for (s_date, e_date) in date_lst:
            for period in [5, 15, 30, 60, 240, 1440]:
                mode_period = mode + '_' + str(period) + '分钟'
                fold_path = fold_ini_path + mode_period + '//'
                chg_df_all = pd.DataFrame(columns=['date_time'])
                code_lst = code_dict[str(period)]
                for code in code_lst:
                    print(code)
                    # First read: table 0 of the report is the summary block.
                    html = pd.read_html(fold_path + code + '.htm', encoding='gbk')
                    state = html[0]
                    print(state)
                    # Second read: table 1 holds the individual trades.
                    html = pd.read_html(fold_path + code + '.htm', encoding='gbk', header=0, index_col=0)
                    trade = html[1]
                    profit_df_all = trade[['时间', '获利']].rename(columns={'时间': 'date_time', '获利': 'profit'}).fillna(value=0)
                    # Normalise the trade timestamp to a YYYY-MM-DD day key.
                    profit_df_all['date_time'] = profit_df_all['date_time'].apply(lambda x: x[:4] + '-' + x[5:7] + '-' + x[8:10])
                    profit_df_all = profit_df_all.groupby(['date_time'])
                    profit_df = profit_df_all.sum()
                    profit_df['count'] = profit_df_all.count()
                    # trade_times_everyday = count_df.profit.mean()
                    profit_df['date_time'] = profit_df.index
                    profit_df = profit_df.assign(date_time=lambda df: df.date_time.apply(lambda x: str(x)[:10]))
                    profit_df = profit_df.reset_index(drop=True)
                    # Daily index closes, used for fee estimation and to turn
                    # cash profit into a return on contract value.
                    hq = pd.read_csv('e:/data/future_index/' + code.upper() + '_' + 'daily' + '_index.csv')[
                        ['date_time', 'close']].assign(date_time=lambda df: df.date_time.apply(lambda x: str(x)[:10]))
                    hq = hq[(hq['date_time'] > s_date) & (hq['date_time'] < e_date)]
                    contract_lst = [code.upper()]
                    VolumeMultiple = porfolio.get_VolumeMultiple(contract_lst)[code.upper()]['VolumeMultiple']
                    print(VolumeMultiple)
                    aum_ini = hq.close.tolist()[0] * VolumeMultiple * 2 * level
                    profit_df = hq.merge(profit_df, on=['date_time'], how='left').sort_values(['date_time'])
                    # profit_df = profit_df.fillna(0)
                    # Daily return: trade profit net of estimated fees, scaled
                    # by leverage and normalised by 2x one contract's value.
                    profit_df['chg'] = (profit_df['profit'] - profit_df['close'].shift(1) * profit_df['count'] * VolumeMultiple * fee) * level / profit_df['close'].shift(1) / (VolumeMultiple * 2)
                    profit_df = profit_df.fillna(0)
                    if method == 'sum':
                        profit_df['net'] = 1 + profit_df['chg'].cumsum()
                    else:
                        profit_df['net'] = (1 + profit_df['chg']).cumprod()
                    print(profit_df)
                    net_lst = profit_df.net.tolist()
                    chg_df_ = profit_df.reset_index(drop=False)[['date_time', 'chg']].rename(columns={'chg': 'chg_' + code})
                    chg_df_all = chg_df_all.merge(chg_df_, on=['date_time'], how='outer')
                chg_df_all = chg_df_all.fillna(value=0)
                # Equal-weight portfolio return across this period's symbols.
                chg_df = chg_df_all.sort_values(['date_time']).set_index(['date_time'])
                chg_name = ['chg_' + m for m in code_lst]
                chg_df['chg'] = chg_df[chg_name].sum(axis=1) / len(code_lst)
                if method == 'sum':
                    chg_df['net'] = 1 + chg_df['chg'].cumsum()
                else:
                    chg_df['net'] = (1 + chg_df['chg']).cumprod()
                chg_df = chg_df.reset_index(drop=False)
                chg_df['date_time'] = pd.to_datetime(chg_df['date_time'])
                chg_df = chg_df.set_index(['date_time'])
                chg_df.ix[:, ['net']].plot()  # NOTE(review): DataFrame.ix was removed in pandas 1.0 -- use .loc
                sharpe_ratio = yearsharpRatio(chg_df['net'].tolist(), 1)
                if method == 'sum':
                    ann_return = annROR_signal(chg_df['net'].tolist(), 1)
                else:
                    ann_return = annROR(chg_df['net'].tolist(), 1)
                max_drawdown = maxRetrace(chg_df['net'].tolist(), 1)
                porfolio_row = []
                porfolio_row.append(int(level))
                porfolio_row.append(len(code_lst))
                porfolio_row.append(str(period))
                porfolio_row.append(fee)
                porfolio_row.append(sharpe_ratio)
                porfolio_row.append(ann_return)
                porfolio_row.append(max_drawdown)
                porfolio_row.append(s_date)
                porfolio_row.append(e_date)
                porfolio_lst.append(porfolio_row)
                title_str = '品种%s个 周期%sm sharp %.2f annRet %.2f 回撤 %.2f 杠杆%s' % (str(len(code_lst)), str(period),
                                                                              sharpe_ratio, 100 * ann_return, 100 * max_drawdown, int(level))
                plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font so Chinese titles render
                plt.title(title_str)
                plt.savefig(fold_ini_path + 'fig/' + str(len(code_lst)) + '_' + str(period) + 'm' + '_fee_opt' + '.png')
                plt.show()
    porfolio_state = pd.DataFrame(porfolio_lst, columns=['杠杆率', '品种数', 'period', 'fee', 'sharpe_ratio', 'ann_return',
                                                         'max_drawdown', 's_date', 'e_date'])
    porfolio_state.to_excel(fold_ini_path + 'state_blue_line//state_porfolio_signal_period_' + method + '_opt.xlsx',
                            encoding='gbk')
| [
"519518384@qq.com"
] | 519518384@qq.com |
9b108dc93dae3c185f5fd4ad4d94489c27095057 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_MovingAverage_Seasonal_DayOfWeek_MLP.py | b2da5c3d66196e8f7a7296e7bbcc4ee517c2f8a1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 164 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['MovingAverage'] , ['Seasonal_DayOfWeek'] , ['MLP'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
0029fb3267463c9804af961f0c25a555fe4d1786 | fc29ccdcf9983a54ae2bbcba3c994a77282ae52e | /Leetcode_By_Topic/dp_2seq-583.py | 044a5edc5199781c6b697036ab50df590e614417 | [] | no_license | linnndachen/coding-practice | d0267b197d9789ab4bcfc9eec5fb09b14c24f882 | 5e77c3d7a0632882d16dd064f0aad2667237ef37 | refs/heads/master | 2023-09-03T19:26:25.545006 | 2021-10-16T16:29:50 | 2021-10-16T16:29:50 | 299,794,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py |
class Solution:
    """
    def minDistance(self, word1: str, word2: str) -> int:
        m, n = len(word1), len(word2)
        dp = [[0] * (n+1) for _ in range(m+1)]
        # edges
        for i in range(1, m+1):
            dp[i][0] = i
        for j in range(1, n+1):
            dp[0][j] = j
        for i in range(1, m+1):
            for j in range(1, n+1):
                if word1[i-1] == word2[j-1]:
                    dp[i][j] = dp[i-1][j-1]
                else:
                    dp[i][j] = min(dp[i-1][j]+1, dp[i][j-1]+1)
        return dp[-1][-1]
    """
    # LeetCode 583: minimum number of single-character deletions (from either
    # word) needed to make w1 and w2 equal. The docstring above keeps an
    # alternative direct delete-distance DP for reference; the active version
    # below computes the longest common subsequence (LCS) instead.
    def minDistance(self, w1: str, w2: str) -> int:
        m, n = len(w1), len(w2)
        # dp[i][j] = LCS length of w1[:i] and w2[:j]
        dp = [[0] * (n + 1) for i in range(m + 1)]
        for i in range(m):
            for j in range(n):
                # print(0+(w1[i] == w2[j]), (w1[i] == w2[j]))
                # Drop w1[i], drop w2[j], or extend the LCS when the chars
                # match (the boolean comparison contributes 1 when True).
                dp[i + 1][j + 1] = max(dp[i][j + 1], dp[i + 1][j], \
                    dp[i][j] + (w1[i] == w2[j]))
        # total len - longest common len
return m + n - 2 * dp[m][n] | [
"lchen.msc2019@ivey.ca"
] | lchen.msc2019@ivey.ca |
dcddd89959f1064a7904ec4071f2b74a51df8bab | ad212b92beac17c4d061848c1dcd443d02a168c8 | /python/0454_4sum_II/counters.py | 397e23695a0aeceb0cf68e0a3664427ecddf74b0 | [] | no_license | 21eleven/leetcode-solutions | 5ec97e4391c8ebaa77f4404a1155f3ef464953b3 | 35c91e6f5f5ed348186b8641e6fc49c825322d32 | refs/heads/master | 2023-03-03T10:22:41.726612 | 2021-02-13T21:02:13 | 2021-02-13T21:02:13 | 260,374,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | """
454. 4Sum II
Medium
Given four lists A, B, C, D of integer values, compute how many tuples (i, j, k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero.
To make problem a bit easier, all A, B, C, D have same length of N where 0 ≤ N ≤ 500. All integers are in the range of -228 to 228 - 1 and the result is guaranteed to be at most 231 - 1.
Example:
Input:
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
Output:
2
Explanation:
The two tuples are:
1. (0, 0, 0, 1) -> A[0] + B[0] + C[0] + D[1] = 1 + (-2) + (-1) + 2 = 0
2. (1, 1, 0, 0) -> A[1] + B[1] + C[0] + D[0] = 2 + (-1) + (-1) + 0 = 0
"""
class Solution:
def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
x = Counter()
y = Counter()
count = 0
for a in A:
for b in B:
x[a+b] += 1
for c in C:
for d in D:
y[c+d] += 1
for k, v in x.items():
if v2 := y[0-k]:
count += v*v2
return count
| [
"noahlidell@gmail.com"
] | noahlidell@gmail.com |
44e5ad078fcf91fcaff4eac2ee6a76bff6f9f15f | 960f2de01a49c822e6e4afd9595bf60d75f7dacc | /fbpagespam.py | 9277c724f17a9799dca6b7ee27df9efc0c0fd4bc | [] | no_license | 2020saurav/random-scripts | adbf5461e4f98d51a02cf2ee3992bdb82c77dc86 | b339c47765a9e18f86565ea9639f5ff6a7691f46 | refs/heads/master | 2021-01-23T14:05:29.907049 | 2014-11-11T20:33:22 | 2014-11-11T20:33:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import requests
import time
import urllib2
import json
# Facebook Graph API access token. Replace the placeholder below with a real
# token before running -- and never commit a real token to version control.
# (The spam message itself is set inside comment() below.)
TOKEN = 'put-your-access-token-here'
def get_posts():
    """Fetch the page's posts from the Facebook Graph API.

    Returns the decoded 'data' list from the JSON response (one dict per
    post). Uses the module-level TOKEN for authentication.
    """
    feed_url = 'https://graph.facebook.com/v2.2/891971000853462/posts?access_token=' + TOKEN
    response = urllib2.urlopen(feed_url)
    payload = json.loads(response.read())
    return payload["data"]
def comment(postId):
    """POST a comment on the given post via the Graph API, then pause 1s.

    The response is deliberately ignored; the sleep throttles the request
    rate between consecutive posts.
    """
    endpoint = 'https://graph.facebook.com/v2.2/' + postId + '/comments'
    message = 'SPAM'
    form = {'access_token': TOKEN, 'message': message}
    # NOTE: verify=False disables TLS certificate verification.
    requests.post(endpoint, data=form, verify=False)
    time.sleep(1)
if __name__ == '__main__':
    # Fetch the page's posts and leave one comment on each (Python 2 script
    # -- note the print statement below).
    posts = get_posts()
    for post in posts:
        comment(post["id"])
    print "Done"
| [
"2020saurav@gmail.com"
] | 2020saurav@gmail.com |
1081b155b8d6b430076939dd4c8bec7a57c484c7 | 33b5ef4f67e9c36d45990506e6f9f39d573ce730 | /folders/python/instagram/63def.py | 23caa9ce83278630e81abd66b2d763746fdb6fc7 | [] | no_license | dineshkumarkummara/my-basic-programs-in-java-and-python | 54f271e891e8d9dbdf479a9617e9355cbd0819e9 | b8a4cf455f4a057e382f7dda7581fad5b2f1e616 | refs/heads/master | 2022-12-12T06:02:55.558763 | 2020-08-19T12:14:39 | 2020-08-19T12:14:39 | 283,415,087 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | #Creating a function in Python. Use the "def" keyword,
# give a function a name and list its arguments.
def greet(name):
    """Print a greeting that includes the supplied name."""
    message = "hello my name is " + name
    print(message)
greet("sai")
greet("manu")
#sai and manu are arguments
#greet is a function name | [
"dk944176@gmail.com"
] | dk944176@gmail.com |
25e5547093fa72a5ff91fdf5c2545ec79a793127 | e5d059896640e25a57f29f5ec972c114f8ef5866 | /src/scs_analysis/socket_receiver.py | 68d886c62b7ddd4423cc75d222d28250d1d07be7 | [
"MIT"
] | permissive | tonybushido/scs_analysis | 10add7b13cee29e1445ea18240bdb08e3bc908a4 | 1121be19c83b0d616772da42ea90623d6f6573c4 | refs/heads/master | 2021-01-03T03:11:31.474595 | 2020-02-11T14:27:32 | 2020-02-11T14:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | #!/usr/bin/env python3
"""
Created on 18 Aug 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
source repo: scs_analysis
DESCRIPTION
The socket_receiver utility is used to accept data via a Unix socket, with data sourced from the same host, or
another host on the same local area network. A socket_sender utility is provided for the purpose of sourcing data,
as part of the scs_dev package.
The socket_receiver utility should be started before socket_sender. When socket_sender terminates, socket_receiver
will also terminate.
If a port number is not specified, then port 2000 is used.
SYNOPSIS
socket_receiver.py [-p PORT] [-v]
EXAMPLES
socket_receiver.py -p 2002
SEE ALSO
scs_analysis/uds_receiver
scs_dev/socket_sender
BUGS
It is possible to create scenarios where a port becomes orphaned. Depending on host operating systems, orphaned ports
may take time to be garbage collected.
"""
import sys
from scs_analysis.cmd.cmd_socket_receiver import CmdSocketReceiver
from scs_host.comms.network_socket import NetworkSocket
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # ----------------------------------------------------------------------------------------------------------------
    # cmd...
    # Parse command-line options (-p PORT, -v) -- see the module docstring.
    cmd = CmdSocketReceiver()
    if cmd.verbose:
        print("socket_receiver: %s" % cmd, file=sys.stderr)
    receiver = None
    try:
        # ------------------------------------------------------------------------------------------------------------
        # resources...
        # Open the receiving socket on the requested port ('' host).
        receiver = NetworkSocket('', cmd.port)
        if cmd.verbose:
            print("socket_receiver: %s" % receiver, file=sys.stderr)
            sys.stderr.flush()
        # ------------------------------------------------------------------------------------------------------------
        # run...
        # Relay every received message to stdout, acknowledging each one so
        # the sender can continue (ack semantics live in NetworkSocket).
        for message in receiver.read():
            print(message)
            sys.stdout.flush()
            receiver.ack()
    # ----------------------------------------------------------------------------------------------------------------
    # end...
    except KeyboardInterrupt:
        if cmd.verbose:
            print("socket_receiver: KeyboardInterrupt", file=sys.stderr)
    # ----------------------------------------------------------------------------------------------------------------
    # close...
    finally:
        # Always release the socket, even on error or Ctrl-C.
        if receiver:
            receiver.close()
| [
"bruno.beloff@southcoastscience.com"
] | bruno.beloff@southcoastscience.com |
559053148361b7bb8b316ba9e69c87eb3b307856 | 08eea46f91eb71972b66cc34a620df2a2d15e6a7 | /Advanced/Theano/Course/MaxPool.py | fa657097f96c4f937a1bd0ac9f0b3d0150916605 | [] | no_license | jsa4000/Getting-Started-Python | ab2f1ce51c78ce870560ab466c8408c9f81717e5 | 8f7f107a93bb7578a00d531123ee7f5db61d807e | refs/heads/master | 2021-01-11T14:19:18.123093 | 2019-01-12T07:47:51 | 2019-01-12T07:47:51 | 81,347,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | import theano
import theano.tensor as T
from theano.tensor.signal import downsample
import numpy
# Demonstration of what the max-pooling operation does to a matrix (Python 2
# script: note the print statements).
input = T.dtensor4('input')
maxpool_shape = (2, 2)
pool_out = downsample.max_pool_2d(input, maxpool_shape, ignore_border=True)
f = theano.function([input],pool_out)
invals = numpy.random.RandomState(1).rand(3, 2, 5, 5) # max_pool_2d pools over the last two axes of the 4-D array
#print invals
# max_pool_2d takes each 2-D slice invals[i, j, :, :] of the 4-D input,
# tiles it with non-overlapping maxpool_shape windows and keeps the maximum
# of each window, producing a downsampled matrix of the same rank.
print 'With ignore_border set to True:'
print 'invals[0, 0, :, :] =\n', invals[0, 0, :, :]
print 'output[0, 0, :, :] =\n', f(invals)[0, 0, :, :]
# Same pooling, but ignore_border=False keeps the leftover border values.
pool_out = downsample.max_pool_2d(input, maxpool_shape, ignore_border=False)
f = theano.function([input],pool_out)
print 'With ignore_border set to False:'
print 'invals[1, 0, :, :] =\n ', invals[1, 0, :, :]
print 'output[1, 0, :, :] =\n ', f(invals)[1, 0, :, :]
# Important note (ignore_border=True):
# - A 31x31 input with (2,2) pooling yields a 15x15 output: the output size
#   is the integer part of the division, int(31/2) = 15.
# - Likewise (3,3) pooling on a 31x31 input yields int(31/3) = 10, i.e. 10x10.
| [
"jsa4000@gmail.com"
] | jsa4000@gmail.com |
bb3c3f608d70d420e3a814c30207c034df5c72ea | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/PySide2/QtGui/QMatrix4x4.py | 3a5014bf558cdafbdfc83ec79abdb7af136dd07e | [] | no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | # encoding: utf-8
# module PySide2.QtGui
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtGui.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import Shiboken as __Shiboken
class QMatrix4x4(__Shiboken.Object):
    """Auto-generated IDE stub for PySide2's QMatrix4x4 (a 4x4 matrix type).

    Every method body is `pass`: this file exists only so editors can offer
    completion/introspection; the real implementation lives in the compiled
    QtGui binary. Do not edit by hand -- it is produced by a generator.
    """
    def column(self, *args, **kwargs): # real signature unknown
        pass

    def copyDataTo(self, *args, **kwargs): # real signature unknown
        pass

    def data(self, *args, **kwargs): # real signature unknown
        pass

    def determinant(self, *args, **kwargs): # real signature unknown
        pass

    def fill(self, *args, **kwargs): # real signature unknown
        pass

    def flipCoordinates(self, *args, **kwargs): # real signature unknown
        pass

    def frustum(self, *args, **kwargs): # real signature unknown
        pass

    def inverted(self, *args, **kwargs): # real signature unknown
        pass

    def isAffine(self, *args, **kwargs): # real signature unknown
        pass

    def isIdentity(self, *args, **kwargs): # real signature unknown
        pass

    def lookAt(self, *args, **kwargs): # real signature unknown
        pass

    def map(self, *args, **kwargs): # real signature unknown
        pass

    def mapRect(self, *args, **kwargs): # real signature unknown
        pass

    def mapVector(self, *args, **kwargs): # real signature unknown
        pass

    def normalMatrix(self, *args, **kwargs): # real signature unknown
        pass

    def optimize(self, *args, **kwargs): # real signature unknown
        pass

    def ortho(self, *args, **kwargs): # real signature unknown
        pass

    def perspective(self, *args, **kwargs): # real signature unknown
        pass

    def rotate(self, *args, **kwargs): # real signature unknown
        pass

    def row(self, *args, **kwargs): # real signature unknown
        pass

    def scale(self, *args, **kwargs): # real signature unknown
        pass

    def setColumn(self, *args, **kwargs): # real signature unknown
        pass

    def setRow(self, *args, **kwargs): # real signature unknown
        pass

    def setToIdentity(self, *args, **kwargs): # real signature unknown
        pass

    def toAffine(self, *args, **kwargs): # real signature unknown
        pass

    def toTransform(self, *args, **kwargs): # real signature unknown
        pass

    def translate(self, *args, **kwargs): # real signature unknown
        pass

    def transposed(self, *args, **kwargs): # real signature unknown
        pass

    def viewport(self, *args, **kwargs): # real signature unknown
        pass

    # Operator stubs below carry docstrings restored from the binary's
    # __doc__ attributes by the generator.
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass

    def __copy__(self, *args, **kwargs): # real signature unknown
        pass

    def __div__(self, y): # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass

    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass

    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __iadd__(self, y): # real signature unknown; restored from __doc__
        """ x.__iadd__(y) <==> x+=y """
        pass

    def __idiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__idiv__(y) <==> x/=y """
        pass

    def __imul__(self, y): # real signature unknown; restored from __doc__
        """ x.__imul__(y) <==> x*=y """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    def __isub__(self, y): # real signature unknown; restored from __doc__
        """ x.__isub__(y) <==> x-=y """
        pass

    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __lshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__lshift__(y) <==> x<<y """
        pass

    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    def __mul__(self, y): # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass

    def __neg__(self): # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    def __radd__(self, y): # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass

    def __rdiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdiv__(y) <==> y/x """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass

    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass

    def __rlshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rlshift__(y) <==> y<<x """
        pass

    def __rmul__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass

    def __rrshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rrshift__(y) <==> y>>x """
        pass

    def __rshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rshift__(y) <==> x>>y """
        pass

    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass

    def __rtruediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rtruediv__(y) <==> y/x """
        pass

    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass

    def __truediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__truediv__(y) <==> x/y """
        pass
| [
"noreply@github.com"
] | cundesi.noreply@github.com |
2b94ca5f26c8983462ef53b76ee77557a6b751e3 | 32904d4841d104143ba0f41cc3aeb749e470f546 | /backend/django/api/urls.py | 6e5dd09bbd5b11925232aa849f7d72ba6e996239 | [] | no_license | aurthurm/dispatrace-api-vuejs | 20ec5deee015e69bce7a64dc2d89ccae8941b800 | 56d122318af27ff64755fc515345974631d3026f | refs/heads/master | 2023-01-23T23:03:15.438339 | 2020-10-20T22:09:29 | 2020-10-20T22:09:29 | 219,028,985 | 0 | 1 | null | 2022-12-22T18:31:38 | 2019-11-01T17:08:35 | Vue | UTF-8 | Python | false | false | 877 | py | from django.urls import path, include
import api.accounts.routes, api.notices.routes, api.memos.routes
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView
)
from .views import ExtraTokenObtainPairView, get_user_data
app_name = 'api'  # URL namespace for reverse() lookups, e.g. 'api:token_obtain_pair'
urlpatterns = [
    # Sub-applications mounted under their own prefixes.
    path('accounts/', include(api.accounts.routes)),
    path('notices/', include(api.notices.routes)),
    path('memos/', include(api.memos.routes)),
    # JWT AUTHENTICATION (rest_framework_simplejwt views)
    path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/extras/', ExtraTokenObtainPairView.as_view(), name="token-extras"), # MODIFIED: custom pair view from .views -- presumably adds extra payload; verify there
    path('token/verify/', TokenVerifyView.as_view(), name='token_verify'),
    path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('user/', get_user_data, name='user'),
] | [
"aurthurmusendame@gmail.com"
] | aurthurmusendame@gmail.com |
093c22edc372fda95f2e5cdb6c60b226b694b071 | 7c81f9ea4c77007435781cc7fab991df50747c53 | /setup.py | d581e3ec5308d8ee33e528dbc682836b3c7dc998 | [
"MIT"
] | permissive | ArutyunovG/netron | dc259584f552e1ef2808cc7a0fc552d6681076c4 | 07f842e26d3618005ea75a23dd41313ddcfc28d0 | refs/heads/master | 2022-06-11T17:52:08.567397 | 2020-05-02T03:53:23 | 2020-05-02T03:53:23 | 260,730,924 | 0 | 0 | MIT | 2020-05-02T16:42:28 | 2020-05-02T16:42:27 | null | UTF-8 | Python | false | false | 6,872 | py | #!/usr/bin/env python
import distutils
import io
import json
import os
import setuptools
import setuptools.command.build_py
import distutils.command.build
# JavaScript runtime assets (taken from npm's node_modules) that must ship
# inside the installed 'netron' package; copied into the build by the custom
# build_py command's run() below. Format: (target package dir, [files]).
node_dependencies = [
    ( 'netron', [
        'node_modules/d3/dist/d3.min.js',
        'node_modules/dagre/dist/dagre.min.js',
        'node_modules/marked/marked.min.js',
        'node_modules/pako/dist/pako.min.js',
        'node_modules/long/dist/long.js',
        'node_modules/protobufjs/dist/protobuf.min.js',
        'node_modules/protobufjs/ext/prototxt/prototxt.js',
        'node_modules/flatbuffers/js/flatbuffers.js' ] )
]
class build(distutils.command.build.build):
    """Custom 'build' command adding a --version flag.

    The flag is forwarded to the custom build_py command (as a class
    attribute), which then rewrites the __version__ module from package.json.
    """
    # Extra option: `python setup.py build --version`
    user_options = distutils.command.build.build.user_options + [ ('version', None, 'version' ) ]
    def initialize_options(self):
        distutils.command.build.build.initialize_options(self)
        self.version = None
    def finalize_options(self):
        distutils.command.build.build.finalize_options(self)
    def run(self):
        # Hand the flag to build_py via a class attribute, then run the
        # standard build sequence.
        build_py.version = bool(self.version)
        return distutils.command.build.build.run(self)
class build_py(setuptools.command.build_py.build_py):
    """Custom ``build_py`` that bundles JS files and stamps the version.

    After the normal Python sources are copied it (a) copies the
    node_modules files listed in ``node_dependencies`` into the build
    tree and (b), when the ``--version`` flag was given to ``build``,
    rewrites the ``__version__`` module with the package.json version.
    """

    user_options = setuptools.command.build_py.build_py.user_options + [ ('version', None, 'version' ) ]

    def initialize_options(self):
        setuptools.command.build_py.build_py.initialize_options(self)
        self.version = None

    def finalize_options(self):
        setuptools.command.build_py.build_py.finalize_options(self)

    def run(self):
        setuptools.command.build_py.build_py.run(self)
        # Copy the JavaScript dependencies next to the Python sources.
        for target, files in node_dependencies:
            target = os.path.join(self.build_lib, target)
            if not os.path.exists(target):
                os.makedirs(target)
            for file in files:
                self.copy_file(file, target)

    def build_module(self, module, module_file, package):
        setuptools.command.build_py.build_py.build_module(self, module, module_file, package)
        # ``build_py.version`` is set by the custom build command above;
        # when truthy, overwrite the copied __version__ module with the
        # version read from package.json.
        if build_py.version and module == '__version__':
            package = package.split('.')
            outfile = self.get_module_outfile(self.build_lib, package, module)
            with open(outfile, 'w+') as f:
                f.write("__version__ = '" + package_version() + "'\n")
def package_version():
    """Return the version string declared in the adjacent package.json."""
    manifest_path = os.path.join(
        os.path.realpath(os.path.dirname(__file__)), 'package.json')
    with open(manifest_path) as manifest_file:
        manifest = json.load(manifest_file)
    return manifest['version']
# Package definition. Python sources live under src/ and install as the
# 'netron' package; the JavaScript dependencies are copied in by the
# custom build_py command registered in cmdclass.
setuptools.setup(
    name="netron",
    version=package_version(),
    description="Viewer for neural network, deep learning and machine learning models",
    long_description='Netron is a viewer for neural network, deep learning and machine learning models.\n\n' +
        'Netron supports **ONNX** (`.onnx`, `.pb`), **Keras** (`.h5`, `.keras`), **Core ML** (`.mlmodel`), **Caffe** (`.caffemodel`, `.prototxt`), **Caffe2** (`predict_net.pb`), **Darknet** (`.cfg`), **MXNet** (`.model`, `-symbol.json`), ncnn (`.param`) and **TensorFlow Lite** (`.tflite`). Netron has experimental support for **TorchScript** (`.pt`, `.pth`), **PyTorch** (`.pt`, `.pth`), **Torch** (`.t7`), **ArmNN** (`.armnn`), **BigDL** (`.bigdl`, `.model`), **Chainer** (`.npz`, `.h5`), **CNTK** (`.model`, `.cntk`), **Deeplearning4j** (`.zip`), **PaddlePaddle** (`__model__`), **MediaPipe** (`.pbtxt`), **ML.NET** (`.zip`), MNN (`.mnn`), **OpenVINO** (`.xml`), **scikit-learn** (`.pkl`), **Tengine** (`.tmfile`), **TensorFlow.js** (`model.json`, `.pb`) and **TensorFlow** (`.pb`, `.meta`, `.pbtxt`, `.ckpt`, `.index`).',
    keywords=[
        # FIX: a missing comma between 'mnn' and 'openvino' made Python
        # concatenate them into the single keyword 'mnnopenvino'.
        'onnx', 'keras', 'tensorflow', 'tflite', 'coreml', 'mxnet', 'caffe', 'caffe2', 'torchscript', 'pytorch', 'ncnn', 'mnn', 'openvino', 'darknet', 'paddlepaddle', 'chainer',
        'artificial intelligence', 'machine learning', 'deep learning', 'neural network',
        'visualizer', 'viewer'
    ],
    license="MIT",
    cmdclass={
        'build': build,
        'build_py': build_py
    },
    package_dir={
        'netron': 'src'
    },
    packages=[
        'netron'
    ],
    # Non-Python files shipped inside the 'netron' package: web UI assets
    # plus one .js/.json pair per supported model format.
    package_data={
        'netron': [
            'favicon.ico', 'icon.png',
            'base.js',
            'numpy.js', 'pickle.js', 'hdf5.js', 'bson.js',
            'zip.js', 'tar.js', 'gzip.js',
            'armnn.js', 'armnn-metadata.json', 'armnn-schema.js',
            'bigdl.js', 'bigdl-metadata.json', 'bigdl-proto.js',
            'caffe.js', 'caffe-metadata.json', 'caffe-proto.js',
            'caffe2.js', 'caffe2-metadata.json', 'caffe2-proto.js',
            'chainer.js',
            'cntk.js', 'cntk-metadata.json', 'cntk-proto.js',
            'coreml.js', 'coreml-metadata.json', 'coreml-proto.js',
            'darknet.js', 'darknet-metadata.json',
            'dl4j.js', 'dl4j-metadata.json',
            'flux.js', 'flux-metadata.json',
            'keras.js', 'keras-metadata.json',
            'mediapipe.js',
            'mlnet.js', 'mlnet-metadata.json',
            'mnn.js', 'mnn-metadata.json', 'mnn-schema.js',
            'mxnet.js', 'mxnet-metadata.json',
            'ncnn.js', 'ncnn-metadata.json',
            'onnx.js', 'onnx-metadata.json', 'onnx-proto.js',
            'openvino.js', 'openvino-metadata.json', 'openvino-parser.js',
            'paddle.js', 'paddle-metadata.json', 'paddle-proto.js',
            'pytorch.js', 'pytorch-metadata.json', 'python.js',
            'sklearn.js', 'sklearn-metadata.json',
            'tengine.js', 'tengine-metadata.json',
            'tf.js', 'tf-metadata.json', 'tf-proto.js',
            'tflite.js', 'tflite-metadata.json', 'tflite-schema.js',
            'torch.js', 'torch-metadata.json',
            'index.html', 'index.js',
            'view-grapher.css', 'view-grapher.js',
            'view-sidebar.css', 'view-sidebar.js',
            'view.js',
            'server.py'
        ]
    },
    install_requires=[],
    author='Lutz Roeder',
    author_email='lutzroeder@users.noreply.github.com',
    url='https://github.com/lutzroeder/netron',
    entry_points={
        'console_scripts': [ 'netron = netron:main' ]
    },
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Visualization'
    ]
)
"lutzroeder@users.noreply.github.com"
] | lutzroeder@users.noreply.github.com |
e321495336794031ffac05cf2c6817555903bab5 | 5410700e83210d003f1ffbdb75499062008df0d6 | /Day-2/ndarray-6.py | 965e6ad37da413d65901a3423f451ab63c239b05 | [] | no_license | lilyandcy/python3 | 81182c35ab8b61fb86f67f7796e057936adf3ab7 | 11ef4ace7aa1f875491163d036935dd76d8b89e0 | refs/heads/master | 2021-06-14T18:41:42.089534 | 2019-10-22T00:24:30 | 2019-10-22T00:24:30 | 144,527,289 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | import numpy as np
# Demonstrate 1-D ndarray slicing; each slice object below mirrors one
# a[start:stop:step] expression, printed in the same order as before.
a = np.array([0, 1, 2, 3, 4])
demo_slices = (
    slice(None),            # a       -> whole array
    slice(1, 3),            # a[1:3]
    slice(None, 3),         # a[:3]
    slice(1, None),         # a[1:]
    slice(1, -1),           # a[1:-1]
    slice(None, None, 2),   # a[::2]
    slice(1, 4, 2),         # a[1:4:2]
    slice(None, None, -1),  # a[::-1]
)
for s in demo_slices:
    print(a[s])
"myyan_yan@msn.com"
] | myyan_yan@msn.com |
b2b0207764e1be3090b747924840f7fb6775ecbc | c2242e8ce873f68efaf5c813820543a495e0d2e5 | /project/attendance_system/student/.~c9_invoke_it09KR.py | 399e07d384fbdfacabf746a0c0e08588b04df4a2 | [] | no_license | BUBTAMS/Attendance_system | 7d5471dd699ee3f976051687274d721d8b519e90 | f831b29a7083da570f7eccc9ed0bb19b10023fbb | refs/heads/master | 2022-03-25T22:10:11.989744 | 2019-12-15T10:18:41 | 2019-12-15T10:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.db import models
from django.forms import ModelForm
from .models import studentprofile
attributes=('student_id','image','mobile_number','parents_number','department','semester','dob','address','roll_no','batch','Class')
class StudentProfileDataForm(forms.ModelForm):
    """ModelForm exposing the base studentprofile fields.

    Consistency fix: reuse the module-level ``attributes`` tuple instead
    of duplicating the same field names inline — ``edit_student`` below
    already extends that tuple, so the two forms now stay in sync.
    """
    class Meta:
        model = studentprofile
        fields = attributes
class edit_student(forms.ModelForm):
email=forms.EmailField()
first_name=forms.CharField()
last_name=forms.CharField()
subject=forms.Model
class Meta:
model= studentprofile
fields=attributes + ('email','first_name','last_name')
def __init__(self,em,fn,ln,*args,**kwargs):
super() | [
"ranjaneabhishek.com"
] | ranjaneabhishek.com |
1899ab60d5f330bf69ce6a789f5aa04f754a4d41 | 9fab87766e5e4535db9d2a4f2bb9737d1c842ca9 | /python/test.py | 700fe0a846bbfbcfc8294bf961a98ca6e3aa8a0d | [
"Apache-2.0"
] | permissive | lavizhao/StumbleUponEvergreen | f10ac075b7b582d68385ae3d8017615d501632c0 | 85658c4efeeacbb2beae3ad74f7ad176053e4a55 | refs/heads/master | 2016-09-05T13:57:26.138467 | 2014-06-11T13:34:44 | 2014-06-11T13:34:44 | 20,257,014 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | #coding: utf-8
# NOTE(review): this is a Python 2 script (print statements) and uses
# sklearn.preprocessing.Imputer, which was removed in modern scikit-learn
# (replaced by sklearn.impute.SimpleImputer). Kept as-is for Python 2.
from read_conf import config
import csv
import numpy as np
from sklearn.preprocessing import Imputer

# Paths come from the project config file; "raw_train" is the training TSV.
dp = config("../conf/dp.conf")
f = open(dp["raw_train"])

# Replace missing values (NaN) with the per-column mean.
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)

reader = csv.reader(f, delimiter='\t')

a = 0          # row counter; row 0 is the header and is skipped
train = []     # numeric feature rows with '?' mapped to NaN
for line in reader:
    if a == 0:
        a += 1
        continue
    temp = []
    # Drop the first five columns and the last column (non-feature fields).
    line = line[5:]
    line = line[:-1]
    for item in line:
        if item == "?":
            temp.append(np.nan)   # '?' marks a missing value in this dataset
        else:
            temp.append(float(item))
    train.append(temp)
    a += 1

# Fit column means on the raw matrix, then fill in the NaNs.
imp.fit(train)
train = imp.transform(train)

# Spot-check the first ten imputed rows.
for i in range(10):
    print train[i]
| [
"zhaoximo@hotmail.com"
] | zhaoximo@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.