blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
378ea714e209d6c5672a433a408cfb3c7ae34d93 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02536/s373980977.py | 6ff28363fe4056683fdd331524dbb4a7e04042e3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | def main():
    # --- Union-Find (disjoint set) over the closed-over `parent` list. ---
    # Convention: parent[i] < 0  => i is a root and -parent[i] is its set size;
    #             parent[i] >= 0 => parent[i] is i's parent in the tree.
    def find(target):
        """Return the root of `target`, compressing the path along the way."""
        # NOTE(review): recursive compression may hit the recursion limit on
        # very deep trees -- confirm input bounds.
        if parent[target] < 0:
            return target
        else:
            parent[target] = find(parent[target])
            return parent[target]

    def is_same(x, y):
        """True if x and y are in the same set."""
        return find(x) == find(y)

    def union(x, y):
        """Merge the sets containing x and y (union by size)."""
        root_x = find(x)
        root_y = find(y)
        if root_x == root_y:
            return
        # Sizes are stored negated, so '>' selects the *smaller* set as root_y.
        if parent[root_x] > parent[root_y]:
            root_x, root_y = root_y, root_x
        parent[root_x] += parent[root_y]
        parent[root_y] = root_x

    # Not used in this solution, but shows which members share x's set.
    def members(n, x):
        root = find(x)
        return [i for i in range(n) if find(i) == root]

    def get_size(x):
        """Size of the set containing x."""
        return -parent[find(x)]

    def get_root():
        """All current roots, one per disjoint set."""
        return [i for i, root in enumerate(parent) if root < 0]

    # Read n vertices / m edges and union the endpoints (converted to 0-based).
    n, m = map(int, input().split())
    parent = [-1 for _ in range(n)]
    for _ in range(m):
        a, b = map(lambda x: int(x) - 1, input().split())
        union(a, b)
    # Number of extra edges needed to connect all components.
    ans = len(get_root()) - 1
    print(ans)

if __name__ == '__main__':
    main()
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0bff5d925eabd40deb4c7de2c92931e939a44306 | 89acc97d24548e0393201151975bf7e262949fcb | /examples/embed.py | 32d7d2052e0058bbedbf9feaf377e1606293d8b3 | [] | no_license | FrankWork/tf-tutorials | 40b15cc35379ecc03a9f2de1015585c43e5ecc3e | bb698f37fc9a3baa5f8e3cddc523d11872794e0f | refs/heads/master | 2022-10-09T21:30:53.851685 | 2018-09-13T06:34:16 | 2018-09-13T06:34:16 | 82,744,146 | 1 | 1 | null | 2022-10-01T07:13:11 | 2017-02-22T01:00:40 | Python | UTF-8 | Python | false | false | 619 | py | import tensorflow as tf
import numpy as np
# Toy demo: create a trainable embedding matrix and print its initial value.
vocab_size = 5
embed_size = 3

with tf.Graph().as_default(), tf.Session() as sess:
    # Earlier experiment (kept for reference): prepend a trainable UNK row to
    # a fixed all-zeros embedding table.
    # unk = tf.get_variable("unk", shape=[1, embed_size],
    #         dtype=tf.float32, initializer=tf.ones_initializer())
    # embed = [unk]
    # embed.append(tf.convert_to_tensor(np.zeros((vocab_size, embed_size)), dtype=tf.float32))
    # embed = tf.concat(embed, axis=0, name='concat_embed')

    # Initializing from a numpy array fixes both shape and (float64) dtype.
    embed = tf.get_variable('embed', initializer=np.ones((vocab_size, embed_size)))
    # All trainable variables in the graph (here: just `embed`).
    val = tf.trainable_variables()
    sess.run(tf.global_variables_initializer())
    val_np = sess.run(val)
    print(val_np)
"lzh00776@163.com"
] | lzh00776@163.com |
2cd05f45755b2cbdc09fb9ab6925cbcc9782dfc8 | 6c7355ae404490d0ff26c4ec925384242b7e9067 | /django introduction/demo/migrations/0004_auto_20200820_1622.py | 711cb98804700c2496efc2395cdbf97aa81b3e14 | [] | no_license | mahidulmoon/udemy-fullstack-practice | 7de8946a97224e554b97c490d18e71b0dc969e08 | e0f9ddd2b4dd3fa5ad486d92b7c9bdac8c77f05f | refs/heads/master | 2022-12-12T01:12:56.188124 | 2020-08-30T17:04:51 | 2020-08-30T17:04:51 | 288,985,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | # Generated by Django 3.0.6 on 2020-08-20 19:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('demo', '0003_auto_20200820_1147'),
]
operations = [
migrations.CreateModel(
name='BookNumber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isbn_10', models.CharField(blank=True, max_length=10)),
('isbn_13', models.CharField(blank=True, max_length=10)),
],
),
migrations.AddField(
model_name='book',
name='number',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='demo.BookNumber'),
),
]
| [
"mahidulmoon@gmail.com"
] | mahidulmoon@gmail.com |
7bb0b5eb5e3c901c314723c3cb2459ec0ae664d1 | 66b3d81d66491bf6c488f19896661eb7d99a0535 | /src/sample.py | 86ab8a34897a0d19f96b760eba326834ca5fe11e | [
"LicenseRef-scancode-other-permissive",
"MIT-Modern-Variant",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | quminzi/harfbuzz | fa5597ce6619d1ca86c0c86b9099e558872e8b98 | 2cd5323531dcd800549b2cb1cb51d708e72ab2d8 | refs/heads/master | 2020-04-06T03:44:34.844047 | 2015-01-07T03:16:38 | 2015-01-07T03:16:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from gi.repository import HarfBuzz as hb
from gi.repository import GLib
# Python 2/3 compatibility
try:
unicode
except NameError:
unicode = str
def tounicode(s, encoding='utf-8'):
if not isinstance(s, unicode):
return s.decode(encoding)
else:
return s
# Load the font file given on the command line.
fontdata = open (sys.argv[1], 'rb').read ()

# Wrap the raw bytes in a HarfBuzz blob, then build face and font objects.
blob = hb.glib_blob_create (GLib.Bytes.new (fontdata))
face = hb.face_create (blob, 0)
del blob
font = hb.font_create (face)
upem = hb.face_get_upem (face)
del face
# Scale glyph metrics to font units (units-per-em).
hb.font_set_scale (font, upem, upem)
#hb.ft_font_set_funcs (font)
hb.ot_font_set_funcs (font)

# Fill a buffer with mixed LTR/RTL text and let HarfBuzz guess
# script/language/direction before shaping.
buf = hb.buffer_create ()
hb.buffer_add_utf8 (buf, tounicode("Hello بهداد").encode('utf-8'), 0, -1)
hb.buffer_guess_segment_properties (buf)

hb.shape (font, buf, [])
del font

# Print one line per shaped glyph: glyph id, source cluster, x-advance.
infos = hb.buffer_get_glyph_infos (buf)
positions = hb.buffer_get_glyph_positions (buf)

for info,pos in zip(infos, positions):
    gid = info.codepoint
    cluster = info.cluster
    advance = pos.x_advance
    print(gid, cluster, advance)
| [
"behdad@behdad.org"
] | behdad@behdad.org |
f7e85d419b45b467ec0f9ecb8344fa2b19b9e103 | 62922a76e40003f3d3a7d02282853f9a2b76c6fc | /NLP/nltk1.py | 75d047d7cba31598635ee4accf293af060a56548 | [] | no_license | cchangcs/ai_learning_record | a7d0d9c7fcdc1e97d8869aa7e63b535f8cf62df2 | 235a90ff5fe0205334376a927d462b8ae64e4e70 | refs/heads/master | 2020-04-01T16:59:31.203223 | 2018-11-21T11:12:34 | 2018-11-21T11:12:34 | 153,408,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | import nltk
import jieba
# Read the novel as raw bytes and segment it with jieba.
raw = open('liangjian.TXT', 'rb').read()
# jieba.lcut() vs jieba.cut():
# jieba.cut() returns a generator -- iterate it to get each token;
# jieba.lcut() returns a list directly.
text = nltk.text.Text(jieba.lcut(raw))

# Concordance: show each occurrence of the word with surrounding context.
print(text.concordance(u'驻岛'))
# Common contexts: shared usage patterns of the two (near-synonym) words.
print(text.common_contexts(['小日本', '鬼子']))
# Most frequent bigram collocations in the text.
print(text.collocations())
# Dispersion plot: where the given words appear across the text.
text.dispersion_plot(['李云龙', '秀芹'])
| [
"752340690@qq.com"
] | 752340690@qq.com |
d16c1374c115f76767b9316071a5126edd9b63bb | bad85cd8d547a071baf4b6590f7e81d13ef1ec0d | /assistant/weblink_channel/migrations/0019_auto_20200819_0907.py | c43e45d76cfb514f560bbdcbba6e6be45084f979 | [
"MIT"
] | permissive | kapiak/ware_prod | 92e11671059642e14219d5aa8334e0564403db77 | ae61256890834c434d2e38cc2ccacf00b638665a | refs/heads/master | 2023-01-06T04:36:43.173093 | 2020-09-21T04:06:51 | 2020-09-21T04:06:51 | 310,320,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,794 | py | # Generated by Django 3.1 on 2020-08-19 09:07
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Auto-generated (2020-08-20): adds the BookNumber model and a nullable
    one-to-one ``number`` link from Book to it."""

    dependencies = [
        ('demo', '0003_auto_20200820_1147'),
    ]

    operations = [
        migrations.CreateModel(
            name='BookNumber',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('isbn_10', models.CharField(blank=True, max_length=10)),
                # NOTE(review): an ISBN-13 is 13 characters but max_length=10
                # here -- looks like a copy/paste of the isbn_10 field; confirm.
                ('isbn_13', models.CharField(blank=True, max_length=10)),
            ],
        ),
        migrations.AddField(
            model_name='book',
            name='number',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='demo.BookNumber'),
        ),
    ]
| [
"hamadeh.basel@gmail.com"
] | hamadeh.basel@gmail.com |
a7802d009966a5c9860b3f8685852ec97a59fc37 | 3efa9b57670d318b006d5eec837595683a7cb751 | /run_tests.py | 3601df35ca4471c85f461ad30e540d264eb03275 | [] | no_license | Erotemic/supersetup | c76d56eeb1e0a7322604510f8bb27c8faff6d593 | ab2b75be470b6db524acd74fc6e8235f1ab6f522 | refs/heads/main | 2023-08-28T00:27:25.517794 | 2021-11-05T20:24:16 | 2021-11-05T20:24:16 | 376,892,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
import pytest
import sys
package_name = 'supersetup'
pytest_args = [
'--cov-config', '.coveragerc',
'--cov-report', 'html',
'--cov-report', 'term',
'--xdoctest',
'--cov=' + package_name,
]
pytest_args = pytest_args + sys.argv[1:]
sys.exit(pytest.main(pytest_args))
| [
"erotemic@gmail.com"
] | erotemic@gmail.com |
1950318d50fddf94d54003132392554f9c5b0dac | 75e24fc71cf0833bb6040fa5037a0523c67d4581 | /nlplingo/tasks/sequence/bpjson.py | 24262871b860357be3a865693a02f463871edd05 | [
"Apache-2.0"
] | permissive | BBN-E/nlplingo | 53d5ff2aa17d03a1c6db8afc8ed2b0cf683b1c55 | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | refs/heads/main | 2022-12-19T19:28:11.666850 | 2020-10-09T01:16:32 | 2020-10-09T01:16:32 | 302,090,268 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,582 | py |
def document_prediction_to_bp_json(documents, corpus_id):
    """Convert abstract-event predictions to a BETTER bp-corpus (v8f) dict.

    One entry is produced per sentence; sentence ids are expected to look
    like ``<doc-id>_<sent-id>`` (split on the first two '_' fields).

    :type documents: list[DocumentPrediction]
    :param corpus_id: value stored under the top-level ``corpus-id`` key
    :return: dict ready to be serialized as bp.json
    """
    bp = dict()
    bp['corpus-id'] = corpus_id
    bp['format-type'] = 'bp-corpus'
    bp['format-version'] = 'v8f'
    bp['entries'] = dict()
    for document in documents:
        for sentence in document.sentences.values():
            entry_id = sentence.id
            bp['entries'][entry_id] = dict()
            bp['entries'][entry_id]['doc-id'] = entry_id.split('_')[0]
            bp['entries'][entry_id]['sent-id'] = entry_id.split('_')[1]
            bp['entries'][entry_id]['entry-id'] = entry_id
            bp['entries'][entry_id]['segment-type'] = 'sentence'
            bp['entries'][entry_id]['segment-text'] = sentence.text
            bp['entries'][entry_id]['annotation-sets'] = dict()
            bp['entries'][entry_id]['annotation-sets']['abstract-events'] = dict()
            bp['entries'][entry_id]['annotation-sets']['abstract-events']['events'] = dict()
            bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'] = dict()

            spans = set()
            # first collect all the spans from triggers and arguments
            for event in sentence.events.values():
                spans.add(event.trigger.text)
                spans.update(argument.text for argument in event.arguments.values())

            # Assign deterministic span-set ids (ss-1, ss-2, ...) by sorting
            # the span strings, so output is stable across runs.
            span_to_id = dict()
            for i, span in enumerate(sorted(spans)):
                span_to_id[span] = 'ss-{}'.format(str(i+1))

            for span in span_to_id:
                span_id = span_to_id[span]
                bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'][span_id] = dict()
                bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'][span_id]['ssid'] = span_id
                bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'][span_id]['spans'] = []
                span_d = dict()
                span_d['hstring'] = span
                span_d['string'] = span
                bp['entries'][entry_id]['annotation-sets']['abstract-events']['span-sets'][span_id]['spans'].append(span_d)

            # Event ids follow the (insertion) order of sentence.events.
            for i, event in enumerate(sentence.events.values()):
                event_d = dict()
                event_id = 'event{}'.format(str(i+1))
                assert event.trigger.text in span_to_id
                trigger_id = span_to_id[event.trigger.text]
                event_d['anchors'] = trigger_id
                event_d['eventid'] = event_id

                # The single trigger label encodes "<helpful-harmful>.<material-verbal>".
                # NOTE(review): asserts are used for input validation here --
                # they disappear under `python -O`.
                assert len(event.trigger.labels) == 1
                event_types = list(event.trigger.labels.keys())[0].split('.')
                assert len(event_types) == 2
                event_d['helpful-harmful'] = event_types[0]
                event_d['material-verbal'] = event_types[1]

                # Each argument carries exactly one role label: agent or patient.
                event_d['agents'] = []
                event_d['patients'] = []
                for argument in event.arguments.values():
                    assert argument.text in span_to_id
                    argument_id = span_to_id[argument.text]
                    assert len(argument.labels) == 1
                    argument_role = list(argument.labels.keys())[0].lower()
                    if argument_role == 'agent':
                        event_d['agents'].append(argument_id)
                    elif argument_role == 'patient':
                        event_d['patients'].append(argument_id)

                bp['entries'][entry_id]['annotation-sets']['abstract-events']['events'][event_id] = event_d
    return bp
| [
"hqiu@bbn.com"
] | hqiu@bbn.com |
69c168177c43574cbd853f4f8cc6a2858db2c7c0 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/wordy/2cb74d70ed0f4ca582f53ed59ded5843.py | 780680aef39b375639045d1d0ccfbfb865385441 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 1,867 | py | def calculate(s):
# Assume we have the 'What is..' opening
s = s.strip('What is ')
# Rather than tokenizing the 'by' following multiplied
# or divided, let's just remove it. The phrase won't
# lose any cdetail for computation.
for each in [' by',' to the',' power','th','nd','st','rd']:
s = s.replace(each,"")
# Divide our current phrase into four parts
items = s.split(' ',3)
if len(items) == 3 and items[2][-1] == '?':
items[2] = items[2].strip('?')
items.append('?')
# Check for errors in input
if not items[0].strip('-').isdigit() or not items[2].strip('-').isdigit():
raise ValueError("Incorrect sequence of items for wordy calculate.")
elif items[1] not in definitions():
raise ValueError("Operation not found in definitions.")
# Perform the first operation
ans = definitions()[items[1]](int(items[0]),int(items[2]))
# Subsequent operations will operate on the above answer
s = items[3]
items = s.split(" ",2)
# Continue operating until the end
while '?' not in items[0]:
if '?' in items[1]:
items[1] = items[1].strip('?')
items.append('?')
if not items[1].strip('-').isdigit():
raise ValueError("Incorrect sequence of items for wordy calculate.")
elif items[0] not in definitions():
raise ValueError("Operation not found in definitions.")
ans = definitions()[items[0]](ans,int(items[1]))
s = items[2]
items = s.split(" ",2)
return ans
def definitions():
return {'plus': lambda x,y:x+y,
'minus': lambda x,y:x-y,
'multiplied': lambda x,y:x*y,
'divided': lambda x,y:x/y,
'raised': lambda x,y:x**y
}
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
b017ebc4063d176c60cd9d5d47c2fd34455ecfe4 | 2e996d6870424205bc6af7dabe8685be9b7f1e56 | /code/processing/20181219_r3_O1O3_IND_titration_flow/processing.py | 4c382b4d6b125f48bd4b3786e5fec98e64c5c186 | [
"CC-BY-4.0",
"MIT"
] | permissive | minghao2016/mwc_mutants | fd705d44e57e3b2370d15467f31af0ee3945dcc2 | 0f89b3920c6f7a8956f48874615fd1977891e33c | refs/heads/master | 2023-03-25T03:56:33.199379 | 2020-06-26T20:09:00 | 2020-06-26T20:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | import numpy as np
import pandas as pd
import glob
import imp
import sys
sys.path.insert(0, '../../../')
import mut.flow
# Define the experiment parameters
DATE = 20181219
RUN_NO = 3
USERNAME = 'gchure'
CLASS = 'IND'
gating_fraction = 0.4

# Load all files.
files = glob.glob('../../../data/flow/csv/{0}*_r{1}*.csv'.format(DATE, RUN_NO))

# Set up the DataFrame
colnames = ['date', 'username', 'mutant', 'operator', 'strain', 'IPTGuM',
            'mean_FITC_H']
df = pd.DataFrame([], columns=colnames)

for f in files:
    # Get the identifying information from the file name:
    # <date>_<run>_<operator>_<strain>_<mutant>_<conc>uM....csv
    date, _, operator, strain, mutant, conc = f.split('/')[-1].split('_')
    conc = float(conc.split('uM')[0])
    # Repressor copy number encoded in the strain name (e.g. "R260").
    rep = int(strain.split('R')[-1])

    # Load in the data and apply the Gaussian gate to keep the densest
    # `gating_fraction` of events.
    data = pd.read_csv(f)
    gated = mut.flow.gaussian_gate(data, gating_fraction)

    # Compute the mean fluorescence of the gated events.
    mean_FITC = gated['FITC-H'].mean()

    # Assemble the dictionary
    samp_dict = dict(date=date, username=USERNAME, mutant=mutant,
                     operator=operator, strain=strain, IPTGuM=conc,
                     mean_FITC_H=mean_FITC, repressors=rep)
    df = df.append(samp_dict, ignore_index=True)

# Fold-change per (IPTG concentration, operator): normalize each sample by
# the autofluorescence ('auto') and constitutive ('delta') controls.
fc_dfs = []
grouped = df[df['mutant']!='auto'].groupby(['IPTGuM', 'operator'])
mean_auto_df = df[df['mutant'] == 'auto']
for g, d in grouped:
    mean_auto = mean_auto_df[mean_auto_df['IPTGuM']==g[0]]['mean_FITC_H'].values[0]
    mean_delta = d.loc[d['mutant'] == 'delta']['mean_FITC_H'].values[0]
    d['fold_change'] = (d['mean_FITC_H'] - mean_auto) / (mean_delta - mean_auto)
    fc_dfs.append(d)
fold_change_df = pd.concat(fc_dfs, axis=0)

# Save to a CSV.
fold_change_df.to_csv(
    'output/{0}_r{1}_{2}_fold_change.csv'.format(DATE, RUN_NO, CLASS))

# Add the comments and save to the data/csv file.
# NOTE(review): `mutant` here is the leftover value from the last loop
# iteration, while the file above is named with CLASS -- confirm which is
# intended.
target = '../../../data/csv/{0}_r{1}_{2}_fold_change.csv'.format(DATE, RUN_NO,
                                                                 mutant)
with open('comments.txt', 'r') as f:
    comments = f.read().splitlines()
with open(target, 'a') as f:
    # NOTE(review): splitlines() strips newlines, so comment lines are
    # concatenated without separators here -- verify this is intended.
    for line in comments:
        f.write(line)
    fold_change_df.to_csv(f, mode='a', index=False)
"gchure@caltech.edu"
] | gchure@caltech.edu |
2f719719bb503926708f3f1e7b6dc163f7417df6 | 8ea2acd4b2b15f5edd4608dfc20cb6fed49995cd | /docs/sphinx/rest_substitutions/snippets/python/converted/wx.DataObjectComposite.1.py | 288e6c8a0d5e5757a5ff188f805efc25fcd8ecea | [] | no_license | timechild/Phoenix | cbace6e93f69eaa5f998ff7861dc8b763fe7eef7 | 2c2d44f3750d01692a99f96f65d8d70f39174528 | refs/heads/master | 2021-06-27T04:26:52.037016 | 2017-09-15T04:48:42 | 2017-09-15T04:48:42 | 103,706,741 | 1 | 0 | null | 2017-09-15T22:31:21 | 2017-09-15T22:31:21 | null | UTF-8 | Python | false | false | 1,068 | py |
def MyDropTarget(self):
    """Register a composite data object that accepts bitmaps (preferred
    format, added first) and file names as drop payloads."""
    dataobj = wx.DataObjectComposite()
    dataobj.Add(wx.BitmapDataObject(), True)
    dataobj.Add(wx.FileDataObject())
    self.SetDataObject(dataobj)

def OnData(self, x, y, defaultDragResult):
    """Handle a completed drop: fetch the received format and dispatch on
    whether a bitmap or a file list was dropped."""
    dragResult = wx.DropTarget.OnData(x, y, defaultDragResult)
    if dragResult == defaultDragResult:
        dataobjComp = self.GetDataObject()

        # NOTE(review): `dataObjects` is not defined in this snippet --
        # presumably this should be `dataobjComp`; confirm.
        format = dataObjects.GetReceivedFormat()
        dataobj = dataobjComp.GetObject(format)
        if format.GetType() == wx.DF_BITMAP:
            dataobjBitmap = dataobj
            # ... use dataobj.GetBitmap() ...
        elif format.GetType() == wx.DF_FILENAME:
            dataobjFile = dataobj
            # ... use dataobj.GetFilenames() ...
        else:
            raise Exception("unexpected data object format")

    return dragResult
| [
"robin@alldunn.com"
] | robin@alldunn.com |
63a26d8b55aaeaa632f4571a62a47a7c52bc529d | 61b87099017a2456c5c7b733a1c6559b988e4ebe | /user_portrait/cron/flow5/check_process.py | d17e151fb5e3e86b84d0aff6ae9cc34a1b0b0a2e | [] | no_license | zhuty16/user_portrait | 4fc4cc08e550864ebcfa3eef200127a94f07043b | a5e8ea3d28316e0d8822f92951462aad5c3d2355 | refs/heads/master | 2021-05-29T07:05:58.520759 | 2015-08-08T13:07:50 | 2015-08-08T13:07:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | # -*- coding:utf-8 -*-
import subprocess
import sys
import os
import time
def check(p_name):
cmd = 'ps -ef|grep %s|grep -v "grep"' % p_name
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
if p.wait() == 0:
val = p.stdout.read()
print val
if p_name in val:
print "ok - %s python process is running" % p_name
else:
print "no process is running!"
os.system("python ./%s &" % p_name)
def check_redis(p_name):
    """Restart the redis server when no process matching *p_name* is found
    in the `ps` listing (the grep itself is excluded)."""
    cmd = 'ps -ef|grep %s|grep -v "grep"' % p_name
    # Hard-coded install location of the redis build to restart.
    restart_cmd = 'cd /home/ubuntu3/huxiaoqian/redis-2.8.13 && src/redis-server redis.conf'
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    # communicate() collects all stdout without deadlocking on a full pipe.
    (stdoutput, erroutput) = p.communicate()
    val = stdoutput
    if p_name in val:
        print "ok - %s process is running" % p_name
    else:
        os.system(restart_cmd)
def check_elasticsearch(p_name):
    """Restart elasticsearch (daemonized, 15g heap) when no process matching
    *p_name* is found in the `ps` listing."""
    cmd = 'ps -ef|grep %s|grep -v "grep"' % p_name
    # Hard-coded install location and JVM options for the restart.
    restart_cmd = 'cd /home/ubuntu3/yuankun/elasticsearch-1.6.0 && bin/elasticsearch -Xmx15g -Xms15g -Des.max-open-files=true -d'
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    (stdoutput, erroutput) = p.communicate()
    if p_name in stdoutput:
        print "%s ok - %s process is running" % (time.ctime(), p_name)
    else:
        os.system(restart_cmd)
if __name__ == '__main__':
    # test procedure running
    d_name = ['zmq_work_weibo_flow5.py']
    for item in d_name:
        check(item)
    # The block below is intentionally disabled; re-enable to also watch
    # the redis and elasticsearch services.
    '''
    # test redis running
    check_redis("redis")
    # test elasticsearch running
    check_elasticsearch("elasticsearch")
    sys.exit(0)
    '''
"1257819385@qq.com"
] | 1257819385@qq.com |
fe3840957fde644b7fc61ad6d81141e5c485844f | 0c9e35012baf61ee678bc719588b8cb2ccbe449e | /product/migrations/0078_locationstudio.py | c35d80b4b58d4926bd6221b180331d428b08a63c | [] | no_license | rickyakilimali/approeco | 6f0f62d57b6e5361b5c5dd473038f2999bac1413 | fd96ca6d70dabf20668d2a582c67e5d409a4a097 | refs/heads/master | 2018-09-21T12:44:27.414394 | 2018-06-06T16:35:40 | 2018-06-06T16:35:40 | 113,836,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-31 12:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (2020-01-31): adds the LocationStudio model (studio
    rental offering with duration, price, currency units)."""

    dependencies = [
        ('category', '0002_auto_20180118_1155'),
        ('product', '0077_communique'),
    ]

    operations = [
        migrations.CreateModel(
            name='LocationStudio',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom', models.CharField(max_length=250)),
                ('is_active', models.BooleanField()),
                ('duree_location', models.CharField(choices=[('30 MIN', '30 MIN'), ('1H', '1H')], max_length=100, verbose_name='DUREE LOCATION')),
                ('prix', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='PRIX')),
                # NOTE(review): the choices list contains duplicates, e.g.
                # ('USD', ...) and ('USD/KG', ...) appear twice -- confirm.
                ('units', models.CharField(choices=[('USD', 'US$'), ('EUROS', 'EUROS'), ('USD/JOUR', 'USD/JOUR'), ('USD/PAGE', 'USD/PAGE'), ('USD/M2', 'USD/M2'), ('USD/MOIS', 'USD/MOIS'), ('USD/PIECE', 'USD/PIECE'), ('USD', 'USD'), ('USD/KG', 'USD/KG'), ('USD/PERSONNE', 'USD/PERSONNE'), ('USD/GARDIEN/MOIS', 'USD/GARDIEN/MOIS'), ('USD/LITRE', 'USD/LITRE'), ('USD/SPLIT', 'USD/SPLIT'), ('%', '%'), ('% DU SALAIRE', '% DU SALAIRE'), ('% DU 1ER SALAIRE', '% DU 1ER SALAIRE'), ('% DES FONDS TRANSPORTES', '% DES FONDS TRANSPORTES'), ('USD/PERSONNE/JOUR', 'USD/PERSONNE/JOUR'), ('USD/THEME', 'USD/THEME'), ('USD/KG', 'USD/KG'), ('USD/AN', 'USD/AN'), ('USD/HEURE', 'USD/HEURE'), ('USD/MODULE', 'USD/MODULE'), ('USD/KG OU L', 'USD/KG OU L'), ('% DU DEVIS', '% DU DEVIS')], max_length=50, verbose_name='UNITÉS')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='category.Category')),
                ('vendeur', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['prix'],
            },
        ),
    ]
| [
"ricky.akilimali@approeco.net"
] | ricky.akilimali@approeco.net |
0c952a070f362f63b5390ebbb68b549f1f653ea8 | 29e08aa28f26e73358c6b8f2f309c216dcf4400b | /4/openstack-dashboard/openstack_dashboard/dashboards/admin/metering/tabs.py | 40bca2fb36973361822388acf18fa774bb0720fd | [] | no_license | TsinghuaCloud/TsinghuaCloud2.0-gui | fefe76318fc21ebf56f90f7fac81f4c273f1f6b6 | 4c91ccc048b846037ab281f8f62221f45e8edf43 | refs/heads/master | 2021-01-17T07:01:25.125829 | 2016-04-18T15:41:58 | 2016-04-18T15:41:58 | 39,073,412 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,154 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.api import ceilometer
class GlobalStatsTab(tabs.Tab):
    """Horizon tab listing the Ceilometer meters recorded for Nova
    instances, each paired with a human-readable title."""

    name = _("Stats")
    slug = "stats"
    template_name = ("admin/metering/stats.html")
    # Load tab content on demand instead of with the surrounding page.
    preload = False

    @staticmethod
    def _get_flavor_names(request):
        """Return all flavor names, falling back to the default m1.* set
        when the Nova API call fails."""
        try:
            flavors = api.nova.flavor_list(request, None)
            return [f.name for f in flavors]
        except Exception:
            # NOTE(review): broad except -- any failure silently falls back.
            return ['m1.tiny', 'm1.small', 'm1.medium',
                    'm1.large', 'm1.xlarge']

    def get_context_data(self, request):
        """Build the template context: the sorted list of meters that belong
        to instances in the 'nova' availability zone, wrapped with titles."""
        query = [{"field": "metadata.OS-EXT-AZ:availability_zone",
                  "op": "eq",
                  "value": "nova"}]
        try:
            instances = ceilometer.resource_list(request, query,
                                            ceilometer_usage_object=None)
            meters = ceilometer.meter_list(request)
        except Exception:
            instances = []
            meters = []
            exceptions.handle(request,
                              _('Unable to retrieve Nova Ceilometer '
                                'metering information.'))
        # Keep only meters attached to the instances found above.
        instance_ids = set([i.resource_id for i in instances])
        instance_meters = set([m.name for m in meters
                               if m.resource_id in instance_ids])

        # Human-readable descriptions for the known meter names.
        meter_titles = {"instance": _("Duration of instance"),
                        "memory": _("Volume of RAM in MB"),
                        "cpu": _("CPU time used"),
                        "cpu_util": _("Average CPU utilisation"),
                        "vcpus": _("Number of VCPUs"),
                        "disk.read.requests": _("Number of read requests"),
                        "disk.write.requests": _("Number of write requests"),
                        "disk.read.bytes": _("Volume of reads in B"),
                        "disk.write.bytes": _("Volume of writes in B"),
                        "disk.root.size": _("Size of root disk in GB"),
                        "disk.ephemeral.size": _("Size of ephemeral disk "
                                                 "in GB"),
                        "network.incoming.bytes": _("Number of incoming bytes "
                                                    "on the network for a VM interface"),
                        "network.outgoing.bytes": _("Number of outgoing bytes "
                                                    "on the network for a VM interface"),
                        "network.incoming.packets": _("Number of incoming "
                                                      "packets for a VM interface"),
                        "network.outgoing.packets": _("Number of outgoing "
                                                      "packets for a VM interface")}

        # Per-flavor duration meters, e.g. "instance:m1.small".
        for flavor in self._get_flavor_names(request):
            name = 'instance:%s' % flavor
            hint = (_('Duration of instance type %s (openstack flavor)') %
                    flavor)
            meter_titles[name] = hint

        class MetersWrap(object):
            """ A quick wrapper for meter and associated titles. """
            def __init__(self, meter, meter_titles):
                self.name = meter
                # Unknown meters get an empty title rather than a KeyError.
                self.title = meter_titles.get(meter, "")

        meters_objs = [MetersWrap(meter, meter_titles)
                       for meter in sorted(instance_meters)]

        context = {'meters': meters_objs}
        return context
class CeilometerOverviewTabs(tabs.TabGroup):
    """Tab group for the metering overview page; 'sticky' remembers the
    last-selected tab across requests."""
    slug = "ceilometer_overview"
    tabs = (GlobalStatsTab,)
    sticky = True
"root@controller.(none)"
] | root@controller.(none) |
075416780ca0fd70d872b7a9401baae8344ab08a | 156d6c4638773e5af6027b10336f60cca9a23252 | /src/ros_main.py | 57eabd65f11ea48dbd012d4adbe9913f69c1d2f8 | [
"MIT"
] | permissive | lijh1024/ros-RandLA-Net | 6f5d9a5095b97466acccc1bffc1dfdb9371ce4b8 | 5f15a6687bdced16615f7215fc1aa9ffacbc0ad2 | refs/heads/master | 2023-03-17T07:41:08.353403 | 2020-08-24T02:47:56 | 2020-08-24T02:47:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('ros_randla_net')
import sys
import rospy
from ros_node import InferenceNode
def main(args):
    """Initialise the inference ROS node and spin until shutdown.

    ``args`` mirrors ``sys.argv`` for symmetry with the rospy pattern but is
    currently unused.
    """
    rospy.init_node('~', anonymous=True)
    # Keep a reference so the node's subscribers/publishers stay alive.
    node = InferenceNode()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("Shutting down")

if __name__ == '__main__':
    main(sys.argv)
| [
"xxdsox@gmail.com"
] | xxdsox@gmail.com |
feba3d89953b0b88ad4f1147481b481a8e68b566 | dc9865587582f65e5be010a8a831972845c8dbb4 | /django_mri/utils/compression.py | 50991175ab3b0cce9db71e8c9035155fcb07b45f | [
"Apache-2.0"
] | permissive | TheLabbingProject/django_mri | d31bd15613c82a55e2a42eba1fa3617e952e8b72 | 5b5ca1b119144d01e526825d2b2a2b87541b4d4a | refs/heads/master | 2023-04-07T03:22:09.366540 | 2023-03-29T08:18:01 | 2023-03-29T08:18:01 | 205,411,473 | 7 | 2 | Apache-2.0 | 2023-02-17T08:22:53 | 2019-08-30T15:42:45 | Python | UTF-8 | Python | false | false | 2,131 | py | """
Definition of the :func:`~django_mri.utils.compression.compress` and
:func:`~django_mri.utils.compression.uncompress` utility functions.
"""
import gzip
import shutil
from pathlib import Path
def uncompress(
    source: Path, destination: Path = None, keep_source: bool = True
) -> Path:
    """
    Decompress the gzip file at *source*.

    When *destination* is omitted, the last suffix of *source* is dropped
    (e.g. ``scan.nii.gz`` -> ``scan.nii``).  If *source* is missing but a
    decompressed *destination* already exists, that file is returned
    instead of raising.

    Parameters
    ----------
    source : Path
        Gzip-compressed input file
    destination : Path, optional
        Where to write the decompressed output, by default None
    keep_source : bool, optional
        Whether to leave *source* in place, by default True

    Returns
    -------
    Path
        Path of the decompressed file
    """
    destination = destination or source.with_suffix("")
    try:
        # Opening the source first guarantees the destination is never
        # truncated when the source is missing.
        with gzip.open(source, "rb") as gz_in, open(destination, "wb") as raw_out:
            shutil.copyfileobj(gz_in, raw_out)
    except FileNotFoundError:
        # Source gone: fall back to an already-decompressed destination.
        if not destination.exists():
            raise
    else:
        if not keep_source:
            source.unlink()
    return destination
def compress(
    source: Path, destination: Path = None, keep_source: bool = True
) -> Path:
    """
    Gzip-compress the file at *source*.

    When *destination* is omitted, ``.gz`` is appended to the source name.
    If *source* is missing but a compressed *destination* already exists,
    that file is returned instead of raising.

    Parameters
    ----------
    source : Path
        File to compress
    destination : Path, optional
        Where to write the compressed output, by default None
    keep_source : bool, optional
        Whether to leave *source* in place, by default True

    Returns
    -------
    Path
        Path of the compressed file
    """
    destination = destination or source.with_suffix(source.suffix + ".gz")
    try:
        # Opening the source first guarantees the destination is never
        # truncated when the source is missing.
        with open(source, "rb") as raw_in, gzip.open(destination, "wb") as gz_out:
            shutil.copyfileobj(raw_in, gz_out)
    except FileNotFoundError:
        # Source gone: fall back to an already-compressed destination.
        if not destination.exists():
            raise
    else:
        if not keep_source:
            source.unlink()
    return destination
| [
"z.baratz@gmail.com"
] | z.baratz@gmail.com |
c6b6ef9cc4eb38a80c21d9622919755f9d0305b4 | 5e382a50c521e4cd874ed4e94799e5ef062994a1 | /services/web/server/src/simcore_service_webserver/rest.py | a9b29a0b75d27657ed565de0e4f3a730bc284f99 | [
"MIT"
] | permissive | KZzizzle/osparc-simcore | 71103bcfb81d6ea90e0ac9529e8f08568685166c | 981bc8d193f3f5d507e3225f857e0308c339e163 | refs/heads/master | 2021-05-25T08:46:52.704734 | 2020-10-07T14:07:34 | 2020-10-07T14:07:34 | 253,747,491 | 0 | 0 | MIT | 2020-04-07T09:29:23 | 2020-04-07T09:29:22 | null | UTF-8 | Python | false | false | 3,270 | py | """ Restful API
- Loads and validates openapi specifications (oas)
- Adds check and diagnostic routes
- Activates middlewares
"""
import logging
from pathlib import Path
from typing import Optional
import openapi_core
import yaml
from aiohttp import web
from aiohttp_swagger import setup_swagger
from openapi_core.schema.specs.models import Spec as OpenApiSpecs
from servicelib import openapi
from servicelib.application_setup import ModuleCategory, app_module_setup
from servicelib.rest_middlewares import (
envelope_middleware_factory,
error_middleware_factory,
)
from simcore_service_webserver.resources import resources
from . import rest_routes
from .__version__ import api_version_prefix
from .rest_config import APP_CONFIG_KEY, APP_OPENAPI_SPECS_KEY, get_rest_config
log = logging.getLogger(__name__)
def get_openapi_specs_path(api_version_dir: Optional[str] = None) -> Path:
if api_version_dir is None:
api_version_dir = api_version_prefix
return resources.get_path(f"api/{api_version_dir}/openapi.yaml")
def load_openapi_specs(spec_path: Optional[Path] = None) -> OpenApiSpecs:
if spec_path is None:
spec_path = get_openapi_specs_path()
with spec_path.open() as fh:
spec_dict = yaml.safe_load(fh)
specs: OpenApiSpecs = openapi_core.create_spec(spec_dict, spec_path.as_uri())
return specs
@app_module_setup(
__name__,
ModuleCategory.ADDON,
depends=["simcore_service_webserver.security"],
logger=log,
)
def setup(app: web.Application, *, swagger_doc_enabled: bool = True):
cfg = get_rest_config(app)
api_version_dir = cfg["version"]
spec_path = get_openapi_specs_path(api_version_dir)
# validated openapi specs
app[APP_OPENAPI_SPECS_KEY] = specs = load_openapi_specs(spec_path)
# version check
base_path = openapi.get_base_path(specs)
major, *_ = specs.info.version
if f"/v{major}" != base_path:
raise ValueError(
f"REST API basepath {base_path} does not fit openapi.yml version {specs.info.version}"
)
if api_version_prefix != f"v{major}":
raise ValueError(
f"__version__.api_version_prefix {api_version_prefix} does not fit openapi.yml version {specs.info.version}"
)
# diagnostics routes
routes = rest_routes.create(specs)
app.router.add_routes(routes)
# middlewares
# NOTE: using safe get here since some tests use incomplete configs
is_diagnostics_enabled = (
app[APP_CONFIG_KEY].get("diagnostics", {}).get("enabled", {})
)
app.middlewares.extend(
[
error_middleware_factory(
api_version_prefix, log_exceptions=not is_diagnostics_enabled,
),
envelope_middleware_factory(api_version_prefix),
]
)
#
# rest API doc at /dev/doc (optional, e.g. for testing since it can be heavy)
#
# NOTE: avoid /api/* since traeffik uses for it's own API
#
log.debug("OAS loaded from %s ", spec_path)
if swagger_doc_enabled:
setup_swagger(
app,
swagger_url="/dev/doc",
swagger_from_file=str(spec_path),
ui_version=3,
)
# alias
setup_rest = setup
__all__ = "setup_rest"
| [
"noreply@github.com"
] | KZzizzle.noreply@github.com |
c45b64bfb7dc8d638eb42afda514c9af33168a82 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/4549962f8535c6892637c74c8c7dd3f8953a1678-<compare_rules>-bug.py | 064c8abad58ff66bf4f274985b7d40b35b3c1717 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | def compare_rules(self):
'\n\n :return:\n '
rules_to_modify = []
rules_to_delete = []
rules_to_add = deepcopy(self.rules)
for current_rule in self.current_rules:
current_rule_passed_to_module = False
for new_rule in self.rules[:]:
if (current_rule['Priority'] == new_rule['Priority']):
current_rule_passed_to_module = True
rules_to_add.remove(new_rule)
modified_rule = self._compare_rule(current_rule, new_rule)
if modified_rule:
modified_rule['Priority'] = int(current_rule['Priority'])
modified_rule['RuleArn'] = current_rule['RuleArn']
modified_rule['Actions'] = new_rule['Actions']
modified_rule['Conditions'] = new_rule['Conditions']
rules_to_modify.append(modified_rule)
break
if ((not current_rule_passed_to_module) and (not current_rule['IsDefault'])):
rules_to_delete.append(current_rule['RuleArn'])
return (rules_to_add, rules_to_modify, rules_to_delete) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
968b4a3eb22447b3acbffbd207b2a8cff46648d8 | c7b4baa2779a0fc02e363f07c88b4d1d8cc33ffe | /gahtc/website/migrations/0034_merge.py | a748bb606b00e44de2215f0d021cbbdb0548dd72 | [] | no_license | NiJeLorg/GAHTC | 6d5c8b2d4b9244c8874ad60c16cd7d55a3535075 | 8ba3360f6e2a8ad0b937a60c3c022eaac4a7cd46 | refs/heads/master | 2022-12-08T19:26:05.800635 | 2018-06-07T02:31:43 | 2018-06-07T02:31:43 | 41,111,268 | 2 | 0 | null | 2022-11-22T01:43:36 | 2015-08-20T18:07:02 | HTML | UTF-8 | Python | false | false | 295 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0032_auto_20161108_0210'),
('website', '0033_profile_verified'),
]
operations = [
]
| [
"jd@nijel.org"
] | jd@nijel.org |
5a4893383d5c402e74ad89801720feac3f460235 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=60/params.py | cb30c863dd706689a5cbcad57cb8e6da369c00fe | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.636000',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 60,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
f53273e4fb7d5bfa568073ebd17ceb3d4f151c7b | 9b4e80a2dc6a660a33e3599515100a172038f922 | /python/HI/dijet_analysis/pp/Pythia6_Dijet250_pp_TuneZ2_5020GeV_cff.py | e1c952c1ed897b630e420d7910bef5bc8c0f8c8f | [] | no_license | Jelov/genproductions | 9013901ebcc58e6cfd13c69a52692dfc1994e280 | c65eab700fd6026ebec068d4b90366cc1387e51b | refs/heads/master | 2021-01-21T09:29:16.399045 | 2015-12-07T16:19:22 | 2015-12-07T16:19:22 | 48,558,617 | 0 | 0 | null | 2015-12-24T22:45:32 | 2015-12-24T22:45:32 | null | UTF-8 | Python | false | false | 1,701 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pyquen2015Settings_cff import *
generator = cms.EDFilter("PyquenGeneratorFilter",
collisionParameters5020GeV,
qgpParameters,
pyquenParameters,
doQuench = cms.bool(False),
bFixed = cms.double(0.0), ## fixed impact param (fm); valid only if cflag_=0
PythiaParameters = cms.PSet(pyquenPythiaDefaultBlock,
parameterSets = cms.vstring('pythiaUESettings',
'ppJets',
'kinematics'),
kinematics = cms.vstring ("CKIN(3)=250", #min pthat
"CKIN(4)=9999" #max pthat
)
),
cFlag = cms.int32(0), ## centrality flag
bMin = cms.double(0.0), ## min impact param (fm); valid only if cflag_!=0
bMax = cms.double(0.0) ## max impact param (fm); valid only if cflag_!=0
)
generator.doIsospin = cms.bool(False)
configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('PYTHIA (unquenched) dijets in NN (pt-hat > 250 GeV) at sqrt(s) = 2.76TeV')
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"yetkin.yilmaz@cern.ch"
] | yetkin.yilmaz@cern.ch |
39f57f94034ec65afb9a31d785b493155269c325 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/googlecloudsdk/core/resource/yaml_printer.py | 541fac881b57f5f76872b5fb01b72c1a324f928a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 5,210 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YAML format printer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from googlecloudsdk.core.resource import resource_printer_base
from googlecloudsdk.core.resource import resource_transform
from googlecloudsdk.core.yaml import dict_like
from googlecloudsdk.core.yaml import list_like
import six
from six.moves import range # pylint: disable=redefined-builtin
class YamlPrinter(resource_printer_base.ResourcePrinter):
"""Prints the YAML representations of JSON-serializable objects.
[YAML](http://www.yaml.org), YAML ain't markup language.
Printer attributes:
null=string: Display string instead of `null` for null/None values.
no-undefined: Does not display resource data items with null values.
For example:
printer = YamlPrinter(log.out)
printer.AddRecord({'a': ['hello', 'world'], 'b': {'x': 'bye'}})
produces:
---
a:
- hello
- world
b:
- x: bye
Attributes:
_yaml: Reference to the `yaml` module. Imported locally to improve startup
performance.
"""
def __init__(self, *args, **kwargs):
super(YamlPrinter, self).__init__(*args, retain_none_values=True, **kwargs)
# pylint:disable=g-import-not-at-top, Delay import for performance.
from ruamel import yaml
self._yaml = yaml
null = self.attributes.get('null')
def _FloatPresenter(unused_dumper, data):
return yaml.nodes.ScalarNode(
'tag:yaml.org,2002:float', resource_transform.TransformFloat(data))
def _LiteralLinesPresenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
def _NullPresenter(dumper, unused_data):
if null in ('null', None):
return dumper.represent_scalar('tag:yaml.org,2002:null', 'null')
return dumper.represent_scalar('tag:yaml.org,2002:str', null)
def _OrderedDictPresenter(dumper, data):
return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())
def _UndefinedPresenter(dumper, data):
r = repr(data)
if r == '[]':
return dumper.represent_list([])
if r == '{}':
return dumper.represent_dict({})
dumper.represent_undefined(data)
self._yaml.add_representer(float,
_FloatPresenter,
Dumper=yaml.dumper.SafeDumper)
self._yaml.add_representer(YamlPrinter._LiteralLines,
_LiteralLinesPresenter,
Dumper=yaml.dumper.SafeDumper)
self._yaml.add_representer(None,
_UndefinedPresenter,
Dumper=yaml.dumper.SafeDumper)
self._yaml.add_representer(type(None),
_NullPresenter,
Dumper=yaml.dumper.SafeDumper)
self._yaml.add_representer(collections.OrderedDict,
_OrderedDictPresenter,
Dumper=yaml.dumper.SafeDumper)
class _LiteralLines(six.text_type):
"""A yaml representer hook for literal strings containing newlines."""
def _UpdateTypesForOutput(self, val):
"""Dig through a dict of list of primitives to help yaml output.
Args:
val: A dict, list, or primitive object.
Returns:
An updated version of val.
"""
if isinstance(val, six.string_types) and '\n' in val:
return YamlPrinter._LiteralLines(val)
if list_like(val):
for i in range(len(val)):
val[i] = self._UpdateTypesForOutput(val[i])
return val
if dict_like(val):
for key in val:
val[key] = self._UpdateTypesForOutput(val[key])
return val
return val
def _AddRecord(self, record, delimit=True):
"""Immediately prints the given record as YAML.
Args:
record: A YAML-serializable Python object.
delimit: Prints resource delimiters if True.
"""
record = self._UpdateTypesForOutput(record)
self._yaml.safe_dump(
record,
stream=self._out,
default_flow_style=False,
indent=resource_printer_base.STRUCTURED_INDENTATION,
explicit_start=delimit,
# By default, the yaml module uses encoding=None on Py3 and
# encoding=utf8 on Py2. This is probably so you can write it directly to
# stdout and have it work, but since we put everything through the log
# module that handles the encoding there, we want to maintain everything
# as unicode strings here.
encoding=None)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
e8d1869e7a5f6cef61fa69fbf0439981ffd4f27f | 402ed5374ab189c8599b56864c5ce066f34b26c6 | /zfit/minimize.py | 9fff38606b3bd8f93797462dbabbb11864a9a636 | [
"BSD-3-Clause"
] | permissive | kailiu77/zfit | db354e9c3eb4a41274af5363834fe231823c6d66 | 8bddb0ed3a0d76fde0aa2cdbf74434b0ee0ae8bb | refs/heads/master | 2020-10-01T23:49:55.751825 | 2019-12-06T15:48:47 | 2019-12-06T15:48:47 | 227,650,723 | 1 | 0 | BSD-3-Clause | 2019-12-12T16:33:54 | 2019-12-12T16:33:53 | null | UTF-8 | Python | false | false | 533 | py | # Copyright (c) 2019 zfit
# from .minimizers.optimizers_tf import RMSPropMinimizer, GradientDescentMinimizer, AdagradMinimizer, AdadeltaMinimizer,
from .minimizers.optimizers_tf import Adam, WrapOptimizer
from .minimizers.minimizer_minuit import Minuit
from .minimizers.minimizers_scipy import Scipy
AdamMinimizer = Adam # legacy
MinuitMinimizer = Minuit # legacy
ScipyMinimizer = Scipy # legacy
__all__ = ['MinuitMinimizer', 'ScipyMinimizer', 'AdamMinimizer',
"WrapOptimizer",
"Adam", "Minuit", "Scipy"]
| [
"mayou36@jonas.eschle.com"
] | mayou36@jonas.eschle.com |
eca89db388397ccdddb20b4aa4430caec6456bc8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_reran.py | 0bd435238cf900398ead11d36e4a0cb93b838549 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py |
from xai.brain.wordbase.nouns._rerun import _RERUN
#calss header
class _RERAN(_RERUN, ):
def __init__(self,):
_RERUN.__init__(self)
self.name = "RERAN"
self.specie = 'nouns'
self.basic = "rerun"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6ebd1a7c1b5cc55a594b078e34b3f6bc74f6b175 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03283/s356634724.py | 66f46b19caea622c6741e3c3d780abc573ed2012 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | import sys
sys.setrecursionlimit(10010010)
def abc106_d():
n, m, q = map(int, input().split())
table = [[0]*(n+1) for _ in range(n+1)]
for _ in range(m):
l, r = map(int, input().split())
table[l][r] += 1
query = [tuple(map(int, input().split())) for _ in range(q)]
dp = [[-1]*(n+1) for _ in range(n+1)]
def calc(l, r):
nonlocal dp
if l == 0 or r == 0: return 0
if dp[l][r] != -1: return dp[l][r]
res = calc(l-1, r) + calc(l, r-1) - calc(l-1, r-1) + table[l][r]
dp[l][r] = res
return res
for p, q in query:
ans = calc(q, q) - calc(q, p-1) - calc(p-1, q) + calc(p-1, p-1)
print(ans)
if __name__ == '__main__':
abc106_d() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cc317c0a962ac26af9736f63157cf024a9de03ae | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_091615.49+132833.1/sdB_sdssj_091615.49+132833.1_lc.py | 9d48a955080db7419fd8f8baacbfa6e2382d130a | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[139.064542,13.475861], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_091615.49+132833.1/sdB_sdssj_091615.49+132833.1_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
c167ec7756b4bc4ef5d1f3840ebfad46f6b0f822 | 356b5455a4fb86c49c800a6181323b7fabef2b08 | /ppci/lang/ocaml/io.py | 5170f0ba3e9070d2451879fd2abe2a52d781cc15 | [
"BSD-2-Clause"
] | permissive | obround/ppci | be7d1ce7832513629ee1301e7b67c0ceda38d668 | ba0840bc5f4ffd889f882a814fb26f88cd854379 | refs/heads/master | 2023-02-11T13:47:35.439871 | 2021-01-05T22:33:08 | 2021-01-05T22:33:08 | 327,131,704 | 0 | 0 | BSD-2-Clause | 2021-01-05T22:08:23 | 2021-01-05T22:08:23 | null | UTF-8 | Python | false | false | 598 | py | """ OCaml i/o helpers.
"""
from ...format.io import BaseIoReader
class FileReader(BaseIoReader):
""" OCaml file reader helper with low level primitive read functions. """
def read_byte(self):
return self.read_bytes(1)[0]
def read_u8(self):
return self.read_fmt("B")
def read_s8(self):
return self.read_fmt("b")
def read_u16(self):
return self.read_fmt(">H")
def read_s16(self):
return self.read_fmt(">h")
def read_u32(self):
return self.read_fmt(">I")
def read_s32(self):
return self.read_fmt(">i")
| [
"windel@windel.nl"
] | windel@windel.nl |
c01ac9f16fe7691cc7f818e01188598024e8e91e | 1c5f4a13a5d67201b3a21c6e61392be2d9071f86 | /.VirtualEnv/Lib/site-packages/influxdb_client/domain/log_event.py | 0de871390b38b09d09f1940547d519a8f48899d8 | [] | no_license | ArmenFirman/FastAPI-InfluxDB | 19e3867c2ec5657a9428a05ca98818ca7fde5fd0 | b815509c89b5420f72abf514562e7f46dcd65436 | refs/heads/main | 2023-06-24T20:55:08.361089 | 2021-07-29T00:11:18 | 2021-07-29T00:11:18 | 390,462,832 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | # coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class LogEvent(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'time': 'datetime',
'message': 'str',
'run_id': 'str'
}
attribute_map = {
'time': 'time',
'message': 'message',
'run_id': 'runID'
}
def __init__(self, time=None, message=None, run_id=None): # noqa: E501,D401,D403
"""LogEvent - a model defined in OpenAPI.""" # noqa: E501
self._time = None
self._message = None
self._run_id = None
self.discriminator = None
if time is not None:
self.time = time
if message is not None:
self.message = message
if run_id is not None:
self.run_id = run_id
@property
def time(self):
"""Get the time of this LogEvent.
Time event occurred, RFC3339Nano.
:return: The time of this LogEvent.
:rtype: datetime
""" # noqa: E501
return self._time
@time.setter
def time(self, time):
"""Set the time of this LogEvent.
Time event occurred, RFC3339Nano.
:param time: The time of this LogEvent.
:type: datetime
""" # noqa: E501
self._time = time
@property
def message(self):
"""Get the message of this LogEvent.
A description of the event that occurred.
:return: The message of this LogEvent.
:rtype: str
""" # noqa: E501
return self._message
@message.setter
def message(self, message):
"""Set the message of this LogEvent.
A description of the event that occurred.
:param message: The message of this LogEvent.
:type: str
""" # noqa: E501
self._message = message
@property
def run_id(self):
"""Get the run_id of this LogEvent.
the ID of the task that logged
:return: The run_id of this LogEvent.
:rtype: str
""" # noqa: E501
return self._run_id
@run_id.setter
def run_id(self, run_id):
"""Set the run_id of this LogEvent.
the ID of the task that logged
:param run_id: The run_id of this LogEvent.
:type: str
""" # noqa: E501
self._run_id = run_id
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, LogEvent):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
| [
"42990136+ArmenFirman@users.noreply.github.com"
] | 42990136+ArmenFirman@users.noreply.github.com |
8170718f2b68613f34342a77d012941363b0f6fb | 840c19fdeb97216ad66b3e7fe236cfc17a061606 | /python/python08_집합형1_문자열.py | 4c76c35c9c641f15d0c2509a96c5dd9896bec337 | [] | no_license | choicoding1026/data | 07b431abdf36bcf7aefdf249fd1251acfd1e0334 | 684ca791108bc6ba0c315a70e3fa712c0ab2cca6 | refs/heads/master | 2022-12-24T17:55:25.092085 | 2020-10-08T04:54:39 | 2020-10-08T04:54:39 | 302,201,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | '''
집합형
1) 문자열
a. 문자열 생성 방법
b. 문자열 제공 함수
c. 인덱싱 및 슬라이싱
d. 문자열 특징
'''
# 1. 문자열 생성 방법 4가지
m = "hello"
m2 = 'hello'
m3 = '''hello'''
m4 = """hello"""
print(m, type(m))
print(m2, type(m2))
print(m3, type(m3))
print(m4, type(m4))
# triple 문자 사용 용도 ==> 문자열이 매우 길때 및 특정한 포맷(들여쓰기, 탭)으로 출력할 때
# 1. "" 또는 '' 사용한 경우
s = "asdfasdfasfasfasfasfasfewbdgaserfaserfwqesfafsdfasfdas" \
"fasfdasdfasfasfdasdfasfdasdfasfdasfdasfdasfasfdasfasfasfa" \
"sfasfasfasfasfasfasfasfasfas"
print(s)
# 2. triple 사용한 경우 ==> 들여쓰기,탭등 포맷형식이 유지된다. 따라서 가독성이 매우 높다.
s2 = '''
<html>
<body>
</body>
</html>
'''
print(s2)
| [
"justin6130@gmail.com"
] | justin6130@gmail.com |
2bef8052418e92e9d29c3d72a0a6fa8684c78926 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02607/s387198280.py | 23e0aeefbb0e2b34051edcabc0f6fe62bf503efd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | N = int(input())
A = [int(i) for i in input().split()]
ans = 0
for i,a in enumerate(A):
idx = i+1
if a%2 == 1 and idx%2 == 1:
ans += 1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7af3612998917d3a3fe539480b61fad8f29ac57e | b852bcf96bd21f8aad61df473e29249315043af5 | /tests/parsers/plist_plugins/ipod.py | aa0d497cb41ed32f0578e24465c098532344aa88 | [
"Apache-2.0"
] | permissive | tjemg/plaso | cad131da318bd6b23835b0f351f464e7edcdbc4a | 58dd7d03463624c628187edea97eb2665069c29f | refs/heads/master | 2020-04-08T21:53:54.863677 | 2016-09-12T14:17:59 | 2016-09-12T14:17:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the iPod plist plugin."""
import unittest
from plaso.formatters import ipod as _ # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.plist_plugins import ipod
from tests.parsers.plist_plugins import test_lib
class TestIPodPlugin(test_lib.PlistPluginTestCase):
"""Tests for the iPod plist plugin."""
def testProcess(self):
"""Tests the Process function."""
plist_name = u'com.apple.iPod.plist'
plugin_object = ipod.IPodPlugin()
storage_writer = self._ParsePlistFileWithPlugin(
plugin_object, [plist_name], plist_name)
self.assertEqual(len(storage_writer.events), 4)
event_object = storage_writer.events[1]
timestamp = timelib.Timestamp.CopyFromString(u'2013-10-09 19:27:54')
self.assertEqual(event_object.timestamp, timestamp)
expected_message = (
u'Device ID: 4C6F6F6E65000000 '
u'Type: iPhone [10016] '
u'Connected 1 times '
u'Serial nr: 526F676572 '
u'IMEI [012345678901234]')
expected_message_short = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_message_short)
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.LAST_CONNECTED)
self.assertEqual(event_object.device_class, u'iPhone')
self.assertEqual(event_object.device_id, u'4C6F6F6E65000000')
self.assertEqual(event_object.firmware_version, 256)
self.assertEqual(event_object.imei, u'012345678901234')
self.assertEqual(event_object.use_count, 1)
event_object = storage_writer.events[3]
timestamp = timelib.Timestamp.CopyFromString(u'1995-11-22 18:25:07')
self.assertEqual(event_object.timestamp, timestamp)
self.assertEqual(event_object.device_id, u'0000A11300000000')
if __name__ == '__main__':
unittest.main()
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
1bdf3cc297d6507dd8194bb94ae2200a0fed1fbe | 8f5c1ad76f3f9aa67d6720154b4884c9fab2ecbc | /toontown/election/DistributedToonfestCogAI.py | fb45a06666860c1eaf845c616553eefdc9c5a469 | [] | no_license | RegDogg/ttr-2014-dev | eb0d9da3e91b9504b83804c27e1a00d87a0b7220 | 8a392ea4697cf15bd83accd01dcf26d0f87557eb | refs/heads/master | 2023-07-13T02:40:56.171517 | 2021-07-12T00:31:28 | 2021-07-12T00:31:28 | 372,103,145 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | # Embedded file name: toontown.election.DistributedToonfestCogAI
from direct.interval.IntervalGlobal import *
from direct.fsm.FSM import FSM
from otp.ai.MagicWordGlobal import *
from toontown.election.DistributedHotAirBalloonAI import DistributedHotAirBalloonAI
from DistributedElectionCameraManagerAI import DistributedElectionCameraManagerAI
from DistributedSafezoneInvasionAI import DistributedSafezoneInvasionAI
from DistributedInvasionSuitAI import DistributedInvasionSuitAI
from InvasionMasterAI import InvasionMasterAI
from toontown.toonbase import ToontownGlobals
import SafezoneInvasionGlobals
import ElectionGlobals
import random
from otp.distributed.OtpDoGlobals import *
from direct.task import Task
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from toontown.election import *
class DistributedToonfestCogAI(DistributedObjectAI, FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedToonfestTowerAI')
def __init__(self, air, operation = 'SpeedUp'):
DistributedObjectAI.__init__(self, air)
FSM.__init__(self, 'ToonfestCogFSM')
self.air = air
self.validOperations = ['SpeedUp', 'SlowDown', 'Reverse']
if operation in self.validOperations:
self.operation = operation
else:
print 'DistributedToonfestCogAI: Operation %s is not a valid operation.' % operation
self.operation = 'SpeedUp'
def enterOff(self):
self.requestDelete()
def setPos(self, x, y, z):
self.sendUpdate('setPosThroughAI', [x, y, z])
def setId(self, id):
self.sendUpdate('setIdThroughAI', [id])
def enterDown(self):
pass
def enterUp(self):
pass
def updateTower(self):
if not isinstance(self.air.toonfestTower, DistributedToonfestTowerAI) or not self.air.toonfestTower:
print 'DistributedToonfestCogAI: ERROR! Could not find the ToonFest Tower.'
else:
base = random.randrange(0, 3)
self.air.toonfestTower.updateTower(self.operation, base)
print 'DistributedToonfestCogAI: Told Tower to ' + self.operation + ' base number ' + str(base + 1) | [
"regdogg.acr@gmail.com"
] | regdogg.acr@gmail.com |
a5d31969ad26e8a685b7b7d70d52b06ac9f25a93 | fa0bd730981a4a7333e7858c03e2a16c75e9cf5c | /Chapter 1/mnist_V7.py | d9038c4c2272d53acc72f1b12840acee6575d0cc | [
"MIT"
] | permissive | PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras | 4cb5f7249dcd1efe6ea5a5263fb862240ce303bb | e23d2b4a4292386b70977473805acb2f93ef16ca | refs/heads/master | 2023-02-13T04:04:57.531730 | 2023-02-07T19:23:47 | 2023-02-07T19:23:47 | 228,759,428 | 311 | 214 | MIT | 2021-06-01T14:06:06 | 2019-12-18T04:42:07 | Jupyter Notebook | UTF-8 | Python | false | false | 1,946 | py | import tensorflow as tf
import numpy as np
from tensorflow import keras
# network and training
EPOCHS = 20
BATCH_SIZE = 256
VERBOSE = 1
NB_CLASSES = 10 # number of outputs = number of digits
N_HIDDEN = 128
VALIDATION_SPLIT=0.2 # how much TRAIN is reserved for VALIDATION
DROPOUT = 0.3
# loading MNIST dataset
# verify
# the split between train and test is 60,000, and 10,000 respectly
# one-hot is automatically applied
mnist = keras.datasets.mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
#X_train is 60000 rows of 28x28 values --> reshaped in 60000 x 784
RESHAPED = 784
#
X_train = X_train.reshape(60000, RESHAPED)
X_test = X_test.reshape(10000, RESHAPED)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
#normalize in [0,1]
X_train, X_test = X_train / 255.0, X_test / 255.0
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
#one-hot
Y_train = tf.keras.utils.to_categorical(Y_train, NB_CLASSES)
Y_test = tf.keras.utils.to_categorical(Y_test, NB_CLASSES)
#build the model
model = tf.keras.models.Sequential()
model.add(keras.layers.Dense(N_HIDDEN,
input_shape=(RESHAPED,),
name='dense_layer', activation='relu'))
model.add(keras.layers.Dropout(DROPOUT))
model.add(keras.layers.Dense(N_HIDDEN,
name='dense_layer_2', activation='relu'))
model.add(keras.layers.Dropout(DROPOUT))
model.add(keras.layers.Dense(NB_CLASSES,
name='dense_layer_3', activation='softmax'))
# summary of the model
model.summary()
# compiling the model
model.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
#training the moodel
model.fit(X_train, Y_train,
batch_size=BATCH_SIZE, epochs=EPOCHS,
verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
#evalute the model
test_loss, test_acc = model.evaluate(X_test, Y_test)
print('Test accuracy:', test_acc)
# making prediction
predictions = model.predict(X_test)
| [
"noreply@github.com"
] | PacktPublishing.noreply@github.com |
117898cb3c43f04c6bf25322181fd0aa10d335c2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02757/s809101133.py | 792d3c9136512c8391a1450d9437aaf983b40525 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | n, p = map(int, input().split())
s = input()[::-1]
if p == 2:
ans = 0
for num, i in enumerate(s):
if int(i)%2 == 0:
ans += n-num
elif p == 5:
ans = 0
for num, i in enumerate(s):
if int(i)%5 == 0:
ans += n-num
else:
C = [0]*p
now = 0
for num, i in enumerate(s):
a = int(i)
now = (now+pow(10, num, p)*a)%p
C[now] += 1
ans = C[0]
for c in C:
ans += c*(c-1)//2
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
df4c5f3748bd5945cdcc4ac29d61933da4417e69 | eb38517d24bb32cd8a33206d4588c3e80f51132d | /proyecto_nn.py | 112dc9751e2184c3eda81cf9370647918e561ce6 | [] | no_license | Fernando23296/l_proy | 2c6e209892112ceafa00c3584883880c856b6983 | b7fdf99b9bd833ca1c957d106b2429cbd378abd3 | refs/heads/master | 2020-04-01T18:01:41.333302 | 2018-12-04T23:45:53 | 2018-12-04T23:45:53 | 153,466,681 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,892 | py | from matplotlib import pyplot as plt
import numpy as np
'''
____
1
283,70
1
308,63
1
253,60
1
281,61
1
303,54
1
279,54
1
343,55
1
335,49
1
290,34
1
327,34
1
299,32
1
287,22
1
169,15
1
0,0
1
287,5
____
2
0,0
2
263,66
2
325,58
2
266,57
2
290,45
2
0,0
2
275,34
2
0,0
2
0,0
2
338,19
2
292,14
2
335,10
2
276,9
2
357,9
2
333,5
2
290,6
2
0,0
____
3
278,74
3
237,67
3
291,63
3
242,43
3
0,0
3
270,34
3
268,29
3
247,22
3
284,9
3
314,7
3
259,7
3
271,6
____
4
0,0
4
229,63
4
0,0
4
0,0
4
226,53
4
0,0
4
280,42
4
226,37
4
282,4
4
237,0
____
5
0,0
5
239,72
5
294,64
5
268,61
5
247,57
5
290,56
5
0,0
5
285,36
5
267,36
5
276,30
5
269,23
5
231,25
5
0,0
5
266,13
5
286,12
5
225,12
5
240,10
5
0,0
5
229,1
____
6
254,70
6
303,67
6
327,61
6
352,51
6
249,52
6
0,0
6
245,20
6
0,0
6
297,16
6
278,11
6
294,9
6
289,2
____
7
0,0
7
241,74
7
0,0
7
259,48
7
260,32
7
244,24
7
258,20
7
255,12
7
254,2
____
8
0,0
8
0,0
8
209,71
8
214,64
8
263,60
8
0,0
8
187,55
8
243,49
8
198,42
8
209,41
8
186,39
8
191,39
8
245,34
8
189,30
8
0,0
8
187,24
8
177,20
8
264,10
8
0,0
8
174,8
8
0,0
8
247,1
8
229,1
____
9
0,0
9
302,72
9
0,0
9
278,69
9
356,68
9
205,70
9
341,50
9
366,49
9
342,44
9
216,43
9
363,43
9
251,41
9
289,35
9
353,36
9
314,33
9
236,34
9
303,30
9
280,30
9
255,30
9
0,0
9
352,25
9
211,22
9
351,19
9
388,16
9
223,14
9
301,11
9
0,0
9
210,12
9
288,7
9
0,0
9
0,0
9
220,5
9
0,0
9
0,0
____
10
209,71
10
311,69
10
258,71
10
215,56
10
0,0
10
0,0
10
224,53
10
383,54
10
339,51
10
325,50
10
201,49
10
363,46
10
202,44
10
293,42
10
277,43
10
243,36
10
313,30
10
374,27
10
353,20
10
365,19
10
314,11
10
0,0
10
310,4
10
304,4
10
221,4
____
11
200,74
11
349,68
11
378,69
11
254,59
11
248,57
11
236,56
11
241,53
11
205,52
11
197,49
11
232,46
11
314,46
11
322,42
11
265,31
11
320,25
11
246,15
11
268,13
11
0,0
11
212,3
11
263,1
____
12
386,0
12
201,0
ex2.jpg
data 1
1[282,0]
2[383,54]
3[356,144]
4[263,212]
5[278,315]
6[276,410]
7[290,718]
8[282,915]
data 2
1[282,0]
2[383,54]
3[278,145]
4[241,302]
5[297,320]
6[226,493]
7[268,561]
8[290,653]
9[282,9915]
data 3
1[[282,0],
2[311,69],
3[243,201],
4[254,230],
5[245,324],
6[247,554],
7[325,666],
8[290,718],
9[282,915]]
data 4
1[[282,0],
2[302,148],
3[243,201],
4[259,276],
5[226,493],
6[268,561],
7[325,666],
8[279,738],
9[282,915]]
data 5
1[[282,0],
2[258,248],
3[297,320],
4[285,416],
5[226,509],
6[325,666],
7[343,739],
8[277,808],
9[282,915]]
data 6
1[[282,0],
2[278,145],
3[241,302],
4[245,324],
5[294,444],
6[226,493],
7[284,541],
8[303,738],
9[282,915]]
data 7
1[[282,0],
2[224,53],
3[205,146],
4[327,365],
5[226,493],
6[268,561],
7[303,738],
8[338,807],
9[282,915]]
especial
[[282 215 278 209 259 297 247 280 291 275 283 282]
[ 0 56 145 193 276 320 437 498 595 642 754 915]]
[[209,71],
[311,69],
[258,71],
[215,56],
[224,53],
[383,54],
[339,51],
[325,50],
[201,49],
[363,46],
[202,44],
[293,42],
[277,43],
[243,36],
[313,30],
[374,27],
[353,20],
[365,19],
[314,11],
[310,4],
[304,4],
[221,4]]
SORTED
[[201, 49], [202, 44], [209, 71], [215, 56], [221, 4], [224, 53], [243, 36], [258, 71], [277, 43], [293, 42], [304, 4], [310, 4], [311, 69], [313, 30], [314, 11], [325, 50], [339, 51], [353, 20], [363, 46], [365, 19], [374, 27], [383, 54]]
____
'''
# Labeled training points: [x, y, class] with class 1 or 0.
# (Coordinates appear hand-picked from the annotations in the docstring above.)
data = [[304, 4, 1],
        [310, 4, 1],
        [311, 69, 1],
        [313, 30, 1],
        [314, 11, 1],
        [325, 50, 1],
        [339, 51, 1],
        [353, 20, 1],
        [363, 46, 1],
        [365, 19, 1],
        [374, 27, 1],
        [383, 54, 1],
        [282, 20, 0],
        [280, 77, 0],
        [278, 33, 0],
        [270, 88, 0],
        [268, 53, 0],
        [290, 45, 0],
        [320, 34,0],
        [323, 44,0],
        [330, 45,0]]
# Unlabeled point to classify after training.
valor_random = [298,32]
def sigmoid(x):
    """Logistic sigmoid: map any real (or numpy array) input into (0, 1)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def sigmoid_p(x):
    """Derivative of the logistic sigmoid evaluated at x."""
    s = sigmoid(x)
    return s * (1 - s)
def train(training_data=None):
    """Train a single sigmoid neuron with stochastic gradient descent.

    Args:
        training_data: iterable of [x, y, label] rows; defaults to the
            module-level ``data`` when omitted (backward compatible).

    Returns:
        (costs, w1, w2, b): per-iteration squared-error history plus the
        learned weights and bias.
    """
    if training_data is None:
        training_data = data  # module-level dataset defined above
    w1 = np.random.randn()
    w2 = np.random.randn()
    b = np.random.randn()
    iterations = 10000
    learning_rate = 0.1
    costs = []
    for i in range(iterations):
        # Pick one random sample per step (SGD).
        ri = np.random.randint(len(training_data))
        point = training_data[ri]
        z = point[0] * w1 + point[1] * w2 + b
        pred = sigmoid(z)
        target = point[2]
        # Squared error for the current random point.
        cost = np.square(pred - target)
        # BUG FIX: the history list was previously returned empty.
        costs.append(cost)
        # Chain rule through the squared error and the sigmoid.
        dcost_dpred = 2 * (pred - target)
        dpred_dz = sigmoid_p(z)
        dcost_dz = dcost_dpred * dpred_dz
        w1 = w1 - learning_rate * dcost_dz * point[0]
        w2 = w2 - learning_rate * dcost_dz * point[1]
        b = b - learning_rate * dcost_dz
    return costs, w1, w2, b
# Train on the module-level dataset and report the learned parameters.
costs, w1, w2, b = train()
print("valor w1:", w1)
print("valor w2:", w2)
print("valor b:", b)
# Score the held-out point with the trained neuron (values >0.5 ≈ class 1).
cero=valor_random[0]
uno=valor_random[1]
z = w1 * cero + w2 * uno + b
pred = sigmoid(z)
print(pred)
| [
"fernando23296@gmail.com"
] | fernando23296@gmail.com |
c6efa3651b14b09fdfb53dbc5d496a53f514a83b | 5dc77586e3e0f9de1f032fd2ca68494d8e58928f | /tests/expectations/core/test_expect_column_value_z_scores_to_be_less_than.py | 286de2251857f9da62734fd75119a0cf4b4e6d2d | [
"Apache-2.0"
] | permissive | great-expectations/great_expectations | dd7c22e6277d6b08bee3ff38a015e6e8cd434df6 | b0290e2fd2aa05aec6d7d8871b91cb4478e9501d | refs/heads/develop | 2023-09-04T09:30:26.395518 | 2023-09-02T00:00:13 | 2023-09-02T00:00:13 | 103,071,520 | 8,931 | 1,535 | Apache-2.0 | 2023-09-14T19:57:16 | 2017-09-11T00:18:46 | Python | UTF-8 | Python | false | false | 3,442 | py | from typing import Optional, cast
import pandas as pd
import pytest
from great_expectations import DataContext
from great_expectations.core.expectation_validation_result import (
ExpectationValidationResult,
)
from great_expectations.self_check.util import get_test_validator_with_data
from great_expectations.util import build_in_memory_runtime_context
@pytest.fixture
def z_score_validation_result():
    """Canonical successful validation result shared by the backend tests."""
    expectation_config = {
        "expectation_type": "expect_column_value_z_scores_to_be_less_than",
        "kwargs": {
            "column": "a",
            "mostly": 0.9,
            "threshold": 4,
            "double_sided": True,
        },
        "meta": {},
    }
    result = {
        "element_count": 6,
        "unexpected_count": 0,
        "unexpected_percent": 0.0,
        "partial_unexpected_list": [],
        "missing_count": 0,
        "missing_percent": 0.0,
        "unexpected_percent_total": 0.0,
        "unexpected_percent_nonmissing": 0.0,
    }
    exception_info = {
        "raised_exception": False,
        "exception_traceback": None,
        "exception_message": None,
    }
    return ExpectationValidationResult(
        success=True,
        expectation_config=expectation_config,
        result=result,
        exception_info=exception_info,
        meta={},
    )
@pytest.mark.unit
def test_pandas_expect_column_value_z_scores_to_be_less_than_impl(
    z_score_validation_result,
):
    # Six values with one mild outlier (22); with double_sided threshold 4
    # every z-score passes, matching the fixture's unexpected_count == 0.
    df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10]})

    context: Optional[DataContext] = cast(
        DataContext, build_in_memory_runtime_context(include_spark=False)
    )

    validator = get_test_validator_with_data(
        execution_engine="pandas",
        data=df,
        context=context,
    )
    result = validator.expect_column_value_z_scores_to_be_less_than(
        column="a", mostly=0.9, threshold=4, double_sided=True
    )

    # Full equality against the shared fixture pins success AND all metrics.
    assert result == z_score_validation_result
@pytest.mark.postgresql
def test_sa_expect_column_value_z_scores_to_be_less_than_impl(
    z_score_validation_result, test_backends
):
    # Skip when the test session was not started with a postgresql backend.
    if "postgresql" not in test_backends:
        pytest.skip("test_database_store_backend_get_url_for_key requires postgresql")

    # Same data as the pandas variant; the result must be backend-independent.
    df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10]})

    context: Optional[DataContext] = cast(
        DataContext, build_in_memory_runtime_context(include_spark=False)
    )

    validator = get_test_validator_with_data(
        execution_engine="postgresql",
        table_name="expect_column_value_z_scores_to_be_less_than_impl_1",
        data=df,
        context=context,
    )

    result = validator.expect_column_value_z_scores_to_be_less_than(
        column="a", mostly=0.9, threshold=4, double_sided=True
    )

    assert result == z_score_validation_result
# noinspection PyUnusedLocal
@pytest.mark.spark
def test_spark_expect_column_value_z_scores_to_be_less_than_impl(
    spark_session, basic_spark_df_execution_engine, z_score_validation_result
):
    # spark_session/basic_spark_df_execution_engine are unused directly but
    # their fixtures provision the Spark runtime this test depends on.
    df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10]})

    context: Optional[DataContext] = cast(
        DataContext, build_in_memory_runtime_context(include_pandas=False)
    )

    validator = get_test_validator_with_data(
        execution_engine="spark",
        data=df,
        context=context,
    )

    result = validator.expect_column_value_z_scores_to_be_less_than(
        column="a", mostly=0.9, threshold=4, double_sided=True
    )

    assert result == z_score_validation_result
| [
"noreply@github.com"
] | great-expectations.noreply@github.com |
ec00e7616ff2b1478df290425ccd36f44d326853 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_24/models/policy_member_response.py | 004d52f22194a5598a5e1b219c11dcc074aa1f85 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,933 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.24
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_24 import models
class PolicyMemberResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps model attribute name -> declared swagger type string.
    swagger_types = {
        'items': 'list[PolicyMember]'
    }
    # Maps model attribute name -> JSON key in the wire format.
    attribute_map = {
        'items': 'items'
    }
    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.PolicyMember]
    ):
        """
        Keyword args:
            items (list[PolicyMember]): Displays a list of all items after filtering. The values are displayed for each name, if meaningful.
        """
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Reject any attribute that is not part of the generated model schema.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `PolicyMemberResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unresolved lazy `Property` placeholders read as missing attributes.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        # Dict-style read access, restricted to schema attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `PolicyMemberResponse`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        # Dict-style write access, restricted to schema attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `PolicyMemberResponse`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        # Dict-style deletion, restricted to schema attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `PolicyMemberResponse`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated-code boilerplate: only taken if the model subclasses dict.
        if issubclass(PolicyMemberResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PolicyMemberResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
77d3496406cf164d4e8e89fe0a5dca548d70ffbf | bae04a67b13b5848ba2bd160792aa563738e9ec9 | /botauth.py | 613e76dbdbd951694cfce1b4de02f761eb2ef361 | [] | no_license | igrekus/stan_bot | 0fb28bf4efed219b117b2d640590565691c24b45 | 43eda93c0799e6f5b2b3676e8cb1a7db32eeae4f | refs/heads/master | 2023-03-18T12:53:45.736623 | 2021-03-09T12:55:25 | 2021-03-09T12:55:25 | 254,895,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | import dataset
class BotAuth:
    """Permission store for Telegram users, backed by a SQLite `dataset` DB."""

    def __init__(self, path='quotes.db'):
        # TODO store active permits in memory
        self.db = dataset.connect(f'sqlite:///{path}')
        self.authorized: dataset.table.Table = self.db['tg_user']
        self.permits: dataset.table.Table = self.db['tg_permits']
        self.user_permit_map: dataset.table.Table = self.db['tg_user_permits']
        # The default permit set granted to every newly registered user.
        self.base_permits = list(self.permits.find(title=['post links', 'post media']))

    def bot_user_exists(self, tg_user):
        # True if the Telegram user id is already registered.
        return bool(list(self.authorized.find(tg_id=tg_user.id)))

    def register_tg_user(self, tg_user):
        """Register a Telegram user and grant base permits; False if known."""
        if self.bot_user_exists(tg_user):
            return False
        # TODO make bot user class
        new_bot_user = self._upsert_bot_user(tg_user)
        self._add_base_permits(new_bot_user)
        return True

    def has_base_permits(self, bot_user):
        # NOTE(review): `bot_user` here is a Telegram user object (has .id),
        # not the dict form used by the private helpers — confirm callers.
        if not list(self.authorized.find(tg_id=bot_user.id)):
            return False
        return bool(list(
            self.user_permit_map.find(tg_user=bot_user.id, tg_permit=[perm['id'] for perm in self.base_permits])
        ))

    def voice(self, tg_user):
        """Grant base permits to an existing user; False if not registered."""
        if not self.bot_user_exists(tg_user):
            return False
        return self._voice_bot_user({
            'tg_id': tg_user.id,
            'username': tg_user.username,
            'first_name': tg_user.first_name,
            'last_name': tg_user.last_name,
        })

    def devoice(self, tg_user):
        """Revoke base permits from an existing user; False if not registered."""
        if not self.bot_user_exists(tg_user):
            return False
        return self._devoice_bot_user({
            'tg_id': tg_user.id,
            'username': tg_user.username,
            'first_name': tg_user.first_name,
            'last_name': tg_user.last_name,
        })

    def _voice_bot_user(self, bot_user):
        # TODO error handling
        self._add_base_permits(bot_user)
        return True

    def _devoice_bot_user(self, bot_user):
        # TODO error handling
        self._revoke_base_permits(bot_user)
        return True

    def _upsert_bot_user(self, user):
        # Insert-or-update keyed on tg_id; returns the stored dict form.
        new_bot_user = {
            'tg_id': user.id,
            'username': user.username,
            'first_name': user.first_name,
            'last_name': user.last_name,
        }
        self.authorized.upsert(new_bot_user, ['tg_id'])
        return new_bot_user

    def _add_base_permits(self, new_bot_user):
        # Upsert keeps the grant idempotent per (user, permit) pair.
        for perm in self.base_permits:
            self.user_permit_map.upsert({
                'tg_user': new_bot_user['tg_id'],
                'tg_permit': perm['id']
            }, ['tg_user', 'tg_permit'])

    def _revoke_base_permits(self, bot_user):
        for perm in self.base_permits:
            self.user_permit_map.delete(tg_user=bot_user['tg_id'], tg_permit=perm['id'])
| [
"upload234@mail.ru"
] | upload234@mail.ru |
4b5037c080276fe1ebc9d520708a9920f70310e5 | 2061caff7999645ff8c590acf77ad5bf2b6da305 | /source/toolkit.py | 3ef5171ebbb3ef34f7536f0930c917808250278e | [
"CC0-1.0"
] | permissive | wezu/pyweek21 | 63e36639fe52f3c6fad2616dbd5c27eb7e4f4bbd | aff8f2b6f6250e45763e77c12595c3dca177e864 | refs/heads/master | 2021-01-09T21:58:33.818859 | 2016-03-19T18:25:08 | 2016-03-19T18:25:08 | 52,712,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | from panda3d.core import *
from panda3d.bullet import *
def loadObject(model, H, pos, world, worldNP, root=render, collision_solid=None):
    """Load a visible model plus a static Bullet triangle-mesh collider.

    model: model file path; H/pos: heading and position in render space;
    world/worldNP: Bullet world and its scene-graph root;
    collision_solid: optional simplified mesh (falls back to `model`).
    Returns (model NodePath, collision NodePath).
    """
    new_model=loader.loadModel(model)
    new_model.clearModelNodes()
    new_model.reparentTo(root)
    new_model.setPos(render, pos)
    new_model.setH(render, H)
    new_model.setShader(Shader.load(Shader.SLGLSL, path+'shaders/default_v.glsl', path+'shaders/default_f.glsl'))
    # Build the collision mesh from a second (possibly simplified) load so
    # the visible model's transform is untouched by flattenStrong().
    if collision_solid:
        collision_mesh=loader.loadModel(collision_solid)
    else:
        collision_mesh=loader.loadModel(model)
    collision_mesh.setPos(render, pos)
    collision_mesh.setH(render, H)
    collision_mesh.flattenStrong()
    bullet_mesh = BulletTriangleMesh()
    # NOTE(review): only the first GeomNode's first Geom is used — multi-geom
    # models would need a loop here; confirm the assets are single-geom.
    geomNodes = collision_mesh.findAllMatches('**/+GeomNode')
    geomNode = geomNodes.getPath(0).node()
    geom = geomNode.getGeom(0)
    bullet_mesh.addGeom(geom)
    shape = BulletTriangleMeshShape(bullet_mesh, dynamic=False, bvh=True )
    collision = worldNP.attachNewNode(BulletRigidBodyNode('object'))
    collision.node().addShape(shape)
    collision.setCollideMask(BitMask32.allOn())
    world.attachRigidBody(collision.node())
    return (new_model, collision)
def tex(file_name, srgb=False):
    """Load a texture, optionally promoting its format to the sRGB variant."""
    texture = loader.loadTexture(file_name)
    fmt = texture.getFormat()
    if srgb:
        # Map linear color formats onto their sRGB counterparts; anything
        # else keeps its original format.
        fmt = {Texture.F_rgb: Texture.F_srgb,
               Texture.F_rgba: Texture.F_srgb_alpha}.get(fmt, fmt)
    texture.setFormat(fmt)
    return texture
def pos2d(x,y):
    """Convert screen-style (x, y) to a Point3 in aspect2d space (y flipped)."""
    return Point3(x,0,-y)
def rec2d(width, height):
    """Frame-size tuple (left, right, bottom, top) for a width x height rect."""
    left, right, bottom, top = -width, 0, 0, height
    return (left, right, bottom, top)
def resetPivot2d(frame):
    """Move a DirectGUI frame's pivot to its lower-left corner and bake it in."""
    size=frame['frameSize']
    # Shift by the frame's own extents, then flatten so the offset becomes
    # part of the geometry rather than the transform.
    frame.setPos(-size[0], 0, -size[3])
    frame.flattenLight()
    frame.setTransparency(TransparencyAttrib.MAlpha)
def fixSrgbTextures(model):
    """Reload a model's textures as sRGB and fix normal-map stage modes.

    Color textures get their format promoted to the sRGB variant; stages in
    M_normal mode are switched to M_normal_gloss and deliberately excluded
    from the sRGB conversion (normal data is linear).
    """
    for tex_stage in model.findAllTextureStages():
        tex=model.findTexture(tex_stage)
        if tex:
            file_name=tex.getFilename()
            tex_format=tex.getFormat()
            #print tex_stage, file_name, tex_format
            newTex=loader.loadTexture(file_name)
            if tex_stage.getMode()==TextureStage.M_normal:
                tex_stage.setMode(TextureStage.M_normal_gloss)
            # Order matters: the remap above makes normal stages skip this.
            if tex_stage.getMode()!=TextureStage.M_normal_gloss:
                if tex_format==Texture.F_rgb:
                    tex_format=Texture.F_srgb
                elif tex_format==Texture.F_rgba:
                    tex_format=Texture.F_srgb_alpha
            newTex.setFormat(tex_format)
            model.setTexture(tex_stage, newTex, 1)
| [
"grzechotnik1984@gmail.com"
] | grzechotnik1984@gmail.com |
5e18cf19781ab567ab705e5609abb37f764adcdd | bf473d57dff028f3167811a1cb76d7ce8e95b42d | /ocrmypdf/qpdf.py | dcccc2ecf04069eb579e416cb1498c56452b9c55 | [
"MIT"
] | permissive | thecocce/OCRmyPDF | ef6cb0e5e0c55433e2b7db244d5ecc6b0e6b183c | 514efa36fcc2f79ae173f429cb208a63ae968f5b | refs/heads/master | 2020-07-12T01:14:08.685260 | 2016-07-24T18:21:46 | 2016-07-24T18:21:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | #!/usr/bin/env python3
# © 2015 James R. Barlow: github.com/jbarlow83
from subprocess import CalledProcessError, check_output, STDOUT, check_call
import sys
import os
from . import ExitCode, get_program
def check(input_file, log):
    """Run ``qpdf --check`` on input_file; return True when it passes.

    Failures are logged (errors for broken PDFs, info for warnings) and
    reported by returning False.
    """
    command = [get_program('qpdf'), '--check', input_file]
    try:
        check_output(command, stderr=STDOUT, universal_newlines=True)
        return True
    except CalledProcessError as e:
        # qpdf exit codes: 2 = hard failure, 3 = succeeded with warnings.
        if e.returncode == 2:
            log.error("{0}: not a valid PDF, and could not repair it.".format(
                input_file))
            log.error("Details:")
            log.error(e.output)
        elif e.returncode == 3:
            log.info("qpdf --check returned warnings:")
            log.info(e.output)
        else:
            log.warning(e.output)
        return False
def repair(input_file, output_file, log):
    """Rewrite input_file to output_file with qpdf, repairing if possible.

    Exits the process with ExitCode.input_file for password-protected or
    unrecoverable PDFs, ExitCode.unknown otherwise.
    """
    args_qpdf = [
        get_program('qpdf'), input_file, output_file
    ]
    try:
        check_output(args_qpdf, stderr=STDOUT, universal_newlines=True)
    except CalledProcessError as e:
        # BUG FIX: str.find() returns -1 (truthy) on a miss and 0 (falsy) on
        # a hit at index 0, so the original `e.output.find(...)` conditions
        # matched almost any output. Use substring membership instead.
        if e.returncode == 3 and "operation succeeded" in e.output:
            # Exit code 3 means qpdf repaired the file with warnings.
            log.debug('qpdf found and fixed errors: ' + e.output)
            log.debug(e.output)
            return

        if e.returncode == 2 and "invalid password" in e.output:
            log.error("{0}: this PDF is password-protected - password must "
                      "be removed for OCR".format(input_file))
            sys.exit(ExitCode.input_file)
        elif e.returncode == 2:
            log.error("{0}: not a valid PDF, and could not repair it.".format(
                input_file))
            log.error("Details: " + e.output)
            sys.exit(ExitCode.input_file)
        else:
            log.error("{0}: unknown error".format(
                input_file))
            log.error(e.output)
            sys.exit(ExitCode.unknown)
def get_npages(input_file, log):
    """Return the page count of input_file via ``qpdf --show-npages``.

    Exits with ExitCode.input_file when the file does not exist; other
    qpdf failures propagate as CalledProcessError.
    """
    try:
        pages = check_output(
            [get_program('qpdf'), '--show-npages', input_file],
            universal_newlines=True, close_fds=True)
    except CalledProcessError as e:
        # BUG FIX: the original used `e.output.find('No such file')`, which
        # is truthy (-1) when the text is ABSENT; use membership instead.
        if e.returncode == 2 and 'No such file' in e.output:
            log.error(e.output)
            sys.exit(ExitCode.input_file)
        # BUG FIX: previously fell through to `int(pages)` with `pages`
        # unbound (UnboundLocalError); re-raise the real error instead.
        raise
    return int(pages)
def split_pages(input_file, work_folder, npages):
    """Split multipage PDF into individual pages.

    Incredibly enough, this multiple process approach is about 70 times
    faster than using Ghostscript.
    """
    for page_num in range(1, int(npages) + 1):
        out_path = os.path.join(
            work_folder, '{0:06d}.page.pdf'.format(page_num))
        command = [
            get_program('qpdf'), input_file,
            '--pages', input_file, '{0}'.format(page_num), '--',
            out_path,
        ]
        check_call(command)
| [
"jim@purplerock.ca"
] | jim@purplerock.ca |
e0cf215b999026a8636472b15a796e1222e3847e | 30cffb7452220c2ac2961dd2e0f42e3b359a59c0 | /simscale_sdk/models/zero_gradient_nbc.py | f75838ccaefa9dba193d8b59a8daffd25ce686e9 | [
"MIT"
] | permissive | vpurcarea/simscale-python-sdk | 0bf892d8824f8d4599caa0f345d5ba28e038f5eb | 6f2d12b2d21142bd854042c0fb402c2c797629e4 | refs/heads/master | 2023-03-14T04:31:06.226337 | 2021-03-03T16:20:01 | 2021-03-03T16:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,321 | py | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class ZeroGradientNBC(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str'
}
attribute_map = {
'type': 'type'
}
def __init__(self, type='ZERO_GRADIENT', local_vars_configuration=None): # noqa: E501
"""ZeroGradientNBC - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self.discriminator = None
self.type = type
@property
def type(self):
"""Gets the type of this ZeroGradientNBC. # noqa: E501
:return: The type of this ZeroGradientNBC. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ZeroGradientNBC.
:param type: The type of this ZeroGradientNBC. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ZeroGradientNBC):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ZeroGradientNBC):
return True
return self.to_dict() != other.to_dict()
| [
"simscale"
] | simscale |
bafdc62773bc9fd08ba308ca2db163705297ec18 | c7a4e634ea260da4c6c94ca716f2910509579e91 | /functional_tests/pages/projects.py | 5a76026fc313007cba773855df88559c19282fb7 | [
"MIT"
] | permissive | XeryusTC/projman | 858a72496ea6eaa23e8e0b511f8c17e037fa37b6 | 3db118d51a9fc362153593f5a862187bdaf0a73c | refs/heads/master | 2016-08-12T09:36:48.371178 | 2016-05-07T21:12:02 | 2016-05-07T21:12:02 | 45,639,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,986 | py | # -*- coding: utf-8 -*-
from page_objects import PageObject, PageElement, MultiPageElement
from selenium.webdriver.support.ui import Select
class BaseProjectPage(PageObject):
    """Elements shared by every authenticated page: chrome, sidebar, nav."""
    body = PageElement(tag_name='body')
    content = PageElement(id_='content')
    logout = PageElement(name='logout')
    overlay = PageElement(id_='mui-overlay')
    sidebar = PageElement(id_="sidebar")
    sidebar_hide = PageElement(class_name='js-hide-sidebar')
    sidebar_show = PageElement(class_name='js-show-sidebar')
    # context=True elements must be called with a container element.
    inlist_link = PageElement(name='inlist_link', context=True)
    action_link = PageElement(link_text='Actions', context=True)
    create_project_link = PageElement(link_text='Create project', context=True)
    settings_link = PageElement(name='settings')
    menu = PageElement(name='menu')
    _project_links = MultiPageElement(css="a.project", context=True)

    def project_link(self, text):
        """Return the sidebar link whose visible text equals `text`, or None."""
        for link in self._project_links(self.sidebar):
            if text == link.text:
                return link
class InlistPage(PageObject):
    """Inbox ("in list") page: capture form plus per-row item actions."""
    add_box = PageElement(name='text')
    add_button = PageElement(xpath="//form//input[@id='submit-id-submit']")
    thelist = MultiPageElement(css='#list .full-height')
    listrows = MultiPageElement(css='#list .mui-row')
    error_lists = MultiPageElement(css='.errorlist')
    # Row-scoped actions: call with a row element as context.
    delete_item = PageElement(class_name='action-delete', context=True)
    convert_action = PageElement(class_name='action-convert', context=True)
    convert_project = PageElement(class_name='action-project', context=True)
convert_project = PageElement(class_name='action-project', context=True)
class InlistDeletePage(PageObject):
    """Confirmation page for deleting an inlist item."""
    content = PageElement(id_='content')
    confirm = PageElement(xpath="//input[@type='submit']")
class ActionDeletePage(PageObject):
    """Confirmation page for deleting an action item."""
    content = PageElement(id_='content')
    confirm = PageElement(xpath="//input[@type='submit']")
class ConvertToActionPage(PageObject):
    """Form for converting an inlist item into an action."""
    text_box = PageElement(name='text')
    convert_button = PageElement(xpath="//input[@type='submit']")
class CreateProjectPage(PageObject):
    """Project creation form."""
    name_box = PageElement(name='name')
    description_box = PageElement(name='description')
    create_button = PageElement(name='create')
    error_lists = MultiPageElement(css='.errorlist')
class ProjectPage(PageObject):
    """Page object for a project detail page and its action list."""
    info = PageElement(id_='info')
    title = PageElement(xpath="//div[@id='info']//h1/parent::*")
    add_box = PageElement(name='text')
    add_button = PageElement(xpath="//form//input[@name='submit']")
    edit = PageElement(css='.action-edit')
    delete = PageElement(class_name='delete-project')
    thelist = MultiPageElement(css='#list .mui-row')
    checked_list = MultiPageElement(css='#list .mui-row.checked')
    error_lists = MultiPageElement(css='.errorlist')
    # Row-scoped elements: call with a row element as context.
    _item = PageElement(css='.action-item', context=True)
    _list_text = PageElement(css='.action-item .action-text', context=True)
    _delete_item = PageElement(class_name='action-delete', context=True)
    _move_item = PageElement(class_name='action-edit-action', context=True)
    _item_deadline = PageElement(css='.action-deadline', context=True)
    apply_sort = PageElement(name='sort')
    _sort_method = PageElement(name='sort_method')
    _sort_order = PageElement(name='sort_order')

    @property
    def sort_method(self):
        """Select wrapper around the sort-method dropdown."""
        return Select(self._sort_method)

    @property
    def sort_order(self):
        """Select wrapper around the sort-order dropdown."""
        return Select(self._sort_order)

    def list_text(self, context):
        """Return the visible text of every row in `context`."""
        return [self._list_text(row).text for row in context]

    def get_list_rows(self, context):
        """Map row index -> dict of that row's sub-elements.

        Uses enumerate() instead of the former range(len(...)) indexing.
        """
        res = {}
        for i, row in enumerate(context):
            res[i] = {
                'item': self._item(row),
                'text': self._list_text(row),
                'delete': self._delete_item(row),
                'edit': self._move_item(row),
                'deadline': self._item_deadline(row),
            }
            # The following is for compatibility with older FTs
            res[i]['move'] = res[i]['edit']
        return res

ActionlistPage = ProjectPage
class EditPage(PageObject):
    """Project edit form (name + description)."""
    name = PageElement(name='name')
    description = PageElement(name='description')
    confirm = PageElement(name='update')
class ProjectDeletePage(PageObject):
    """Confirmation page for deleting a whole project."""
    content = PageElement(id_='content')
    confirm = PageElement(xpath="//input[@value='Confirm']")
class EditActionPage(PageObject):
    """Edit form for an action item (text, deadline, target project)."""
    content = PageElement(id_='content')
    confirm = PageElement(name='move')
    errors = MultiPageElement(css='.errorlist')
    form = PageElement(tag_name='form')
    deadline_date = PageElement(name='deadline_0')
    deadline_time = PageElement(name='deadline_1')
    text_box = PageElement(name='text')
    _select = PageElement(tag_name='select')

    @property
    def select(self):
        """Select wrapper around the raw project <select> element."""
        return Select(self._select)

# Compatibility with FTs that test for the move button
MoveActionPage = EditActionPage
| [
"armadillo@onenetbeyond.org"
] | armadillo@onenetbeyond.org |
2a6bfb83510ec56d87a9d444cc346d56a3fdbd9f | 0abae2b0586605f6b99cb498ac8161297a7d72c0 | /synthtorch/models/nconvnet.py | dc51157d7875fd02f732b81b30df5053159876dd | [
"Apache-2.0"
] | permissive | jcreinhold/synthtorch | fc227d5597bb77e2018cb6a6cfee9bc086ff5001 | bb6eb20641b2cae3cbb96421b12e03865b5c5095 | refs/heads/master | 2021-09-25T15:31:39.398836 | 2021-09-24T19:51:46 | 2021-09-24T19:51:46 | 155,944,524 | 23 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,273 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
synthtorch.models.nconvnet
define the class for a N layer CNN with
no max pool, increase in channels, or any of that
fancy stuff. This is generally used for testing
purposes
Author: Jacob Reinhold (jacob.reinhold@jhu.edu)
Created on: Nov 2, 2018
"""
__all__ = ['SimpleConvNet']
from typing import Tuple
import logging
import torch
from torch import nn
logger = logging.getLogger(__name__)
class SimpleConvNet(torch.nn.Module):
    """Plain N-layer CNN (no pooling or channel growth), used for testing.

    Each layer is replication-pad -> conv -> ReLU -> instance norm ->
    dropout, so spatial dimensions are preserved for odd kernel sizes.
    """

    def __init__(self, n_layers: int, n_input: int = 1, n_output: int = 1, kernel_size: Tuple[int] = (3, 3, 3),
                 dropout_prob: float = 0, dim: int = 3, **kwargs):
        """
        Args:
            n_layers: number of conv layers (used when kernel_size is a
                single tuple of ints, which is replicated per layer).
            n_input/n_output: channel counts for every layer.
            kernel_size: one kernel (tuple of ints) or a sequence of
                per-layer kernels.
            dropout_prob: spatial dropout probability per layer.
            dim: spatial dimensionality (1, 2 or 3).
        """
        super(SimpleConvNet, self).__init__()
        self.n_layers = n_layers
        self.n_input = n_input
        self.n_output = n_output
        self.dropout_prob = dropout_prob
        self.dim = dim
        self.criterion = nn.MSELoss()
        # Fix: kernel_sz was previously assigned twice; set it once here.
        if isinstance(kernel_size[0], int):
            self.kernel_sz = [kernel_size for _ in range(n_layers)]
        else:
            self.kernel_sz = kernel_size
        pad = nn.ReplicationPad3d if dim == 3 else \
              nn.ReplicationPad2d if dim == 2 else \
              nn.ReplicationPad1d
        # Padding of ks//2 per side keeps the spatial size for odd kernels.
        self.layers = nn.ModuleList([nn.Sequential(
            pad([ks // 2 for p in zip(ksz, ksz) for ks in p]),
            nn.Conv3d(n_input, n_output, ksz) if dim == 3 else \
            nn.Conv2d(n_input, n_output, ksz) if dim == 2 else \
            nn.Conv1d(n_input, n_output, ksz),
            nn.ReLU(),
            nn.InstanceNorm3d(n_output, affine=True) if dim == 3 else \
            nn.InstanceNorm2d(n_output, affine=True) if dim == 2 else \
            nn.InstanceNorm1d(n_output, affine=True),
            nn.Dropout3d(dropout_prob) if dim == 3 else \
            nn.Dropout2d(dropout_prob) if dim == 2 else \
            nn.Dropout(dropout_prob)) for ksz in self.kernel_sz])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply every layer in sequence."""
        for layer in self.layers:
            x = layer(x)
        return x

    def predict(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """Inference entry point; extra args are accepted and ignored."""
        return self.forward(x)

    def freeze(self):
        # Parameter freezing is intentionally unsupported for this test net.
        raise NotImplementedError
| [
"jacob.reinhold@jhu.edu"
] | jacob.reinhold@jhu.edu |
ffedf66584fb5b7a988b85720f9c11517f525e05 | a5e71a333a86476b9cb1bdf6989bb5f47dd5e409 | /ScrapePlugins/M/FoolSlide/FoolSlideDownloadBase.py | 395abb1796f52b7fd3c704e900c0f38787d304a6 | [] | no_license | GDXN/MangaCMS | 0e797299f12c48986fda5f2e7de448c2934a62bd | 56be0e2e9a439151ae5302b3e6ceddc7868d8942 | refs/heads/master | 2021-01-18T11:40:51.993195 | 2017-07-22T12:55:32 | 2017-07-22T12:55:32 | 21,105,690 | 6 | 1 | null | 2017-07-22T12:55:33 | 2014-06-22T21:13:19 | Python | UTF-8 | Python | false | false | 5,584 | py |
import os
import os.path
import nameTools as nt
import urllib.parse
import zipfile
import runStatus
import traceback
import bs4
import re
import json
import ScrapePlugins.RetreivalBase
import processDownload
import abc
class FoolContentLoader(ScrapePlugins.RetreivalBase.RetreivalBase):
	@abc.abstractmethod
	def groupName(self):
		# Scanlation group name; every concrete FoolSlide plugin overrides this.
		return None

	@abc.abstractmethod
	def contentSelector(self):
		# (tag, id) pair locating the reader's script container div,
		# consumed by getImageUrls() via soup.find(tag, id=...).
		return None

	# NOTE(review): presumably caps concurrent downloads in the retrieval
	# base class — confirm against RetreivalBase.
	retreivalThreads = 1
	def getImage(self, imageUrl, referrer):
		# Fetch one image, passing the chapter page URL as the Referer header.
		content, handle = self.wg.getpage(imageUrl, returnMultiple=True, addlHeaders={'Referer': referrer})
		if not content or not handle:
			raise ValueError("Failed to retreive image from page '%s'!" % referrer)

		# Derive the filename from the FINAL URL (after any redirects) and
		# normalize its encoding with UnicodeDammit.
		fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1])
		fileN = bs4.UnicodeDammit(fileN).unicode_markup
		self.log.info("retreived image '%s' with a size of %0.3f K", fileN, len(content)/1000.0)
		return fileN, content
def getImageUrls(self, baseUrl):
pageCtnt = self.wg.getpage(baseUrl)
# print("GetImageUrls")
# print("This series contains mature contents and is meant to be viewed by an adult audience." in pageCtnt)
if "This series contains mature contents and is meant to be viewed by an adult audience." in pageCtnt:
self.log.info("Adult check page. Confirming...")
pageCtnt = self.wg.getpage(baseUrl, postData={"adult": "true"})
if "This series contains mature contents and is meant to be viewed by an adult audience." in pageCtnt:
raise ValueError("Wat?")
soup = bs4.BeautifulSoup(pageCtnt, "lxml")
container = soup.find(self.contentSelector[0], id=self.contentSelector[1])
if not container:
raise ValueError("Unable to find javascript container div '%s'" % baseUrl)
# If there is a ad div in the content container, it'll mess up the javascript match, so
# find it, and remove it from the tree.
container.find('div', id='bottombar').decompose()
if container.find('div', class_='ads'):
container.find('div', class_='ads').decompose()
scriptText = container.script.get_text()
if not scriptText:
raise ValueError("No contents in script tag? '%s'" % baseUrl)
jsonRe = re.compile(r'var [a-zA-Z]+ ?= ?(\[.*?\]);', re.DOTALL)
jsons = jsonRe.findall(scriptText)
jsons = [tmp for tmp in jsons if len(tmp)>2]
if not jsons:
# print("Script = ", container.script)
raise ValueError("No JSON variable in script! '%s'" % baseUrl)
valid = False
for item in jsons:
loaded = json.loads(item)
bad = False
for image in loaded:
urlfname = os.path.split(urllib.parse.urlsplit(image['url']).path)[-1]
if image['filename'] != urlfname:
bad = True
if not bad:
arr = loaded
break
imageUrls = []
for item in arr:
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(item['url'])
path = urllib.parse.quote(path)
itemUrl = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
imageUrls.append((item['filename'], itemUrl, baseUrl))
if not imageUrls:
raise ValueError("Unable to find contained images on page '%s'" % baseUrl)
return imageUrls
def getLink(self, link):
sourceUrl = link["sourceUrl"]
seriesName = link["seriesName"]
chapterVol = link["originName"]
try:
self.log.info( "Should retreive url - %s", sourceUrl)
self.updateDbEntry(sourceUrl, dlState=1)
imageUrls = self.getImageUrls(sourceUrl)
if not imageUrls:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Page not found - 404")
self.updateDbEntry(sourceUrl, dlState=-1)
return
self.log.info("Downloading = '%s', '%s' ('%s images)", seriesName, chapterVol, len(imageUrls))
dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)
if link["flags"] == None:
link["flags"] = ""
if newDir:
self.updateDbEntry(sourceUrl, flags=" ".join([link["flags"], "haddir"]))
chapterName = nt.makeFilenameSafe(chapterVol)
fqFName = os.path.join(dlPath, chapterName+"["+self.groupName+"].zip")
loop = 1
while os.path.exists(fqFName):
fqFName, ext = os.path.splitext(fqFName)
fqFName = "%s (%d)%s" % (fqFName, loop, ext)
loop += 1
self.log.info("Saving to archive = %s", fqFName)
images = []
for imageName, imgUrl, referrerUrl in imageUrls:
dummy_imageName, imageContent = self.getImage(imgUrl, referrerUrl)
images.append([imageName, imageContent])
if not runStatus.run:
self.log.info( "Breaking due to exit flag being set")
self.updateDbEntry(sourceUrl, dlState=0)
return
self.log.info("Creating archive with %s images", len(images))
if not images:
self.updateDbEntry(sourceUrl, dlState=-1, seriesName=seriesName, originName=chapterVol, tags="error-404")
return
#Write all downloaded files to the archive.
arch = zipfile.ZipFile(fqFName, "w")
for imageName, imageContent in images:
arch.writestr(imageName, imageContent)
arch.close()
filePath, fileName = os.path.split(fqFName)
self.updateDbEntry(sourceUrl, downloadPath=filePath, fileName=fileName)
dedupState = processDownload.processDownload(seriesName, fqFName, deleteDups=True, rowId=link['dbId'])
self.log.info( "Done")
self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, seriesName=seriesName, originName=chapterVol, tags=dedupState)
return
except Exception:
self.log.critical("Failure on retreiving content at %s", sourceUrl)
self.log.critical("Traceback = %s", traceback.format_exc())
self.updateDbEntry(sourceUrl, dlState=-1)
| [
"something@fake-url.com"
] | something@fake-url.com |
b55b8315eaf1069c0f704f305fdcdc0cfafaf87d | 57ea6657b4deb620c4e29b606a5ec259d22fadcd | /Chatbot_Web/impl/weixin/WXBizMsgCrypt.py | 77d319c0cda95ff7b3c39c660dfa35098682ca0d | [
"Apache-2.0"
] | permissive | orchestor/Chatbot_CN | 021d05849257d66e8e2a65d4ead5a777e09d7d3d | 43922d7f73946d00faad3f27d86188ec18022965 | refs/heads/master | 2020-05-09T12:48:48.124981 | 2019-04-09T13:54:24 | 2019-04-09T13:54:24 | 181,124,145 | 1 | 0 | Apache-2.0 | 2019-04-13T05:11:09 | 2019-04-13T05:11:06 | null | UTF-8 | Python | false | false | 9,372 | py | #!/usr/bin/env python
#-*- encoding:utf-8 -*-
""" 对公众平台发送给公众账号的消息加解密示例代码.
@copyright: Copyright (c) 1998-2014 Tencent Inc.
"""
# ------------------------------------------------------------------------
import base64
import string
import random
import hashlib
import time
import struct
from crypto.Cipher import AES
import xml.etree.cElementTree as ET
import sys
import socket
import ierror
"""
关于Crypto.Cipher模块,ImportError: No module named 'Crypto'解决方案
请到官方网站 https://www.dlitz.net/software/pycrypto/ 下载pycrypto。
下载后,按照README中的“Installation”小节的提示进行pycrypto安装。
"""
class FormatException(Exception):
pass
def throw_exception(message, exception_class=FormatException):
"""my define raise exception function"""
raise exception_class(message)
class SHA1:
"""计算公众平台的消息签名接口"""
def getSHA1(self, token, timestamp, nonce, encrypt):
"""用SHA1算法生成安全签名
@param token: 票据
@param timestamp: 时间戳
@param encrypt: 密文
@param nonce: 随机字符串
@return: 安全签名
"""
try:
sortlist = [token, timestamp, nonce, encrypt]
sortlist.sort()
sha = hashlib.sha1()
sha.update("".join(sortlist))
return ierror.WXBizMsgCrypt_OK, sha.hexdigest()
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_ComputeSignature_Error, None
class XMLParse:
"""提供提取消息格式中的密文及生成回复消息格式的接口"""
# xml消息模板
AES_TEXT_RESPONSE_TEMPLATE = """<xml>
<Encrypt><![CDATA[%(msg_encrypt)s]]></Encrypt>
<MsgSignature><![CDATA[%(msg_signaturet)s]]></MsgSignature>
<TimeStamp>%(timestamp)s</TimeStamp>
<Nonce><![CDATA[%(nonce)s]]></Nonce>
</xml>"""
def extract(self, xmltext):
"""提取出xml数据包中的加密消息
@param xmltext: 待提取的xml字符串
@return: 提取出的加密消息字符串
"""
try:
xml_tree = ET.fromstring(xmltext)
encrypt = xml_tree.find("Encrypt")
touser_name = xml_tree.find("ToUserName")
return ierror.WXBizMsgCrypt_OK, encrypt.text, touser_name.text
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_ParseXml_Error,None,None
def generate(self, encrypt, signature, timestamp, nonce):
"""生成xml消息
@param encrypt: 加密后的消息密文
@param signature: 安全签名
@param timestamp: 时间戳
@param nonce: 随机字符串
@return: 生成的xml字符串
"""
resp_dict = {
'msg_encrypt' : encrypt,
'msg_signaturet': signature,
'timestamp' : timestamp,
'nonce' : nonce,
}
resp_xml = self.AES_TEXT_RESPONSE_TEMPLATE % resp_dict
return resp_xml
class PKCS7Encoder():
"""提供基于PKCS7算法的加解密接口"""
block_size = 32
def encode(self, text):
""" 对需要加密的明文进行填充补位
@param text: 需要进行填充补位操作的明文
@return: 补齐明文字符串
"""
text_length = len(text)
# 计算需要填充的位数
amount_to_pad = self.block_size - (text_length % self.block_size)
if amount_to_pad == 0:
amount_to_pad = self.block_size
# 获得补位所用的字符
pad = chr(amount_to_pad)
return text + pad * amount_to_pad
def decode(self, decrypted):
"""删除解密后明文的补位字符
@param decrypted: 解密后的明文
@return: 删除补位字符后的明文
"""
pad = ord(decrypted[-1])
if pad<1 or pad >32:
pad = 0
return decrypted[:-pad]
class Prpcrypt(object):
"""提供接收和推送给公众平台消息的加解密接口"""
def __init__(self,key):
#self.key = base64.b64decode(key+"=")
self.key = key
# 设置加解密模式为AES的CBC模式
self.mode = AES.MODE_CBC
def encrypt(self,text,appid):
"""对明文进行加密
@param text: 需要加密的明文
@return: 加密得到的字符串
"""
# 16位随机字符串添加到明文开头
text = self.get_random_str() + struct.pack("I",socket.htonl(len(text))) + text + appid
# 使用自定义的填充方式对明文进行补位填充
pkcs7 = PKCS7Encoder()
text = pkcs7.encode(text)
# 加密
cryptor = AES.new(self.key,self.mode,self.key[:16])
try:
ciphertext = cryptor.encrypt(text)
# 使用BASE64对加密后的字符串进行编码
return ierror.WXBizMsgCrypt_OK, base64.b64encode(ciphertext)
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_EncryptAES_Error,None
def decrypt(self,text,appid):
"""对解密后的明文进行补位删除
@param text: 密文
@return: 删除填充补位后的明文
"""
try:
cryptor = AES.new(self.key,self.mode,self.key[:16])
# 使用BASE64对密文进行解码,然后AES-CBC解密
plain_text = cryptor.decrypt(base64.b64decode(text))
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_DecryptAES_Error,None
try:
pad = ord(plain_text[-1])
# 去掉补位字符串
#pkcs7 = PKCS7Encoder()
#plain_text = pkcs7.encode(plain_text)
# 去除16位随机字符串
content = plain_text[16:-pad]
xml_len = socket.ntohl(struct.unpack("I",content[ : 4])[0])
xml_content = content[4 : xml_len+4]
from_appid = content[xml_len+4:]
except Exception as e:
#print e
return ierror.WXBizMsgCrypt_IllegalBuffer,None
if from_appid != appid:
return ierror.WXBizMsgCrypt_ValidateAppid_Error,None
return 0,xml_content
def get_random_str(self):
""" 随机生成16位字符串
@return: 16位字符串
"""
rule = string.letters + string.digits
str = random.sample(rule, 16)
return "".join(str)
class WXBizMsgCrypt(object):
#构造函数
#@param sToken: 公众平台上,开发者设置的Token
# @param sEncodingAESKey: 公众平台上,开发者设置的EncodingAESKey
# @param sAppId: 企业号的AppId
def __init__(self,sToken,sEncodingAESKey,sAppId):
try:
self.key = base64.b64decode(sEncodingAESKey+"=")
assert len(self.key) == 32
except:
throw_exception("[error]: EncodingAESKey unvalid !", FormatException)
#return ierror.WXBizMsgCrypt_IllegalAesKey)
self.token = sToken
self.appid = sAppId
def EncryptMsg(self, sReplyMsg, sNonce, timestamp = None):
#将公众号回复用户的消息加密打包
#@param sReplyMsg: 企业号待回复用户的消息,xml格式的字符串
#@param sTimeStamp: 时间戳,可以自己生成,也可以用URL参数的timestamp,如为None则自动用当前时间
#@param sNonce: 随机串,可以自己生成,也可以用URL参数的nonce
#sEncryptMsg: 加密后的可以直接回复用户的密文,包括msg_signature, timestamp, nonce, encrypt的xml格式的字符串,
#return:成功0,sEncryptMsg,失败返回对应的错误码None
pc = Prpcrypt(self.key)
ret,encrypt = pc.encrypt(sReplyMsg, self.appid)
if ret != 0:
return ret,None
if timestamp is None:
timestamp = str(int(time.time()))
# 生成安全签名
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.token, timestamp, sNonce, encrypt)
if ret != 0:
return ret,None
xmlParse = XMLParse()
return ret,xmlParse.generate(encrypt, signature, timestamp, sNonce)
def DecryptMsg(self, sPostData, sMsgSignature, sTimeStamp, sNonce):
# 检验消息的真实性,并且获取解密后的明文
# @param sMsgSignature: 签名串,对应URL参数的msg_signature
# @param sTimeStamp: 时间戳,对应URL参数的timestamp
# @param sNonce: 随机串,对应URL参数的nonce
# @param sPostData: 密文,对应POST请求的数据
# xml_content: 解密后的原文,当return返回0时有效
# @return: 成功0,失败返回对应的错误码
# 验证安全签名
xmlParse = XMLParse()
ret,encrypt,touser_name = xmlParse.extract(sPostData)
if ret != 0:
return ret, None
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.token, sTimeStamp, sNonce, encrypt)
if ret != 0:
return ret, None
if not signature == sMsgSignature:
return ierror.WXBizMsgCrypt_ValidateSignature_Error, None
pc = Prpcrypt(self.key)
ret,xml_content = pc.decrypt(encrypt,self.appid)
return ret,xml_content
| [
"charlesxu86@163.com"
] | charlesxu86@163.com |
d09e00e653b574f9a8970cd1e584175a9a92737e | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2811/60651/233596.py | adf251acd00fc9d70e91606146ff79bfffcadcaa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | inlist=input().split()
modlist=[]
for i in inlist[2,1+list[1]]:
moi=i%list[0]
if moi is not in modlist:
list.append(i%moi)
else:
print(i)
break
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
724c22209cc7ae9cdd3b6fab93b7e2622f3ee760 | 5e9576c368e98927e2965bd2fb23bd35d9993d69 | /featuretools/primitives/standard/aggregation/percent_true.py | 32ce39a1834e8fab60536294b99d282ccf61eb9d | [
"BSD-3-Clause"
] | permissive | alteryx/featuretools | c6e319e063e8e84e7684bf232376f95dc5272160 | c284c2d27a95b81e0bae913ac90df2b02c8f3b37 | refs/heads/main | 2023-08-25T12:21:33.945418 | 2023-08-23T16:30:25 | 2023-08-23T16:30:25 | 102,908,804 | 1,783 | 201 | BSD-3-Clause | 2023-09-07T18:53:19 | 2017-09-08T22:15:17 | Python | UTF-8 | Python | false | false | 2,090 | py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, Double
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
class PercentTrue(AggregationPrimitive):
"""Determines the percent of `True` values.
Description:
Given a list of booleans, return the percent
of values which are `True` as a decimal.
`NaN` values are treated as `False`,
adding to the denominator.
Examples:
>>> percent_true = PercentTrue()
>>> percent_true([True, False, True, True, None])
0.6
"""
name = "percent_true"
input_types = [
[ColumnSchema(logical_type=BooleanNullable)],
[ColumnSchema(logical_type=Boolean)],
]
return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
stack_on = []
stack_on_exclude = []
default_value = 0
compatibility = [Library.PANDAS, Library.DASK]
description_template = "the percentage of true values in {}"
def get_function(self, agg_type=Library.PANDAS):
if agg_type == Library.DASK:
def chunk(s):
def format_chunk(x):
return x[:].fillna(False)
chunk_sum = s.agg(lambda x: format_chunk(x).sum())
chunk_len = s.agg(lambda x: len(format_chunk(x)))
if chunk_sum.dtype == "bool":
chunk_sum = chunk_sum.astype("int64")
if chunk_len.dtype == "bool":
chunk_len = chunk_len.astype("int64")
return (chunk_sum, chunk_len)
def agg(val, length):
return (val.sum(), length.sum())
def finalize(total, length):
return total / length
return dd.Aggregation(self.name, chunk=chunk, agg=agg, finalize=finalize)
def percent_true(s):
return s.fillna(False).mean()
return percent_true
| [
"noreply@github.com"
] | alteryx.noreply@github.com |
2e1f37842c48c239ce71d64acfa606a8846c5601 | 7be67ecaee241769a69f3f5dae1bb6f99feb5e84 | /venv/bin/xhtml2pdf | 9de81088105b51d156f564e3a715ca49edad96a4 | [] | no_license | asadlive84/Billing-Cable-TV-Providers- | 6e5e6412a84045749869253b49a2a53564b52c96 | 7f1927030e9cb57573e1cfe5a5110614ef251d02 | refs/heads/dev | 2022-03-17T21:42:41.443783 | 2019-11-22T14:48:50 | 2019-11-22T14:48:50 | 213,041,047 | 1 | 0 | null | 2019-12-05T00:29:16 | 2019-10-05T17:23:09 | Python | UTF-8 | Python | false | false | 435 | #!/home/asad/a/Billing-Cable-TV-Providers-/venv/bin/python3.7
# EASY-INSTALL-ENTRY-SCRIPT: 'xhtml2pdf==0.2.3','console_scripts','xhtml2pdf'
__requires__ = 'xhtml2pdf==0.2.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('xhtml2pdf==0.2.3', 'console_scripts', 'xhtml2pdf')()
)
| [
"asadlive.sohel@gmail.com"
] | asadlive.sohel@gmail.com | |
e034515fd347be397beff6bc24ce3093eecb2309 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03733/s782233089.py | 46581aa9c8c3b8e38789b9a77690606af0e916cd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | N, T = map(int, input().split())
t = list(map(int, input().split()))
cnt = T
time = T
for i in range(1, N):
if(t[i] <= time):
cnt += (t[i]-time)+T
time = t[i]+T
else:
time = t[i]+T
cnt += T
print(cnt) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8fee26e0cffef8bdca1787ef08b772d94ae98f5d | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/comparison/test_simple08.py | 6162995e3717146a1053b12a45aae23f8f91ccc5 | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 800 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('simple08.xlsx')
def test_create_file(self):
"""Test '0' number format. GH103."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({'num_format': 1})
worksheet.write(0, 0, 1.23, format1)
workbook.close()
self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
732fd0b2c4f87cd1ee44405eca6fe74734235e07 | 718a104a65581faa87980583bb321e093db341d3 | /data.py | 334b5bbc65297410a64db75e3971f1c7e9e5c1a7 | [
"Apache-2.0"
] | permissive | JHWen/Load-Forecast | 14e3909ec48b5f1a578ee4c727dd234c498f3eb3 | f65c623f33b4e19eb1035860c1df33926c747599 | refs/heads/master | 2020-04-12T08:36:43.176495 | 2018-12-20T07:22:03 | 2018-12-20T07:22:03 | 162,390,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,020 | py | import pandas as pd
import numpy as np
import logging
import random
def load_data(path):
data = pd.read_csv(path, delimiter=',')
"""
Year,Month,Day,Hour,Value,Value1,Value2,Value3,dayOfWeek,isWorkday,isHoliday,Season,
Tem,RH,Precipitation,File,value_oneweek_before,value_oneday_before,value_onedayavg_before
"""
# names = ['Month', 'Day', 'Hour', 'dayOfWeek', 'isWorkday', 'isHoliday', 'Season', 'Tem', 'RH',
# 'value_oneweek_before', 'value_oneday_before', 'value_onedayavg_before', 'Value']
# names = ['dayOfWeek', 'isWorkday', 'isHoliday', 'Season', 'Tem', 'RH',
# 'value_oneweek_before', 'value_oneday_before', 'value_onedayavg_before', 'Value']
#
# data = df[names].values
index_zero_value = []
for i in range(data.shape[0]):
if data['Value'][i] == 0:
index_zero_value.append(i)
df = data.loc[:]
for i in index_zero_value:
df.loc[i, 'Value'] = None
df = df.dropna()
# end
max_value = np.max(df['Value'])
min_value = np.min(df['Value'])
dfy = pd.DataFrame({'Value': (df['Value'] - min_value) / (max_value - min_value)})
dfX = pd.DataFrame({'dayOfWeek': df['dayOfWeek'],
'isWorkday': df['isWorkday'], 'isHoliday': df['isHoliday'],
'Season': df['Season'],
'Tem': (df['Tem'] - np.mean(df['Tem'])) / (np.max(df['Tem']) - np.min(df['Tem'])),
'RH': (df['RH'] - np.mean(df['RH'])) / (np.max(df['RH']) - np.min(df['RH']))})
df_X = np.array(dfX)
df_y = np.array(dfy)
data_ = np.concatenate((df_X, df_y), axis=1)
return data_, max_value, min_value
def get_train_data(data, shuffle=False, input_size=9, batch_size=60, time_step=15, train_begin=0, train_end=2000):
train_data = data[train_begin:train_end]
if shuffle:
random.shuffle(data)
# 标准化
mean = np.mean(train_data, axis=0)
std = np.std(train_data, axis=0)
# normalized_train_data = (train_data - mean) / std
normalized_train_data = train_data
train_x, train_y = [], [] # 训练集
for i in range(len(normalized_train_data) - time_step):
if len(train_x) == batch_size:
yield train_x, train_y
train_x, train_y = [], []
x = normalized_train_data[i:i + time_step, :input_size]
y = normalized_train_data[i:i + time_step, input_size, np.newaxis]
train_x.append(x.tolist())
train_y.append(y.tolist())
def get_test_data(data, input_size=6, time_step=15, test_begin=2000, test_end=2500):
test_data = data[test_begin:test_end]
mean = np.mean(test_data, axis=0)
std = np.std(test_data, axis=0)
# normalized_test_data = (test_data - mean) / std
normalized_test_data = test_data
size = (len(normalized_test_data) + time_step - 1) // time_step # 有size个sample
test_x, test_y = [], []
for i in range(size - 1):
x = normalized_test_data[i * time_step:(i + 1) * time_step, :input_size]
y = normalized_test_data[i * time_step:(i + 1) * time_step, input_size]
test_x.append(x.tolist())
test_y.extend(y)
return test_x, test_y
def get_logger(filename):
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
handler = logging.FileHandler(filename)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
if not logger.handlers:
logger.addHandler(handler)
return logger
if __name__ == '__main__':
data, max_value, min_value = load_data('./data_path/HourLoadSet.csv')
# batches = get_train_data(data)
test_x, test_y = get_test_data(data=data, test_begin=15000, test_end=17000)
test_y = np.array(test_y)
test_y_ = test_y * (max_value - min_value) + min_value
print(max_value, min_value)
for i, j in zip(test_y_, test_y):
print(i, j)
| [
"test@example.com"
] | test@example.com |
12a20fa1740ade148a3cbe7e0244b55abdca0b40 | ebd6f68d47e192da7f81c528312358cfe8052c8d | /swig/Examples/test-suite/python/arrays_global_runme.py | fa3b9f2ec2c153899bedb2354e0ab4637717a0b9 | [
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | inishchith/DeepSpeech | 965ad34d69eb4d150ddf996d30d02a1b29c97d25 | dcb7c716bc794d7690d96ed40179ed1996968a41 | refs/heads/master | 2021-01-16T16:16:05.282278 | 2020-05-19T08:00:33 | 2020-05-19T08:00:33 | 243,180,319 | 1 | 0 | Apache-2.0 | 2020-02-26T05:54:51 | 2020-02-26T05:54:50 | null | UTF-8 | Python | false | false | 419 | py | import arrays_global
arrays_global.cvar.array_i = arrays_global.cvar.array_const_i
from arrays_global import *
BeginString_FIX44a
cvar.BeginString_FIX44b
BeginString_FIX44c
cvar.BeginString_FIX44d
cvar.BeginString_FIX44d
cvar.BeginString_FIX44b = "12"'\0'"45"
cvar.BeginString_FIX44b
cvar.BeginString_FIX44d
cvar.BeginString_FIX44e
BeginString_FIX44f
test_a("hello", "hi", "chello", "chi")
test_b("1234567", "hi")
| [
"inishchith@gmail.com"
] | inishchith@gmail.com |
02b0318d190b9e1fde135410ecc2cb5fcac416c1 | 46fda2ea47f311ee7fefc6f6210811c7f4bd74ad | /science/py-geometer/files/patch-setup.py | 264db73014f73c40718fa55442e380c6cf4faf73 | [
"BSD-2-Clause"
] | permissive | truenas/ports | ad560a8adde884dc0cfc4b292bbbcad91903b287 | da4ed13ad08a6af5c54361f45964fa1177367c68 | refs/heads/truenas/13.0-stable | 2023-09-02T03:00:28.652837 | 2023-08-16T16:05:00 | 2023-08-16T16:05:00 | 8,656,293 | 18 | 9 | NOASSERTION | 2023-09-12T15:15:34 | 2013-03-08T17:35:37 | null | UTF-8 | Python | false | false | 292 | py | --- setup.py.orig 2020-07-08 15:51:34 UTC
+++ setup.py
@@ -19,7 +19,7 @@ VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
- 'numpy>=1.15,<1.20', 'sympy>=1.3,<=1.7'
+ 'numpy>=1.15,<1.21', 'sympy>=1.3,<=1.7'
]
# What packages are optional?
| [
"sunpoet@FreeBSD.org"
] | sunpoet@FreeBSD.org |
eb257264160ee57ec88f4c264424ccfdd9d82b1f | 6245db4e53782ab380de287f945bc026e3c6b281 | /python_workbook/str_formatting.py | 4a6974a3408e59c5880a9538906397759ee6403f | [] | no_license | rashmierande/python_exercise | e510174820efb793cfe435ad53c6baa34c829d86 | 9703b67a1f5614594244b4d8d2c42ebfb6a2aaec | refs/heads/master | 2021-01-23T00:53:17.235391 | 2018-03-15T05:46:53 | 2018-03-15T05:46:53 | 85,846,226 | 0 | 1 | null | 2018-01-21T18:11:11 | 2017-03-22T15:42:09 | Python | UTF-8 | Python | false | false | 722 | py | '''
Question: The code is supposed to ask the user to enter their name and surname
and then it prints out those user submitted values. Instead, the code throws a TypeError.
Please fix it so that the expected output is printed out.
Expected output:
Your first name is John and your second name is Smith
'''
firstname = input("Enter first name: ")
secondname = input("Enter second name: ")
print("Your first name is %s and your second name is %s" % (firstname, secondname))
#Each of the %s placeholders expects one value after % to be replaced with,
# but you need to pass these values inside a tuple.
# So, putting variables firstname and secondname inside a tuple fixes the code.
#Python expects a tuple after % . | [
"eranderashmi25@gmail.com"
] | eranderashmi25@gmail.com |
e99d95e4437b2da14fa498b7f5928e952b46ebb1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02265/s085080213.py | 540cdcdb6eb525c25d43e70a920abb364407eaae | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | import collections
d=collections.deque()
for _ in range(int(input())):
e=input()
if'i'==e[0]:d.appendleft(e.split()[1])
else:
if' '==e[6]:
m=e.split()[1]
if m in d:d.remove(m)
elif len(e)%2:d.popleft()
else:d.pop()
print(*d)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
89e378268e626c1a3c788218339b4d1c759b6ea6 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/29/usersdata/109/9631/submittedfiles/atividade.py | bc96354444dfb5afcff956647e27a7783ced551a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
cont=0
n=int(input('Digite o valor de n:'))
while True:
s=(n//10)
cont=cont+1
n=s
if s<1:
break
print cont
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
3ff68cf51fa51f689c89f1ac91c1fa528c276bb3 | a947525caa6940262099b24ebafa61900691ef22 | /trainer/agents.py | 28f7f1ea781b5e3a95ba15393f59060638c78c98 | [] | no_license | metal-tile/dqn-tensorflow | 527e9b8fa89eea4a4e6375d4244b9f2b10847473 | 9552307dfb4dacde69bdb268350d8ca0b4a02693 | refs/heads/master | 2020-05-19T05:48:47.323835 | 2019-05-04T06:15:31 | 2019-05-04T06:15:31 | 184,858,118 | 0 | 0 | null | 2019-05-04T05:59:24 | 2019-05-04T05:59:24 | null | UTF-8 | Python | false | false | 7,666 | py | import numpy as np
import tensorflow as tf
from . import repmem
class DQN:
def __init__(
self,
input_shape,
n_actions,
q_fn,
learning_rate,
discount_factor=0.99
):
"""
Parameters
----------
input_shape: the shape of input stat
- type: list of int
- example: [84, 84, 4] for Atari game in the original DQN paper
n_actions: the number of actions the agent can choose
- type: int
q_fn: a function building the computation graph for q-network
- type: callable
- input of q_fn: Tensor of shape [None, input_shape[0], input_shape[1], ...] and n_actions
- output of q_fn: Tensor of shape [None, n_actions]
learning_rate: the step size of the optimization method
- type: float
"""
self.learning_rate = learning_rate
self.n_actions = n_actions
self.gamma = discount_factor
self.input_shape = input_shape
self.q_fn = q_fn
# References to graph nodes are assigned after running `build_graph` method
self.x_ph, self.y_ph, self.a_ph = None, None, None
self.q, self.loss, self.train_ops = None, None, None
self.target_x_ph, self.target_q = None, None
self.assign_ops = None
def build_graph(self):
# Create placeholders
self.x_ph = tf.placeholder(tf.float32, shape=[None]+list(self.input_shape), name="x_ph")
self.y_ph = tf.placeholder(tf.float32, shape=[None], name="y_ph")
self.a_ph = tf.placeholder(tf.int64, shape=[None], name="a_ph")
# Build q network
with tf.variable_scope("qnet"):
self.q = self.q_fn(self.x_ph, self.n_actions)
self.loss = self._build_loss(self.y_ph, self.q, self.a_ph)
# Build target q network
self.target_x_ph = tf.placeholder(tf.float32, shape=[None] + list(self.input_shape), name="target_x_ph")
with tf.variable_scope("target_qnet"):
self.target_q = self.q_fn(self.target_x_ph, self.n_actions)
# Build update target q-network ops
q_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="qnet")
target_q_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="target_qnet")
self.train_ops = self._build_optimizer(self.loss, self.learning_rate)
self.assign_ops = [tf.assign(target_q_vars[i], q_vars[i]) for i in range(len(q_vars))]
@staticmethod
def _build_loss(y_t_ph, q_t, a_ph):
with tf.name_scope("loss"):
a_t_one_hot = tf.one_hot(a_ph, q_t.get_shape()[1].value)
q_t_acted = tf.reduce_sum(q_t * a_t_one_hot, reduction_indices=1)
loss = tf.losses.mean_squared_error(labels=y_t_ph, predictions=q_t_acted)
return loss
@staticmethod
def _build_optimizer(loss, learning_rate):
global_step = tf.train.get_or_create_global_step()
optim = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=0.95, epsilon=1e-2)
train_op = optim.minimize(loss, global_step=global_step)
return train_op
def update(self, sess, x_t, a_t, r_t, x_t_plus_1, terminal):
# Compute target score
fd = {self.target_x_ph: x_t_plus_1}
q_t_plus_1 = np.max(sess.run(self.target_q, feed_dict=fd), axis=1)
y_t = r_t + q_t_plus_1 * (1-terminal) * self.gamma
# Run optimization operation
fd = {self.x_ph: x_t, self.y_ph: y_t, self.a_ph: a_t}
_, train_loss = sess.run([self.train_ops, self.loss], feed_dict=fd)
return train_loss
def act(self, sess, x_t):
return sess.run(self.q, feed_dict={self.x_ph: x_t})
def update_target_q_network(self, sess):
sess.run(self.assign_ops)
def train_and_play_game(
agent,
env,
random_action_decay,
max_episodes,
replay_memory_size,
batch_size,
update_frequency,
target_sync_frequency,
final_exploration_frame,
log_frequency=5,
action_repeat=4,
max_no_op=30,
checkpoint_dir=None,
):
replay_memory = repmem.ReplayMemory(memory_size=replay_memory_size)
total_reward_list = []
with tf.Graph().as_default() as g:
agent.build_graph()
episode_count = step_count = action_count = frame_count = 0
with tf.train.MonitoredTrainingSession(
save_summaries_steps=100,
checkpoint_dir=checkpoint_dir,
) as mon_sess:
# Training loop
while episode_count < max_episodes:
# random_action_prob = max(random_action_decay**episode_count, 0.05)
random_action_prob = max(1 - float(frame_count)/final_exploration_frame*0.95, 0.05)
# Play a new game
previous_observation = env.reset()
done = False
total_reward = 0
# Initial action
action = np.random.randint(agent.n_actions)
while not done:
# Act at random in first some frames
# for _ in range(np.random.randint(1, max_no_op)):
# previous_observation, _, _, _ = env.step(env.action_space.sample())
# print(episode_count, step_count, action_count, frame_count)
if frame_count % target_sync_frequency == 0:
agent.update_target_q_network(mon_sess)
# Frame skip
if frame_count % action_repeat == 0:
# Act at random with a fixed probability
if np.random.rand() <= random_action_prob:
action = np.random.randint(agent.n_actions)
# Act following the policy on the other games
else:
q = agent.act(mon_sess, np.array([previous_observation]))
action = q.argmax()
# print(q)
action_count += 1
# Receive the results from the game simulator
observation, reward, done, info = env.step(action)
total_reward += reward
# Store the experience
if frame_count % action_repeat == 0:
replay_memory.store(previous_observation, action, reward, observation, done)
previous_observation = observation
# Update q network every update_interval
if action_count % update_frequency == 0:
mini_batch = replay_memory.sample(size=batch_size)
train_loss = agent.update(
mon_sess, mini_batch[0], mini_batch[1], mini_batch[2], mini_batch[3], mini_batch[4]
)
step_count += 1
frame_count += 1
episode_count += 1
total_reward_list.append(total_reward)
# Show log every log_interval
if episode_count % log_frequency == 0:
print("Episode: {} Frame: {} Test: {}".format(episode_count, frame_count, len(total_reward_list)))
print(
"Average Reward: {} Training Loss: {} Epsilon: {}".format(
np.mean(total_reward_list[-50:]),
np.mean(train_loss),
random_action_prob
)
)
| [
"shuhei.fujiwara@gmail.com"
] | shuhei.fujiwara@gmail.com |
7bb62eea1b4ae6548c56888022465b75c9c17c5a | 1ecb394b10e9622a5a5d8845b44e4585f464d42e | /nncp-api/biz/dlt.py | 6b61757ea3ab90e5869038b48a07359ec2fefec2 | [] | no_license | dragonflylxp/lottory | 7ec28d196f58692d9d417aa5d6963c182afe260a | b04f115df325a58148dc19d7cdfc21b28892a6a1 | refs/heads/master | 2020-04-28T08:53:09.007092 | 2020-04-17T10:50:41 | 2020-04-17T10:50:41 | 175,145,951 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | # coding: utf-8
import traceback
import ews
import define
from decimal import Decimal
from hook import Hook
from util.configer import *
from cbrpc import get_rpc_conn, RpcException
from commonEntity.Dlt import DltBean
from commonEntity.User import UserBean
import session
from util.tools import Log
logger = Log().getLog()
@ews.route_sync_func('/dlt/issue')
def dlt_issue(handler, *args, **kwargs):
ret = DltBean().get_dlt_expect_list()
return handler.ok(ret)
@ews.route_sync_func('/dlt/trade', kwargs={'ck': (UserWarning,),
'lotid': (UserWarning, 'unsigned int'),
'wtype': (UserWarning, 'unsigned int'),
'beishu': (UserWarning, 'unsigned int'),
'zhushu': (UserWarning, 'unsigned int'),
'allmoney': (UserWarning, 'unsigned int'),
'couponid': (UserWarning, 'int'),
'expect': (UserWarning,),
'selecttype': (UserWarning,),
'fileorcode': (UserWarning,)})
@Hook.pre_hook('check_lotto')
def trade(handler, *args, **kwargs):
ck = handler.json_args.get("ck", "")
uid = session.get_by_ck(ck).get('uid')
params = handler.json_args
params.update({"uid": uid})
pid = None
try:
# 账户检查
paymoney = UserBean().check_account(params)
# 下单
with get_rpc_conn("trade") as proxy:
try:
resp = proxy.call("place_order", params)
except RpcException as ex:
raise ews.EwsError(ews.STATUS_RPC_TRADE_ERROR, ex.message)
except:
logger.error(traceback.format_exc())
raise
account = UserBean().user_account({"uid": uid})
ret = {"pid": resp.get("pid"), "balance": Decimal(account.get("balance"))-Decimal(paymoney), "balance_draw": account.get("balance_draw")}
return handler.ok(ret)
| [
"noreply@github.com"
] | dragonflylxp.noreply@github.com |
8afaab8c0cea56e8f41ab9c4f1c2441543615eef | fdd41bb26f1e7a17f5a424fe082a46bcc355abed | /setup.py | a14bf381aa58d1657f67532225f0ea61bddeb340 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SamuelMarks/ml-params-jax | 4978d5d4c422825be748138f87422cb0d4604ec1 | fd9ac6efe5f2c7ec6d4d41ccc9e032219992a219 | refs/heads/master | 2022-11-15T20:03:32.678882 | 2020-07-05T10:40:22 | 2020-07-05T10:40:22 | 276,909,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | # -*- coding: utf-8 -*-
from ast import parse
from distutils.sysconfig import get_python_lib
from functools import partial
from os import path, listdir
from platform import python_version_tuple
from setuptools import setup, find_packages
if python_version_tuple()[0] == '3':
imap = map
ifilter = filter
else:
from itertools import imap, ifilter
if __name__ == '__main__':
package_name = 'ml_params_jax'
with open(path.join(package_name, '__init__.py')) as f:
__author__, __version__ = imap(
lambda buf: next(imap(lambda e: e.value.s, parse(buf).body)),
ifilter(lambda line: line.startswith('__version__') or line.startswith('__author__'), f)
)
to_funcs = lambda *paths: (partial(path.join, path.dirname(__file__), package_name, *paths),
partial(path.join, get_python_lib(prefix=''), package_name, *paths))
_data_join, _data_install_dir = to_funcs('_data')
setup(
name=package_name,
author=__author__,
version=__version__,
install_requires=['pyyaml'],
test_suite=package_name + '.tests',
packages=find_packages(),
package_dir={package_name: package_name},
data_files=[
(_data_install_dir(), list(imap(_data_join, listdir(_data_join()))))
]
)
| [
"807580+SamuelMarks@users.noreply.github.com"
] | 807580+SamuelMarks@users.noreply.github.com |
6c742dc924f39f16f10aa3ddcde7be364acf3c92 | 20fc010bcc1b23b8df29c969eee725f3083ac117 | /mayan/apps/folders/tests/test_models.py | a5beb8df62ea4fc10f50dca53b49bab5128b6c5f | [
"Apache-2.0"
] | permissive | fire-studio/mayan-edms | c5c943e16ea0c780a4c6c61d3bc702d00590eb61 | 6dc45a1c7f5f19219fc748e1578f200301a18f5b | refs/heads/master | 2021-01-01T17:09:08.733373 | 2017-07-22T06:42:50 | 2017-07-22T06:45:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,780 | py | from __future__ import unicode_literals
from django.test import override_settings
from common.tests import BaseTestCase
from documents.models import DocumentType
from documents.tests import TEST_DOCUMENT_PATH, TEST_DOCUMENT_TYPE
from ..models import Folder
from .literals import TEST_FOLDER_LABEL
@override_settings(OCR_AUTO_OCR=False)
class FolderTestCase(BaseTestCase):
def setUp(self):
super(FolderTestCase, self).setUp()
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE
)
with open(TEST_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=file_object
)
def tearDown(self):
self.document_type.delete()
super(FolderTestCase, self).tearDown()
def test_folder_creation(self):
folder = Folder.objects.create(label=TEST_FOLDER_LABEL)
self.assertEqual(Folder.objects.all().count(), 1)
self.assertEqual(list(Folder.objects.all()), [folder])
def test_addition_of_documents(self):
folder = Folder.objects.create(label=TEST_FOLDER_LABEL)
folder.documents.add(self.document)
self.assertEqual(folder.documents.count(), 1)
self.assertEqual(list(folder.documents.all()), [self.document])
def test_addition_and_deletion_of_documents(self):
folder = Folder.objects.create(label=TEST_FOLDER_LABEL)
folder.documents.add(self.document)
self.assertEqual(folder.documents.count(), 1)
self.assertEqual(list(folder.documents.all()), [self.document])
folder.documents.remove(self.document)
self.assertEqual(folder.documents.count(), 0)
self.assertEqual(list(folder.documents.all()), [])
| [
"roberto.rosario.gonzalez@gmail.com"
] | roberto.rosario.gonzalez@gmail.com |
6d15dc1ae9a416953dddc829e437664a04cf34cc | 24f664aa2344d4f5d5e7b048ac4e85231715c4c8 | /deeplearning/deeptune/opencl/heterogeneous_mapping/models/base.py | 7e8490aaae299610f8b1c95cea719f8d82a1ac1c | [] | no_license | speycode/clfuzz | 79320655e879d1e0a06a481e8ec2e293c7c10db7 | f2a96cf84a7971f70cb982c07b84207db407b3eb | refs/heads/master | 2020-12-05T13:44:55.486419 | 2020-01-03T14:14:03 | 2020-01-03T14:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | # Copyright (c) 2017, 2018, 2019 Chris Cummins.
#
# DeepTune is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DeepTune is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DeepTune. If not, see <https://www.gnu.org/licenses/>.
"""Base class for OpenCL heterogeneous device mapping models."""
import pathlib
import typing
import pandas as pd
from deeplearning.clgen.corpuses import atomizers
from labm8.py import app
FLAGS = app.FLAGS
class HeterogeneousMappingModel(object):
"""A model for predicting OpenCL heterogeneous device mappings.
Attributes:
__name__ (str): Model name.
__basename__ (str): Shortened name, used for files
"""
__name__ = None
__basename__ = None
def init(self, seed: int, atomizer: atomizers.AtomizerBase) -> None:
"""Initialize the model.
Do whatever is required to setup a new heterogeneous model here.
This method is called prior to training and predicting.
This method may be omitted if no initial setup is required.
Args:
seed (int): The seed value used to reproducible results. May be 'None',
indicating that no seed is to be used.
atomizer: The atomizer used to tokenize training examples.
"""
pass
# TODO(cec): Switch to exclusively pathlib.Path for argument.
def save(self, outpath: typing.Union[str, pathlib.Path]) -> None:
"""Save model state.
This must capture all of the relevant state of the model. It is up
to implementing classes to determine how best to save the model.
Args:
outpath (str): The path to save the model state to.
"""
raise NotImplementedError
# TODO(cec): Switch to exclusively pathlib.Path for argument.
def restore(self, inpath: typing.Union[str, pathlib.Path]) -> None:
"""Load a trained model from file.
This is called in place of init() if a saved model file exists. It
must restore all of the required model state.
Args:
inpath (str): The path to load the model from. This is the same path as
was passed to save() to create the file.
"""
raise NotImplementedError
def train(
self, df: pd.DataFrame, platform_name: str, verbose: bool = False
) -> None:
"""Train a model.
Args:
df: The dataframe of training data.
platform_name: The name of the gpu being trained for
verbose: Whether to print verbose status messages during training.
"""
raise NotImplementedError
def predict(
self, df: pd.DataFrame, platform_name: str, verbose: bool = False
) -> typing.Iterable[int]:
"""Make predictions for programs.
Args:
df: The dataframe of training data.
platform_name: The name of the gpu being trained for
verbose: Whether to print verbose status messages during training.
Returns:
A sequence of predicted 'y' values (optimal device mappings).
"""
raise NotImplementedError
| [
"chrisc.101@gmail.com"
] | chrisc.101@gmail.com |
aaef15c38545b71f401e174ffe09c7f011928a7f | e38e87ed5e500290ba0c2f774227920625ee5c54 | /examples/computing_embeddings.py | 7e84b597291985f1773e4aeaa58989017cf20739 | [
"Python-2.0",
"Apache-2.0"
] | permissive | ishine/text2vec | 39d363b94ddbc9e664939041ae63ad4e352b894b | 71842f67f7be9f0d4fee5b6e0e2562a1c553818e | refs/heads/master | 2023-08-25T19:49:54.215412 | 2021-11-05T06:37:18 | 2021-11-05T06:37:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
This basic example loads a pre-trained model from the web and uses it to
generate sentence embeddings for a given list of sentences.
"""
import sys
sys.path.append('..')
from text2vec import SBert
from text2vec import Word2Vec
def compute_emb(model):
# Embed a list of sentences
sentences = ['卡',
'银行卡',
'如何更换花呗绑定银行卡',
'花呗更改绑定银行卡',
'This framework generates embeddings for each input sentence',
'Sentences are passed as a list of string.',
'The quick brown fox jumps over the lazy dog.']
sentence_embeddings = model.encode(sentences)
print(type(sentence_embeddings), sentence_embeddings.shape)
# The result is a list of sentence embeddings as numpy arrays
for sentence, embedding in zip(sentences, sentence_embeddings):
print("Sentence:", sentence)
print("Embedding:", embedding)
print("")
if __name__ == '__main__':
sbert_model = SBert('paraphrase-multilingual-MiniLM-L12-v2')
compute_emb(sbert_model)
w2v_model = Word2Vec('w2v-light-tencent-chinese')
compute_emb(w2v_model)
| [
"shibing624@126.com"
] | shibing624@126.com |
adf0f62316b8fa8d727bcb11f2ea9d19dc9f0b06 | 7c8c7fe5a7aea0a023624b31433f281a642bd488 | /tslearn/tests/test_variablelength.py | ba22bb86e4e2cd321192159d489d56ebcc001bcb | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | wzpy/tslearn | 918b969a69fd1a003037c94224ff31959f8b271f | 2d7f3ea710b8d7f21ab24d212e930046b9c142ad | refs/heads/master | 2020-07-15T04:38:48.217622 | 2019-08-30T22:04:05 | 2019-08-30T22:04:05 | 205,481,146 | 1 | 0 | BSD-2-Clause | 2019-08-31T01:53:11 | 2019-08-31T01:53:11 | null | UTF-8 | Python | false | false | 3,258 | py | import numpy as np
from numpy.testing import assert_allclose, assert_array_less
from sklearn.model_selection import cross_val_score, KFold
from tslearn.neighbors import KNeighborsTimeSeriesClassifier
from tslearn.svm import TimeSeriesSVC, TimeSeriesSVR
from tslearn.clustering import GlobalAlignmentKernelKMeans, TimeSeriesKMeans
from tslearn.utils import to_time_series_dataset
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
def test_variable_length_knn():
X = to_time_series_dataset([[1, 2, 3, 4],
[1, 2, 3],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8]])
y = [0, 0, 1, 1]
clf = KNeighborsTimeSeriesClassifier(metric="dtw", n_neighbors=1)
clf.fit(X, y)
assert_allclose(clf.predict(X), [0, 0, 1, 1])
clf = KNeighborsTimeSeriesClassifier(metric="softdtw", n_neighbors=1)
clf.fit(X, y)
assert_allclose(clf.predict(X), [0, 0, 1, 1])
def test_variable_length_svm():
X = to_time_series_dataset([[1, 2, 3, 4],
[1, 2, 3],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8]])
y = [0, 0, 1, 1]
rng = np.random.RandomState(0)
clf = TimeSeriesSVC(kernel="gak", random_state=rng)
clf.fit(X, y)
assert_allclose(clf.predict(X), [0, 0, 1, 1])
y_reg = [-1., -1.3, 3.2, 4.1]
clf = TimeSeriesSVR(kernel="gak")
clf.fit(X, y_reg)
assert_array_less(clf.predict(X[:2]), 0.)
assert_array_less(-clf.predict(X[2:]), 0.)
def test_variable_length_clustering():
# TODO: here we just check that they can accept variable-length TS, not
# that they do clever things
X = to_time_series_dataset([[1, 2, 3, 4],
[1, 2, 3],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8]])
rng = np.random.RandomState(0)
clf = GlobalAlignmentKernelKMeans(n_clusters=2, random_state=rng)
clf.fit(X)
clf = TimeSeriesKMeans(n_clusters=2, metric="dtw", random_state=rng)
clf.fit(X)
clf = TimeSeriesKMeans(n_clusters=2, metric="softdtw", random_state=rng)
clf.fit(X)
def test_variable_cross_val():
# TODO: here we just check that they can accept variable-length TS, not
# that they do clever things
X = to_time_series_dataset([[1, 2, 3, 4],
[1, 2, 3],
[1, 2, 3, 4],
[1, 2, 3],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8],
[2, 5, 6, 7, 8, 9],
[3, 5, 6, 7, 8]])
y = [0, 0, 0, 0, 1, 1, 1, 1]
rng = np.random.RandomState(0)
cv = KFold(n_splits=2, shuffle=True)
for estimator in [
TimeSeriesSVC(kernel="gak", random_state=rng),
TimeSeriesSVR(kernel="gak"),
KNeighborsTimeSeriesClassifier(metric="dtw", n_neighbors=1),
KNeighborsTimeSeriesClassifier(metric="softdtw", n_neighbors=1)
]:
# TODO: cannot test for clustering methods since they don't have a
# score method yet
cross_val_score(estimator, X=X, y=y, cv=cv)
| [
"romain.tavenard@univ-rennes2.fr"
] | romain.tavenard@univ-rennes2.fr |
069516998750956b7549ff532bbaf794a91c42e7 | e638e9fda0e672fa9a414515d0c05a24ab55ad38 | /SparseMatrixMultiplication.py | 73e01038302f2d5dfc6d050502ff25dfad32a2a7 | [] | no_license | zjuzpz/Algorithms | 8d1c7d50429aa5540eb817dc5495a20fc3f11125 | 2df1a58aa9474f2ecec2ee7c45ebf12466181391 | refs/heads/master | 2021-01-21T05:55:48.768728 | 2020-08-04T22:44:08 | 2020-08-04T22:44:08 | 44,586,024 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | """
311. Sparse Matrix Multiplication
Given two sparse matrices A and B, return the result of AB.
You may assume that A's column number is equal to B's row number.
Example:
A = [
[ 1, 0, 0],
[-1, 0, 3]
]
B = [
[ 7, 0, 0 ],
[ 0, 0, 0 ],
[ 0, 0, 1 ]
]
| 1 0 0 | | 7 0 0 | | 7 0 0 |
AB = | -1 0 3 | x | 0 0 0 | = | -7 0 3 |
| 0 0 1 |
"""
# O(l * m * n)
# O(l * m)
class Solution(object):
def multiply(self, A, B):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
if not A or not B:
return []
res = [[0 for j in range(len(B[0]))] for i in range(len(A))]
for i in range(len(A)):
for j in range(len(A[0])):
if A[i][j] != 0:
for k in range(len(res[0])):
res[i][k] += A[i][j] * B[j][k]
return res
if __name__ == "__main__":
A = [[1, 0, 0], [-1, 0, 3]]
B = [[7, 0, 0], [0, 0, 0], [0, 0, 1]]
print(Solution().multiply(A, B))
| [
"zjuzpz@gmail.com"
] | zjuzpz@gmail.com |
e4ae6e2272d209c56491720c42c47c6bb8eb751e | 675989e2669b8b281b39de85dab2fe781cdaca6a | /macro/beam_profile/plots_beam.py | acd3481bec53ea81dba6acf6c9242a38deb0afa8 | [] | no_license | adamjaro/lmon | c439615c2aae861102e32440f823b05b7b054715 | fd80ae93620ff12f8043f04d19538355fbdff81a | refs/heads/master | 2023-06-01T06:29:09.749097 | 2023-05-08T17:55:55 | 2023-05-08T17:55:55 | 223,854,949 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,691 | py | #!/usr/bin/python3
from pandas import read_csv, DataFrame
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from scipy.stats import norm
from scipy.optimize import curve_fit
import numpy as np
#_____________________________________________________________________________
def main():
iplot = 2
funclist = []
funclist.append( plot_x ) # 0
funclist.append( plot_y ) # 1
funclist.append( plot_z ) # 2
funclist[iplot]()
#main
#_____________________________________________________________________________
def plot_x():
#x of primary vertex
infile = "data/vtx_18x275_3p3_r2.csv"
#infile = "data/vtx_18x275_3p4.csv"
inp = read_csv(infile)
#print(inp)
nbins = 60
#plt.style.use("dark_background")
#col = "lime"
col = "black"
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
set_axes_color(ax, col)
set_grid(plt, col)
hx = plt.hist(inp["x"], bins=nbins, color="blue", density=True, histtype="step", lw=2)
#Gaussian fit, bin centers and values
centers = (0.5*(hx[1][1:]+hx[1][:-1]))
fit_data = DataFrame({"E": centers, "density": hx[0]})
pars, cov = curve_fit(lambda x, mu, sig : norm.pdf(x, loc=mu, scale=sig), fit_data["E"], fit_data["density"])
#fit function
x = np.linspace(plt.xlim()[0], plt.xlim()[1], 300)
y = norm.pdf(x, pars[0], pars[1])
plt.plot(x, y, "-", label="norm", color="red")
ax.set_xlabel("$x$ (mm)")
ax.set_ylabel("Normalized counts")
leg = legend()
leg.add_entry(leg_lin("red"), "Gaussian fit:")
leg.add_entry(leg_txt(), "$\mu$ (mm): {0:.4f} $\pm$ {1:.4f}".format( pars[0], np.sqrt(cov[0,0]) ))
leg.add_entry(leg_txt(), "$\sigma$ (mm): {0:.4f} $\pm$ {1:.4f}".format( pars[1], np.sqrt(cov[1,1]) ))
leg.draw(plt, col)
fig.savefig("01fig.pdf", bbox_inches = "tight")
plt.close()
#plot_x
#_____________________________________________________________________________
def plot_y():
#y of primary vertex
infile = "data/vtx_18x275_3p3_r2.csv"
#infile = "data/vtx_18x275_3p4.csv"
inp = read_csv(infile)
nbins = 60
#plt.style.use("dark_background")
#col = "lime"
col = "black"
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
set_axes_color(ax, col)
set_grid(plt, col)
hx = plt.hist(inp["y"], bins=nbins, color="blue", density=True, histtype="step", lw=2)
#Gaussian fit, bin centers and values
centers = (0.5*(hx[1][1:]+hx[1][:-1]))
fit_data = DataFrame({"E": centers, "density": hx[0]})
pars, cov = curve_fit(lambda x, mu, sig : norm.pdf(x, loc=mu, scale=sig), fit_data["E"], fit_data["density"])
#fit function
x = np.linspace(plt.xlim()[0], plt.xlim()[1], 300)
y = norm.pdf(x, pars[0], pars[1])
plt.plot(x, y, "-", label="norm", color="red")
plt.rc("text", usetex = True)
plt.rc("text.latex", preamble=r"\usepackage{siunitx}")
ax.set_xlabel("$y$ (mm)")
ax.set_ylabel("Normalized counts")
leg = legend()
leg.add_entry(leg_lin("red"), "Gaussian fit:")
leg.add_entry(leg_txt(), "$\mu$ (\si{\micro\meter}): "+"{0:.4f} $\pm$ {1:.4f}".format( pars[0]*1e3, np.sqrt(cov[0,0]*1e3) ))
leg.add_entry(leg_txt(), "$\sigma$ (\si{\micro\meter}): "+"{0:.4f} $\pm$ {1:.4f}".format( pars[1]*1e3, np.sqrt(cov[1,1]*1e3) ))
leg.draw(plt, col)
fig.savefig("01fig.pdf", bbox_inches = "tight")
plt.close()
#plot_y
#_____________________________________________________________________________
def plot_z():
#z of primary vertex
infile = "data/vtx_18x275_3p3_r2.csv"
#infile = "data/vtx_18x275_3p4.csv"
inp = read_csv(infile)
nbins = 50
plt.style.use("dark_background")
col = "lime"
#col = "black"
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
set_axes_color(ax, col)
set_grid(plt, col)
hx = plt.hist(inp["z"], bins=nbins, color="blue", density=True, histtype="step", lw=2)
#Gaussian fit, bin centers and values
centers = (0.5*(hx[1][1:]+hx[1][:-1]))
fit_data = DataFrame({"E": centers, "density": hx[0]})
pars, cov = curve_fit(lambda x, mu, sig : norm.pdf(x, loc=mu, scale=sig), fit_data["E"], fit_data["density"])
#fit function
x = np.linspace(plt.xlim()[0], plt.xlim()[1], 300)
y = norm.pdf(x, pars[0], pars[1])
plt.plot(x, y, "-", label="norm", color="red")
ax.set_xlabel("$z$ (mm)")
ax.set_ylabel("Normalized counts")
leg = legend()
leg.add_entry(leg_lin("red"), "Gaussian fit:")
leg.add_entry(leg_txt(), "$\mu$ (mm): {0:.3f} $\pm$ {1:.3f}".format( pars[0], np.sqrt(cov[0,0]) ))
leg.add_entry(leg_txt(), "$\sigma$ (mm): {0:.3f} $\pm$ {1:.3f}".format( pars[1], np.sqrt(cov[1,1]) ))
leg.draw(plt, col)
fig.savefig("01fig.pdf", bbox_inches = "tight")
plt.close()
#plot_z
#_____________________________________________________________________________
def set_axes_color(ax, col):
#[t.set_color('red') for t in ax.xaxis.get_ticklines()]
#[t.set_color('red') for t in ax.xaxis.get_ticklabels()]
ax.xaxis.label.set_color(col)
ax.yaxis.label.set_color(col)
ax.tick_params(which = "both", colors = col)
ax.spines["bottom"].set_color(col)
ax.spines["left"].set_color(col)
ax.spines["top"].set_color(col)
ax.spines["right"].set_color(col)
#set_axes_color
#_____________________________________________________________________________
def set_grid(px, col="lime"):
px.grid(True, color = col, linewidth = 0.5, linestyle = "--")
#set_grid
#_____________________________________________________________________________
class legend:
def __init__(self):
self.items = []
self.data = []
def add_entry(self, i, d):
self.items.append(i)
self.data.append(d)
def draw(self, px, col=None, **kw):
leg = px.legend(self.items, self.data, **kw)
if col is not None:
px.setp(leg.get_texts(), color=col)
if col != "black":
leg.get_frame().set_edgecolor("orange")
return leg
#_____________________________________________________________________________
def leg_lin(col, sty="-"):
return Line2D([0], [0], lw=2, ls=sty, color=col)
#_____________________________________________________________________________
def leg_txt():
return Line2D([0], [0], lw=0)
#_____________________________________________________________________________
def leg_dot(fig, col, siz=8):
return Line2D([0], [0], marker="o", color=fig.get_facecolor(), markerfacecolor=col, markersize=siz)
#_____________________________________________________________________________
if __name__ == "__main__":
main()
| [
"jaroslav.adam@cern.ch"
] | jaroslav.adam@cern.ch |
95841f3405e5128f4dfbcdc76b3bc8bc7578f644 | eafd99d910af8cdcf1ec1b96d03c342e5306af49 | /tf-idf.py | 803e8ec98d0ee3310165b47a2f88d53407dc526f | [] | no_license | paulohq/cosine | 0323db87138a1f7b34f680b20f93b0a41a2c72b4 | f22c3c7b6af9eb149046d70654dd82a7bf69c738 | refs/heads/master | 2020-05-09T19:36:50.164238 | 2020-04-13T23:41:31 | 2020-04-13T23:41:31 | 181,383,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,179 | py | # teste de geração de tfidf do corpus
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
import math
def computeTF(wordDict, bagOfWords):
tfDict = {}
bagOfWordsCount = len(bagOfWords)
for word, count in wordDict.items():
tfDict[word] = count / float(bagOfWordsCount)
return tfDict
def computeIDF(documents):
import math
N = len(documents)
idfDict = dict.fromkeys(documents[0].keys(), 0)
for document in documents:
for word, val in document.items():
if val > 0:
idfDict[word] += 1
for word, val in idfDict.items():
idfDict[word] = math.log(N / float(val))
return idfDict
def computeTFIDF(tfBagOfWords, idfs):
tfidf = {}
for word, val in tfBagOfWords.items():
tfidf[word] = val * idfs[word]
return tfidf
print(stopwords.words('english'))
documentA = "the man keeps walking" #'the man went out for a walk'
documentB = "the children study" #'the children sat around the fire'
documentC = "the woman teach the lesson"
documentD = "the woman teach the children" #"the idiot speak shit"
bagOfWordsA = documentA.split(' ')
bagOfWordsB = documentB.split(' ')
bagOfWordsC = documentC.split(' ')
bagOfWordsD = documentD.split(' ')
uniqueWords = set(bagOfWordsA).union(set(bagOfWordsB)).union(set(bagOfWordsC)).union(set(bagOfWordsD))
numOfWordsA = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsA:
numOfWordsA[word] += 1
numOfWordsB = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsB:
numOfWordsB[word] += 1
numOfWordsC = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsC:
numOfWordsC[word] += 1
numOfWordsD = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsD:
numOfWordsD[word] += 1
print("Num Palavras A:",numOfWordsA)
print("Num Palavras B:",numOfWordsB)
print("Num Palavras C:",numOfWordsC)
print("Num Palavras D:",numOfWordsD)
tfA = computeTF(numOfWordsA, bagOfWordsA)
print("tfA", tfA)
tfB = computeTF(numOfWordsB, bagOfWordsB)
print("tfB", tfB)
tfC = computeTF(numOfWordsC, bagOfWordsC)
print("tfC", tfC)
tfD = computeTF(numOfWordsD, bagOfWordsD)
print("tfD", tfD)
idfs = computeIDF([numOfWordsA, numOfWordsB, numOfWordsC, numOfWordsD])
print("IDF:", idfs)
tfidfA = computeTFIDF(tfA, idfs)
tfidfB = computeTFIDF(tfB, idfs)
tfidfC = computeTFIDF(tfC, idfs)
tfidfD = computeTFIDF(tfD, idfs)
print("TFIDF A: ", tfidfA)
print("TFIDF B: ", tfidfB)
print("TFIDF C: ", tfidfC)
print("TFIDF D: ", tfidfD)
df = pd.DataFrame([tfidfA, tfidfB, tfidfC, tfidfD])
print("df")
print(df)
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform([documentA, documentB, documentC, documentD])
feature_names = vectorizer.get_feature_names()
print("feature names:", feature_names)
dense = vectors.todense()
denselist = dense.tolist()
dfsk = pd.DataFrame(denselist, columns=feature_names)
for doc in denselist:
print(doc)
n = 0.0
y = 0
for i in range(len(doc)):
n = n + doc[i] * doc[i]
n = math.sqrt(n)
print("l2 norm = ", n)
# for c in doc:
# print(c, ",")
# print("\n")
print("df sklearn:")
print(dfsk) | [
"="
] | = |
3c69f7778504bf4fa0dba20be9624a0e8fa8483a | 0ffdf8ab6c5a875bfd8c3e06456131a0f3abad62 | /contrib/devtools/update-translations.py | 9266596bcb8562a640361a6811c64b7a51f9e5df | [
"MIT"
] | permissive | FYNCOIN-Foundation/FYNCOIN | d06be9163090155a540b369512b9f6ec7f2410f3 | 835ce3be2fb20632fb9443293d86caad620a1f7e | refs/heads/master | 2020-03-26T11:55:35.065862 | 2018-09-02T10:26:22 | 2018-09-02T10:26:22 | 144,866,105 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 8,148 | py | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'fyn_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| [
"hemant.singh.leu@gmail.com"
] | hemant.singh.leu@gmail.com |
cbd89cb2e8587a7d490be95bfed1e308b413fcbe | aa8fe9e165df16bd17aa5720b8043c533adde9bb | /init/00_lockfile.py | 4843f9c2f344b55760f6b74481b55a42eabab1c1 | [] | no_license | teamdiamond/qtlab | 31d2ccaee2ada84a027f2160553f54757e6f6cdf | 67d5bbd58c5f4d4ac3914774b56071d51f121010 | refs/heads/master | 2022-12-14T12:07:35.223055 | 2019-06-25T06:53:57 | 2019-06-25T06:53:57 | 15,255,712 | 0 | 4 | null | 2022-12-07T23:37:45 | 2013-12-17T13:54:53 | Python | UTF-8 | Python | false | false | 322 | py | from lib import config, lockfile
import os
_lockname = os.path.join(config.get_execdir(), 'qtlab.lock')
lockfile.set_filename(_lockname)
del _lockname
msg = "QTlab already running, start with '-f' to force start.\n"
msg += "Press s<enter> to start anyway or just <enter> to quit."
lockfile.check_lockfile(msg)
| [
"wolfgangpfff@gmail.com"
] | wolfgangpfff@gmail.com |
4d5b0222e92f9df4bd437156b9910a3f4474331e | 13cf11440998376d3b52a49f1e4fb8936c360ac4 | /tests/saliency_tests/visualizer_tests/test_visualizer_utils.py | db7ce7e9b4dd0f1fb233f8a3f1d4c2e5d8a52f95 | [
"MIT"
] | permissive | k-ishiguro/chainer-chemistry | 87e3db724de0e99042d9585cd4bd5fff38169339 | aec33496def16e76bdfbefa508ba01ab9f79a592 | refs/heads/master | 2021-07-06T22:58:20.127907 | 2019-02-04T02:51:34 | 2019-02-04T02:51:34 | 169,345,375 | 1 | 1 | MIT | 2020-07-30T06:04:13 | 2019-02-06T02:27:39 | Python | UTF-8 | Python | false | false | 1,802 | py | import numpy
import pytest
from chainer_chemistry.saliency.visualizer.visualizer_utils import abs_max_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import min_max_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import normalize_scaler # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import red_blue_cmap # NOQA
def test_abs_max_scaler():
saliency = numpy.array([1., 2., 3.])
result = abs_max_scaler(saliency)
expected = numpy.array([1. / 3, 2. / 3., 1.])
assert numpy.allclose(result, expected)
# test with 0 arrays
saliency = numpy.array([0, 0, 0])
result = abs_max_scaler(saliency)
expected = numpy.array([0, 0, 0])
assert numpy.allclose(result, expected)
def test_min_max_scaler():
saliency = numpy.array([1., -3., 3.])
result = min_max_scaler(saliency)
expected = numpy.array([4. / 6, 0., 1.])
assert numpy.allclose(result, expected)
# test with 0 arrays
saliency = numpy.array([0, 0, 0])
result = min_max_scaler(saliency)
expected = numpy.array([0, 0, 0])
assert numpy.allclose(result, expected)
def test_normalize_scaler():
saliency = numpy.array([1., 2., 3.])
result = normalize_scaler(saliency)
expected = numpy.array([1./6., 2./6, 3./6.])
assert numpy.allclose(result, expected)
# test with 0 arrays
saliency = numpy.array([0, 0, 0])
result = normalize_scaler(saliency)
expected = numpy.array([0, 0, 0])
assert numpy.allclose(result, expected)
def test_red_blue_cmap():
assert red_blue_cmap(1) == (1., 0., 0.) # Red
assert red_blue_cmap(0) == (1., 1., 1.) # White
assert red_blue_cmap(-1) == (0., 0., 1.) # Blue
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
| [
"acc1ssnn9terias@gmail.com"
] | acc1ssnn9terias@gmail.com |
a3a4b899eeb29945f03056946232a708ce516fbb | 36cfda71d39c79ba671b8f86d473bc8b802ae348 | /C++问答Code/delete.py | 0fcdd4b60511ec7c85134158caccace5d5275a59 | [] | no_license | lichangke/QuestionAndAnswer | b05e9b0f2ea12c61a7a27f59c81bcf7ebd903c83 | dd89c2d786050c6b69c4ee93a9eef8d4a22fbfa6 | refs/heads/master | 2023-05-02T21:44:53.225373 | 2021-04-27T08:40:37 | 2021-04-27T08:40:37 | 357,549,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import os
import shutil
import os.path
import stat
DeleteDirList = [".idea","cmake-build-debug"]
def funcDeleteDir(path):
for parent, dirnames, filenames in os.walk(path): # 遍历文件夹下面的所有文件夹
for dirname in dirnames:
strfilepath = parent + os.sep + dirname
if os.path.isdir(strfilepath):
if dirname in DeleteDirList:
shutil.rmtree(strfilepath) # 删除此文件夹
else:
funcDeleteDir(strfilepath)
if __name__ == '__main__':
path = str(".")
funcDeleteDir(path) | [
"986740304@qq.com"
] | 986740304@qq.com |
d7eb16377f4e485d6f9ced4d428c49500c312ff0 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=1_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=81/params.py | 9c610d772bb2c0361c637b278eaa66262e55aa96 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.107095',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 81,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
07e4a5831a138c258c31661783f57549e8e2aa79 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/win_rm_configuration_py3.py | 7ee36d3a256e666ff72aa6de041509fd0721681d | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,036 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WinRMConfiguration(Model):
"""Describes Windows Remote Management configuration of the VM.
:param listeners: The list of Windows Remote Management listeners
:type listeners:
list[~azure.mgmt.compute.v2018_10_01.models.WinRMListener]
"""
_attribute_map = {
'listeners': {'key': 'listeners', 'type': '[WinRMListener]'},
}
def __init__(self, *, listeners=None, **kwargs) -> None:
super(WinRMConfiguration, self).__init__(**kwargs)
self.listeners = listeners
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
2f6a82d63491db26eebcd1c31b0fac8b1e2040a2 | d5e787f85b37f966ccdf0cd5f7a7061eae1c70a8 | /src/core/celery.py | 0def2c6a5fba8808a1f787d4065df7b5ec14710d | [
"MIT"
] | permissive | iBuilder-Tech/phase | 5ee6cd1fb410de0d067e7b5b8adfea3c4411b62c | cc8f9b9f3e2c31b139d5cce433667c8d5ba2c6f2 | refs/heads/main | 2023-07-10T18:24:20.291136 | 2021-08-31T13:22:17 | 2021-08-31T13:22:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings.production')
from django.conf import settings # noqa
app = Celery('phase')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def celery_debug_task(self):
print('This is a debug task to verify that Celery works')
| [
"thibault@miximum.fr"
] | thibault@miximum.fr |
b8dfd6bf8be08eccce64129d19717fa8d4ac4eed | 997c82f5d9684945fb2f5d5481dc4d251a93755f | /famapy/metamodels/bdd_metamodel_withObjects/models/bdd_model.py | af0274543db70bddfc413d11b3eaabfab872f8f1 | [] | no_license | jmhorcas/famapy-aafms | a6e45b5fff2c820037daf95151df5bc6895b1611 | bcc80f7061bed4d6bfd536f9d53cf195bffa01e6 | refs/heads/main | 2023-08-24T05:51:47.337325 | 2021-10-15T10:18:20 | 2021-10-15T10:18:20 | 389,559,981 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,424 | py | from collections import defaultdict
from dd.autoref import BDD, Function
from famapy.metamodels.cnf_metamodel.models.cnf_model import CNFNotation, CNFLogicConnective
class BDDModel:
"""A Binary Decision Diagram (BDD) representation of the feature model given as a CNF formula.
It relies on the dd module: https://pypi.org/project/dd/
"""
CNF_NOTATION = CNFNotation.JAVA_SHORT
NOT = CNF_NOTATION.value[CNFLogicConnective.NOT]
AND = CNF_NOTATION.value[CNFLogicConnective.AND]
OR = CNF_NOTATION.value[CNFLogicConnective.OR]
def __init__(self):
self.bdd = BDD() # BDD manager
self.cnf_formula = None
self.root = None
self.variables = []
def from_cnf(self, cnf_formula: str, variables: list[str]):
self.cnf_formula = cnf_formula
self.variables = variables
# Declare variables
for v in self.variables:
self.bdd.declare(v)
# Build the BDD
self.root = self.bdd.add_expr(self.cnf_formula)
# Reorder variables
# variable_order = self.bdd.vars
# var = self.bdd.var_at_level(0)
# level = self.root.level
# variable_order[self.root.var] = 0
# variable_order[var] = level
# self.bdd.reorder(variable_order)
# self.root = self.bdd.var(self.bdd.var_at_level(0))
def index(self, n: Function) -> int:
"""Position of the variable that labels the node `n` in the ordering (i.e., the level).
Example: node `n4` is labeled `B`, and `B` is in the second position of the ordering `[A,B,C]`.
thus var(n4) = 2.
"""
if n.node == -1 or n.node == 1: # index(n0) = index(n1) = s + 1, being s the number of variables.
return len(self.bdd.vars) + 1
else:
return n.level + 1
def get_high_node(self, node: Function) -> Function:
return ~node.high if node.negated and not self.is_terminal_node(node.high) else node.high
def get_low_node(self, node: Function) -> Function:
return ~node.low if node.negated and not self.is_terminal_node(node.low) else node.low
def is_terminal_node(self, node: Function) -> bool:
return node.var is None
# def traverse(self):
# root = self.root
# self.mark = defaultdict(bool)
# self._traverse(root)
# def _traverse(self, n):
# print('-----')
# print(f'n: {n} (var={n.var}), (level={n.level}), (id={n.node}), (negated={n.negated})')
# self.mark[n.node] = not self.mark[n.node]
# if not self.is_terminal_node(n):
# #level, low, high = self.bdd.succ(n)
# level = n.level
# low = n.low #self.get_low_node(n)
# high = n.high #self.get_high_node(n)
# print(f'|--level: {level}')
# print(f'|--low: {low} (var={low.var}), (level={low.level}), (id={low.node}), (negated={low.negated})')
# print(f'|--high: {high} (var={high.var}), (level={high.level}), (id={high.node}), (negated={high.negated})')
# if self.is_terminal_node(low) and low.negated:
# print(f'negated: {~low}')
# print('-----')
# if self.mark[n.node] != self.mark[low.node]:
# self._traverse(low)
# if self.mark[n.node] != self.mark[high.node]:
# self._traverse(high)
| [
"jhorcas@us.es"
] | jhorcas@us.es |
380d2de8a0216873416cca9c2d5e636526a7dd16 | e73003ad3417daf4eb4b4e9909b42225833aedea | /0x07-python-test_driven_development/0-add_integer.py | 0a8fd23207973a5fd688889a15bd00d764bc346b | [] | no_license | Alphamj/holbertonschool-higher_level_programming | 34f10da3407969928a333af8a6ef52d2817d838a | 379ed7fc70f8ba7e4c41e07b4ae804c21d540725 | refs/heads/master | 2023-03-15T13:11:04.545118 | 2020-09-25T01:38:33 | 2020-09-25T01:38:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | #!/usr/bin/python3
"""
My add module
add_integer: function that add two numbers
Return: the add of two intigers
"""
def add_integer(a, b=98):
""" Return the add of intigers
a and b are intigers
"""
if not isinstance(a, (int, float)) or isinstance(a, bool):
raise TypeError("a must be an integer")
elif not isinstance(b, (int, float)) or isinstance(b, bool):
raise TypeError("b must be an integer")
else:
a = int(round(a))
b = int(round(b))
return (a + b)
| [
"1482@holbertonschool.com"
] | 1482@holbertonschool.com |
da35d8a5edab37d07c1c2a4664be8b6030f98f66 | 35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d | /Python_Study/Selenium自动化/Day02/Email/send_email.py | f4b9ce3225290a4c822ebb3ee2db697acd756345 | [] | no_license | KongChan1988/51CTO-Treasure | 08b4ca412ad8a09d67c1ea79c7149f8573309ca4 | edb2e4bd11d39ac24cd240f3e815a88361867621 | refs/heads/master | 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 | Python | UTF-8 | Python | false | false | 2,369 | py | # -*- coding:utf-8 -*-
# Author:D.Gray
import unittest
import smtplib
import time
import os
from email.mime.text import MIMEText
from email.header import Header
from HTMLTestRunner import HTMLTestRunner
print("开始发送邮件".center(50,"-"))
def sendReport(file_new):
with open(file_new,'rb') as f:
email_body = f.read()
msg = MIMEText
# print("发送邮件".center(50,"-"))
# def sendReport(file_new):
# '''
#
# :param file_new:
# :return:
# '''
# with open(file_new,"rb") as f:
# new_body = f.read()
# msg = MIMEText(new_body,"html","utf-8") #构造MIMEText对象,作为邮件内容的形式进行附加
# msg["Subject"] = Header("自动化测试报告","utf-8")
# msg["From"] = "wangwei@linkmores.com" #发送地址
# msg["to"] = "wangwei@linkmores.com" #收件地址
#
# smtp = smtplib.SMTP("smtp.mxhichina.com") #邮件服务器地址
# smtp.login("wangwei@linkmores.com","sdchendijayD1988") #邮箱账号和密码
# smtp.sendmail(msg["From"],msg["to"].split(";"),msg.as_string()) #多个收件人用 ;号分割
# smtp.quit()
# print("The HTML Send Out".center(50,"-") )
#
# def newReport(testReport):
# lists = os.listdir(testReport) #操作本地目录 列出本地目录文件
# lists2 = sorted(lists) #获得排序后的测试报告列表
# file_name = os.path.join(testReport,lists2[-1]) #获得最新一条HTML报告
# # print(file_name)
# return file_name
#
# print("开始运行".center(50,"-"))
# if __name__ == '__main__':
# test_dir = "E:\\python_work\\51CTO_Python\Selenium自动化\Day02\Email" #测试用例路径
# test_report = "E:\\python_work\\51CTO_Python\Selenium自动化\Day02\Email\TestReport" #测试报告路径
#
# discover = unittest.defaultTestLoader.discover(test_dir,"baidu.py") #加载测试函数
# now = time.strftime("%Y-%m-%d %H%M%S") #当前时间
# file_path = os.path.join(test_report,"%sresult.html"%now) #拼接出测试报告名称
# with open(file_path,"wb") as fe:
# runner = HTMLTestRunner(stream=fe,title="测试结果",description="测试执行结果")
# runner.run(discover)
# new_report = newReport(test_report) #获取最新测试报告
# print(new_report)
# sendReport(new_report) #发送最新测试报告
| [
"wangwei_198811@163.com"
] | wangwei_198811@163.com |
c47994dc7e144c848612afbf957dd5ef9965dc65 | 079c07c5d97eb60d36269e27309e84b25ea0aaeb | /guidehero-backend/tests/api/ask/test_set_askers.py | fa36cc6582edab111b92747b6fc3a4ca8ef0038f | [] | no_license | itdream-dev/python | 3aa44329673f05e2a86e1cba56cb88101c777233 | eda81b802b99f45933bdf0d22b508837cfa538f0 | refs/heads/master | 2023-03-05T12:27:42.776870 | 2020-05-11T15:54:45 | 2020-05-11T15:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | import unittest
import json
from ..base import ApiBaseTestCase
from lib.models.card import Card
from lib.models.user import User
from lib.models.user_role_card import UserRoleCard
from lib.models.card_role import CardRole
@unittest.skip('set_askers endpoint is obsolete')
class SetAskersTestCase(ApiBaseTestCase):
def setUp(self):
super(SetAskersTestCase, self).setUp()
# input data
self.owner = User(username='owner')
self.db.session.add(self.owner)
self.deck = Card(type=Card.DECK, name='test_deck', creator=self.owner, is_ask_mode_enabled=True)
self.db.session.add(self.deck)
self.asker = User(username='asker')
self.db.session.add(self.asker)
self.db.session.commit()
self.asker_ids = None
def call_target(self):
asker_ids = self.asker_ids
if asker_ids is None:
asker_ids = [self.asker.id]
data = {
'deck_id': self.deck.id,
'user_ids': asker_ids,
}
response = self.client.post('/api/v1/deck/set_askers', data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status, '200 OK')
return response
def assert_askers(self, asker_ids):
urc_s = list(UserRoleCard.query.filter(UserRoleCard.card_id == self.deck.id, UserRoleCard.role_id == CardRole.JOINED).all())
self.assertEqual(len(urc_s), len(asker_ids))
urc_s = sorted(urc_s, key=lambda it: it.user_id)
asker_ids = sorted(asker_ids)
for idx in range(0, len(asker_ids)):
self.assertEqual(urc_s[idx].user_id, asker_ids[idx])
def test_success(self):
self.call_target()
self.assert_askers([self.asker.id])
| [
"skyclean906@gmail.com"
] | skyclean906@gmail.com |
916623c6560a309b4929336812aad08d5b56189e | d253cb1e5b52a67a2e20030e6f0f9ddcdbcbd3aa | /config/testing.py | f3ddff8c058d926e91e16aadf7a6bd2603e50f2f | [] | no_license | TheFifthMan/flask-basic-template | 84e3c8b0acdbd2ede121f267e92ca0ee298e1bce | 313ce8c6827b1d33cfb0039c57d6d1aa4be8555b | refs/heads/master | 2020-04-07T19:03:19.565438 | 2019-01-29T05:44:03 | 2019-01-29T05:44:03 | 158,634,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | from .general import Config
import os
class TestingConfig(Config):
Testing = True
SQLALCHEMY_DATABASE_URI = os.getenv("TEST_SQLALCHEMY_DATABASE_URI") or "mysql+pymysql://root:qwe123@127.0.0.1/flasky"
| [
"John.Wen@ehealth.com"
] | John.Wen@ehealth.com |
3aab521adda4453f55ff28cfe6d24b1ec4c84c96 | 757c2daa5e2ea70005783d1e5ac78aec47712e9c | /python/elb/61-create-elb.py | bf88664c92a82190829253256ebdc602123d79a9 | [] | no_license | obulpathi/aws | ab9e6f6a62e8d76f196b063da87c8d8c0d09d25e | 35c2181377c4c536b7e1d6fb9386705a90f85763 | refs/heads/master | 2021-01-17T04:43:10.358333 | 2017-04-10T21:02:23 | 2017-04-10T21:02:23 | 21,324,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | import boto.ec2.elb
from boto.ec2.elb import HealthCheck
elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
# ELB requires a few pieces to be setup
hc = HealthCheck(
interval=20,
healthy_threshold=3,
unhealthy_threshold=5,
target='TCP:22'
# target='HTTP:8080/health'
)
zones = ['us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1d']
ports = [(80, 80, 'http')]
#ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
# Now create a new load balancer
lb = elb_conn.create_load_balancer('pywebdev-lb', zones, ports)
print 'New ELB: ', lb
print 'New ELB public DNS: ', lb.dns_name
# Add the health check configuration to the ELB.
lb.configure_health_check(hc)
| [
"obulpathi@gmail.com"
] | obulpathi@gmail.com |
988ece74cdaa2dcd3a36b809507d1bc66cfb90f4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03072/s343667969.py | 87c47d33b4536d07dce370dc61baa549b2bf3c3d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | n = int(input())
h = list(map(int, input().split()))
res = 1
h_max = h[0]
for i in range(1, len(h)):
if h[i] >= h_max:
res += 1
h_max = h[i]
print(res)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c2e31a747260c7ac46d579c9c49836e9b9311df8 | 63ce52a8dcbbb4f64b1f3265f54904a928128af6 | /ben_projects/RoboQuasar1.0/camera/analyzers.py | d6eea271bf5e1ef59b86465592640a1e97d35357 | [] | no_license | Woz4tetra/Self-Driving-Buggy | 0ab629242e07ad8aa706573bbab0809f895d5533 | ceba78445e1b0dcc6922cd67e8be23d78eb1667a | refs/heads/master | 2021-01-10T18:51:35.253412 | 2016-01-15T02:56:50 | 2016-01-15T02:56:50 | 40,282,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,281 | py | # applies houghlines or cascade classfier filters on an input camera frame
import numpy as np
import cv2
class LineFollower:
def __init__(self, expected, y_bottom, width, height):
self.centerRho, self.centerTheta = expected
self.width, self.height = width, height
self.yBottom = y_bottom
def isEqual(self, currentTheta, existedTheta, tolerance):
'''
if rho and theta are close enough,
set these lines as equivalent
To minimize # of lines on screen
'''
if abs(currentTheta - existedTheta) <= tolerance:
return True
return False
def merge(self, line_set):
occurance = len(line_set)
# if occurs more than once,
# merge and return a single median (rho, theta)
if occurance > 1:
medianTheta = np.median(line_set[0][0])
medianRho = np.median(line_set[0][1])
line_set = [occurance, medianRho, medianTheta]
else:
line_set = [occurance, line_set[0][0], line_set[0][1]]
return line_set
def findAverageLines(self, lines):
'''
findAvgLines is not supposed to draw;
use update to blur and find lines,
then use findAvglines func to return avgline
'''
rightRho, rightTheta, leftRho, leftTheta = [], [], [], []
# Divide lines into left and right groups, accoridng to sign of gradient
for currentLine in lines:
# notes on indexing: currentline has format[[x1, y1]]
(rho, theta) = (currentLine[0], currentLine[1])
if theta > 0:
# lines with negative gradient; (y increases downwards in frame)
leftTheta.append(theta)
leftRho.append(rho)
elif theta <= 0:
rightTheta.append(theta)
rightRho.append(rho)
if len(leftRho) != 0:
avgLeftRho = np.median([leftRho])
avgLeftTheta = np.median([leftTheta])
else:
(avgLeftRho, avgLeftTheta) = (0, 0)
if len(rightRho) != 0:
avgRightRho = np.median([rightRho])
avgRightTheta = np.median([rightTheta])
else: (avgRightRho, avgRightTheta) = (0, 0)
self.avgCenterRho = (avgLeftRho + avgRightRho) / 2.0
self.avgCenterTheta = (avgLeftTheta + avgRightTheta) / 2.0
return [(avgLeftRho, avgLeftTheta), (avgRightRho, avgRightTheta)]
def findLineCoord(self, rho, theta):
# turn avgLines into avgLinesCoord =[(x1, y1), (x2, y2)]
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * -b)
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * -b)
y2 = int(y0 - 1000 * a)
return (x1, y1, x2, y2)
def findDistLine(self,rho,theta):
(x1,y1,x2,y2) = self.findLineCoord(rho,theta)
Xavg = (x1+x2)/2
Yavg = (y1+y2)/2
return (self.width-Xavg,self.height-Yavg)
def key(self, item):
return item[0]
def difference(self, expected, actual, y_bottom):
return 0, 0 # distance difference, theta difference
''' need to filter out unneeded lines before taking avg'''
def update(self, frame, draw_avg=True, draw_all=True,
maxNumLines = 10, tolerance = 0.04):
frame = frame[90:360,::]
frame_lines = cv2.medianBlur(frame, 5)
frame_lines = cv2.Canny(frame_lines, 1, 100)
frame_lines = cv2.medianBlur(frame_lines, 3)
#smooth the edge detection image for easier use(above)
lines = cv2.HoughLines(frame_lines, rho=1, theta=np.pi / 180,
threshold=100,
min_theta=-60 * np.pi / 180,
max_theta= 60 * np.pi / 180)
linesDrawn = []
# updating lines, after merge in similar ones
# condense lines together (remove "duplicates")
if lines != None:
lines = lines[:,0] # removing one layer of brackets
'''
tests on merging and sorting starts here,
1) lines are sorted accoriding to their rho value (len)
(could also sort according to theta)
2) while loops compare neighboring ones to partition them,
3) merge func also append multiplicity/ occurance of that partition
4) all lines are sorted based on # of occurance
'''
lines.sort(axis = 0) #sort on rho
print("LINES HERE----", lines)
i = -1 #used in loop
while i < (len(lines)/2 - 1):
# len(lines) doublecounts (rho, theta)
i += 1
temp = []
temp.append(np.array(lines[i]))
while self.isEqual(lines[i][1], lines[i+1][1], tolerance):
# ugly syntax, but it's comparing neighboring theta vals
temp.append(lines[i+1])
i += 1
temp = self.merge(temp)
linesDrawn.append(temp)
linesDrawn = np.array(linesDrawn)
#print (len(linesDrawn), "number of lines after merge") #for information purposes
#Sort the lines by distance from center by the average lines
temp = []
if lines != None:
#makes a list with dist from center included
for i in range(len(linesDrawn)):
(rho, theta) = (linesDrawn[i][1], linesDrawn[i][2])
dist = self.findDist(rho,theta)
temp.append(dist,rho,theta)
#now want to sort this list based on dist
sorted_list = sorted(temp, key=self.key)
sorted_list = np.array(sorted_list)
#now draw the lines
if draw_all == True:
idx = 0
while idx < (maxNumLines/2 -1) and idx < len(sorted_list):
(dist, rho, theta) = (linesDrawn[idx][1], linesDrawn[idx][2])
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * -b)
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * -b)
y2 = int(y0 - 1000 * a)
cv2.line(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
idx +=1
if lines is not None:
averaged_line = self.findAverageLines(lines[:maxNumLines])
(rho1, theta1) = (averaged_line)[0]
(rho2, theta2) = (averaged_line)[1]
(x1, y1, x2, y2) = self.findLineCoord(rho1, theta1)
(x3, y3, x4, y4) = self.findLineCoord(rho2, theta2)
# get coordinates of lines before drawing
if draw_avg:
cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.line(frame, (x3, y3), (x4, y4), (0, 255, 0), 2)
else:
averaged_line = None, None
return frame, self.difference((self.centerRho, self.centerTheta
), averaged_line, self.yBottom)
| [
"woz4tetra@gmail.com"
] | woz4tetra@gmail.com |
841b4a196027f6232cc3d170f481da13576dc249 | 2e74cff6c9639f3903ccde662e79359d0724285e | /2019_late/20190826/aa.py | f51aeefafcffc501a66e2b90d4c18c5bb3a5c82a | [] | no_license | dodonmountain/algorithm | e29988071f651e51ba65e3926302f94a3d4074a5 | ce33e0d74220839aed4b17a47fa0069458a4324e | refs/heads/master | 2022-11-05T05:14:01.527015 | 2022-11-01T04:29:37 | 2022-11-01T04:29:37 | 200,008,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | import sys
sys.stdin = open('input4881.txt', 'r')
def backtracking(k, n, sum_temp):
global min_sum
if k == n:
# print('sum_temp_final: {}'.format(sum_temp))
if min_sum > sum_temp:
min_sum = sum_temp
return
if sum_temp > min_sum: # 시간 초과 나서 가지치기 해줌!
return
for i in range(N):
if used_col[i]:
continue
sum_temp += lst[k][i]
# print('lst[{}][{}]: {}, sum_temp: {}'.format(k, i, lst[k][i], sum_temp))
used_col[i] = True
backtracking(k + 1, n, sum_temp)
sum_temp -= lst[k][i]
used_col[i] = False
for t in range(1, int(input()) + 1):
N = int(input())
lst = []
for j in range(N):
lst.append(list(map(int, input().split())))
used_col = [False] * N
min_sum = 10*N
backtracking(0, N, 0)
print('#{} {}'.format(t, min_sum)) | [
"lkh151515@gmail.com"
] | lkh151515@gmail.com |
31178d0de5aff1e94f4abb2ac1bf30250a058b35 | f2889a13368b59d8b82f7def1a31a6277b6518b7 | /203.py | 4cc915c1697a80508539b0d9a6f8f2a77f90cc65 | [] | no_license | htl1126/leetcode | dacde03de5c9c967e527c4c3b29a4547154e11b3 | c33559dc5e0bf6879bb3462ab65a9446a66d19f6 | refs/heads/master | 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | # ref: https://leetcode.com/discuss/33150/python-solution
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def removeElements(self, head, val):
"""
:type head: ListNode
:type val: int
:rtype: ListNode
"""
dummy = ListNode(-1)
dummy.next = head
next = dummy
while next is not None and next.next is not None:
if next.next.val == val:
next.next = next.next.next
else:
next = next.next
return dummy.next
if __name__ == '__main__':
sol = Solution()
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(6)
head.next.next.next = ListNode(3)
head.next.next.next.next = ListNode(4)
head.next.next.next.next.next = ListNode(5)
head.next.next.next.next.next.next = ListNode(6)
res = sol.removeElements(head, 6)
while res:
print res.val
res = res.next
| [
"b93902098@ntu.edu.tw"
] | b93902098@ntu.edu.tw |
0b0c72893b0cb54445a840ddd464ccffc5ea4a81 | b73e14ea63e0d728a23b780bd5b0eb4d8fac4362 | /hassio-google-drive-backup/backup/drive/folderfinder.py | 86319775f85ffbcf72329dd692eaedf91c8715ae | [
"MIT"
] | permissive | agusalex/hassio-google-drive-backup | 7c722c26e2c45382aeabef706842674ba6c176d3 | c97fd6e7e4f95d48b85d3cfe67a01bdc2103da9c | refs/heads/master | 2023-04-03T06:42:49.797408 | 2021-04-05T23:30:33 | 2021-04-05T23:30:33 | 354,999,639 | 0 | 1 | MIT | 2021-04-05T23:30:33 | 2021-04-05T23:26:33 | null | UTF-8 | Python | false | false | 7,601 | py | import os
import os.path
from datetime import timedelta
from typing import Any, Dict
from aiohttp.client_exceptions import ClientResponseError
from injector import inject, singleton
from ..config import Config, Setting
from ..exceptions import (BackupFolderInaccessible, BackupFolderMissingError,
GoogleDrivePermissionDenied, LogInToGoogleDriveError)
from ..time import Time
from .driverequests import DriveRequests
from ..logger import getLogger
logger = getLogger(__name__)
FOLDER_MIME_TYPE = 'application/vnd.google-apps.folder'
FOLDER_NAME = 'Home Assistant Snapshots'
FOLDER_CACHE_SECONDS = 30
@singleton
class FolderFinder():
@inject
def __init__(self, config: Config, time: Time, drive_requests: DriveRequests):
self.config = config
self.drivebackend: DriveRequests = drive_requests
self.time = time
# The cached folder id
self._folderId = None
# When the fodler id was last cached
self._folder_queryied_last = None
# These get set when an existing folder is found and should cause the UI to
# prompt for what to do about it.
self._existing_folder = None
self._use_existing = None
def resolveExisting(self, val):
if self._existing_folder:
self._use_existing = val
else:
self._use_existing = None
async def get(self):
if self._existing_folder and self._use_existing is not None:
if self._use_existing:
await self.save(self._existing_folder)
else:
await self.create()
self._use_existing = None
if not self._folder_queryied_last or self._folder_queryied_last + timedelta(seconds=FOLDER_CACHE_SECONDS) < self.time.now():
try:
self._folderId = await self._readFolderId()
except (BackupFolderMissingError, BackupFolderInaccessible):
if not self.config.get(Setting.SPECIFY_SNAPSHOT_FOLDER):
# Search for a folder, they may have created one before
self._existing_folder = await self._search()
if self._existing_folder:
self._folderId = self._existing_folder.get('id')
else:
# Create folder, since no other folder is available
await self.create()
else:
raise
self._folder_queryied_last = self.time.now()
return self._folderId
def getExisting(self):
return self._existing_folder
async def save(self, folder: Any) -> str:
if not isinstance(folder, str):
folder = folder.get('id')
logger.info("Saving snapshot folder: " + folder)
with open(self.config.get(Setting.FOLDER_FILE_PATH), 'w') as folder_file:
folder_file.write(folder)
self._folderId = folder
self._folder_queryied_last = self.time.now()
self._existing_folder = None
def reset(self):
if os.path.exists(self.config.get(Setting.FOLDER_FILE_PATH)):
os.remove(self.config.get(Setting.FOLDER_FILE_PATH))
self._folderId = None
self._folder_queryied_last = None
self._existing_folder = None
def getCachedFolder(self):
return self._folderId
def deCache(self):
self._folderId = None
self._folder_queryied_last = None
async def _readFolderId(self) -> str:
# First, check if we cached the drive folder
if not os.path.exists(self.config.get(Setting.FOLDER_FILE_PATH)):
raise BackupFolderMissingError()
if os.path.exists(self.config.get(Setting.FOLDER_FILE_PATH)):
with open(self.config.get(Setting.FOLDER_FILE_PATH), "r") as folder_file:
folder_id: str = folder_file.readline()
if await self._verify(folder_id):
return folder_id
else:
raise BackupFolderInaccessible(folder_id)
async def _search(self) -> str:
folders = []
try:
async for child in self.drivebackend.query("mimeType='" + FOLDER_MIME_TYPE + "'"):
if self._isValidFolder(child):
folders.append(child)
except ClientResponseError as e:
# 404 means the folder doesn't exist (maybe it got moved?)
if e.status == 404:
"Make Error"
raise LogInToGoogleDriveError()
else:
raise e
if len(folders) == 0:
return None
folders.sort(key=lambda c: Time.parse(c.get("modifiedTime")))
# Found a folder, which means we're probably using the add-on from a
# previous (or duplicate) installation. Record and return the id but don't
# persist it until the user chooses to do so.
folder = folders[len(folders) - 1]
logger.info("Found " + folder.get('name'))
return folder
    async def _verify(self, id):
        """Return True if the saved folder id still looks usable.

        id -- the Drive folder id read from the persisted folder file.
        With custom credentials the folder often can't be queried directly, so
        the id is accepted unverified and errors surface later (e.g. on upload).
        """
        if self.drivebackend.isCustomCreds():
            # If the user is using custom creds and specifying the snapshot folder, then chances are the
            # app doesn't have permission to access the parent folder directly.  Ironically, we can still
            # query for children and add/remove snapshots.  Not a huge deal, just
            # means we can't verify the folder still exists, isn't trashed, etc.  Just let it be valid
            # and handle potential errors elsewhere.
            return True
        # Query drive for the folder to make sure it still exists and we have the right permission on it.
        try:
            folder = await self.drivebackend.get(id)
            if not self._isValidFolder(folder):
                logger.info("Provided snapshot folder {0} is invalid".format(id))
                return False
            return True
        except ClientResponseError as e:
            if e.status == 404:
                # 404 means the folder doesn't exist (maybe it got moved?) but can also mean that we
                # just don't have permission to see the folder.  Often we can still upload into it, so just
                # let it pass without further verification and let other error handling (on upload) identify problems.
                return True
            else:
                raise e
        except GoogleDrivePermissionDenied:
            # Lost permission on the backup folder
            return False
def _isValidFolder(self, folder) -> bool:
try:
caps = folder.get('capabilities')
if folder.get('trashed'):
return False
elif not caps['canAddChildren']:
return False
elif not caps['canListChildren']:
return False
elif not caps.get('canDeleteChildren', False) and not caps.get('canRemoveChildren', False):
return False
elif folder.get("mimeType") != FOLDER_MIME_TYPE:
return False
except Exception:
return False
return True
    async def create(self) -> str:
        """Create the named backup folder in "My Drive", persist it, return its id."""
        logger.info('Creating folder "{}" in "My Drive"'.format(FOLDER_NAME))
        file_metadata: Dict[str, str] = {
            'name': FOLDER_NAME,
            'mimeType': FOLDER_MIME_TYPE,
            # Tag the folder so later searches can identify add-on-created folders.
            'appProperties': {
                "backup_folder": "true",
            },
        }
        folder = await self.drivebackend.createFolder(file_metadata)
        await self.save(folder)
        return folder.get('id')
| [
"stephen@beechens.com"
] | stephen@beechens.com |
8a52414ded940232afbcfaec121283acd02cb77d | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/models/limit_transform_unit_depth_recursion_mode.py | 0ba484b9233f3c3819003b1483ecb20fa575e34a | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 313 | py | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class LimitTransformUnitDepthRecursionMode(Enum):
    """Allowed values for limiting transform-unit depth recursion.

    Presumably DISABLED turns the limit off and LEVEL_1..LEVEL_4 cap recursion
    at increasing depths — confirm against the Bitmovin encoder API docs.
    """
    DISABLED = "DISABLED"
    LEVEL_1 = "LEVEL_1"
    LEVEL_2 = "LEVEL_2"
    LEVEL_3 = "LEVEL_3"
    LEVEL_4 = "LEVEL_4"
| [
"openapi@bitmovin.com"
] | openapi@bitmovin.com |
b11eff7cd71907dd1ef5d3d85b2f65a8f197cec1 | 27750e8d10776babf3ef459365e31f020071384b | /tensor2tensor/data_generators/desc2code_test.py | 24b7568d0b862caca630bdcb5fbadcc04dc2b4d7 | [
"Apache-2.0"
] | permissive | rmbrad/tensor2tensor | 364da5e065075b363fc539cea67ce12008cbd23a | 45a787e46b32bdb18b70f835cba0b3270267e19b | refs/heads/master | 2021-05-16T17:10:10.342682 | 2017-08-11T23:21:37 | 2017-08-11T23:21:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | # coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for desc2code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.data_generators import desc2code
import tensorflow as tf
CODE_CPP_IN = """
#include <iostream>
void main() { // This comment will be removed
// This too.
//
/* Not this one */
\t
\t
int a \t\n = 3;//
//
}
"""
CODE_CPP_OUT = ("#include <iostream> void main() { /* Not this one */ int a = "
"3; }")
class Desc2codeTest(tf.test.TestCase):
  def testCppPreprocess(self):
    """Check that C++ targets are correctly preprocessed (comments stripped, whitespace collapsed)."""
    cpp_pb = desc2code.Desc2CodeCppProblem()
    self.assertEqual(  # Newlines between lines are replaced by a single space
        cpp_pb.preprocess_target("firstline//comm1\nsecondline//comm2\n"),
        "firstline secondline")
    # Check both comment removal and whitespace collapsing
    self.assertEqual(cpp_pb.preprocess_target(CODE_CPP_IN), CODE_CPP_OUT)
    self.assertEqual(
        cpp_pb.preprocess_target("  not removed //abcd "),
        "not removed //abcd")
if __name__ == "__main__":
tf.test.main()
| [
"rsepassi@google.com"
] | rsepassi@google.com |
557b2ddc68afe5bdfefe61daf150e73970c74871 | 900b98964288a9cb0aaf2e45706ae2b32f92657f | /examples/adspygoogle/dfp/v201208/update_orders.py | bda89f799cad962bb451e7cf7fe364f5bbc28388 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | krux/adspygoogle | df2405c2042aa9c9a83d97b8442afe68572e3e2e | 6505a71122f45fe3e675f27f2c29f67a1768069b | refs/heads/master | 2022-02-22T08:55:19.777002 | 2022-02-11T22:42:19 | 2022-02-11T22:42:19 | 7,103,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the notes of each order up to the first 500. To
determine which orders exist, run get_all_orders.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# NOTE: legacy Python 2 example (print statements) for the old adspygoogle DFP
# client library; paths are resolved relative to this example's directory.
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v201208')
# Create statement object to get all orders (first 500 only).
filter_statement = {'query': 'LIMIT 500'}
# Get orders by statement.
response = order_service.GetOrdersByStatement(filter_statement)[0]
orders = []
if 'results' in response:
  orders = response['results']
if orders:
  # Update each local order object by changing its notes.
  updated_orders = []
  for order in orders:
    # Archived orders cannot be updated.
    if not Utils.BoolTypeConvert(order['isArchived']):
      order['notes'] = 'Spoke to advertiser. All is well.'
      updated_orders.append(order)
  # Update orders remotely.
  orders = order_service.UpdateOrders(updated_orders)
  # Display results.
  if orders:
    for order in orders:
      print ('Order with id \'%s\', name \'%s\', advertiser id \'%s\', and '
             'notes \'%s\' was updated.'
             % (order['id'], order['name'], order['advertiserId'],
                order['notes']))
  else:
    print 'No orders were updated.'
else:
  print 'No orders found to update.'
| [
"charlie@schluting.com"
] | charlie@schluting.com |
263d4468d717c24d9dbb54540b3466b94e3b6850 | 6c46cde091086cc302fa417ad00283702221b487 | /인프런/섹션 6/6. 중복순열 구하기/AA3.py | adcf3c6bc22fe5386e6d6fed6fd8f95e690d09fe | [] | no_license | Hugo-Oh/study_DataStructure | 4da1e358ad1458f52075065d7bd54540cc8b8ad4 | da492bbe0267d73cefb71c7ada129cfc41b7dcee | refs/heads/master | 2023-06-19T14:40:14.590970 | 2021-07-21T15:07:11 | 2021-07-21T15:07:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | import sys
#sys.stdin = open("input.txt", "rt")
# Read N (values range over 1..N) and M (sequence length).
N, M = map(int, input().split())
arr = [0] * M  # current partial sequence being built
cnt = 0  # number of sequences printed so far
def DFS(n):
    """Print every length-M sequence over 1..N (permutations with repetition)."""
    global cnt
    if n == M:
        # A complete sequence: print it space-separated and count it.
        for i in arr:
            print(i, end = " ")
        print()
        cnt += 1
        return
    else:
        # Try every candidate value at position n, then recurse.
        for i in range(1, N + 1):
            arr[n] = i
            DFS(n+1)
DFS(0)
print(cnt)
| [
"hyok28@yonsei.ac.kr"
] | hyok28@yonsei.ac.kr |
3f2523ace4187d1f2fba4fa4a681546956844653 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_19/models/admin_api_token_response.py | 03c5ebe3b8f1b0eba86d1b226fd09b8c7ece9747 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,841 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.19
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_19 import models
class AdminApiTokenResponse(object):
    """Response wrapper holding a list of ``AdminApiToken`` items.

    Auto-generated swagger model: attribute metadata lives in ``swagger_types``
    (attribute name -> declared type) and ``attribute_map`` (attribute name ->
    JSON key in the API definition).
    """
    swagger_types = {
        'items': 'list[AdminApiToken]'
    }
    attribute_map = {
        'items': 'items'
    }
    required_args = {
    }
    def __init__(
        self,
        items=None,  # type: List[models.AdminApiToken]
    ):
        """
        Keyword args:
            items (list[AdminApiToken])
        """
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Only declared model attributes may be set.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `AdminApiTokenResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Unset attributes resolve to class-level Property placeholders; surface
        # them as missing (AttributeError) instead of returning the placeholder.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def __getitem__(self, key):
        # Dict-style read access, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `AdminApiTokenResponse`".format(key))
        return object.__getattribute__(self, key)
    def __setitem__(self, key, value):
        # Dict-style write access, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `AdminApiTokenResponse`".format(key))
        object.__setattr__(self, key, value)
    def __delitem__(self, key):
        # Dict-style delete, restricted to declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `AdminApiTokenResponse`".format(key))
        object.__delattr__(self, key)
    def keys(self):
        # Declared attribute names; makes dict(model)-style use possible.
        return self.attribute_map.keys()
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(AdminApiTokenResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AdminApiTokenResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
acfdefacf1fe49a47bbfeab9d602460b1834d794 | 097952b49b373e2a391cd7b7f32ac1732379beaa | /src/z3c/tabular/testing.py | a470ec0c1108886d51cc2dc0e97e3d75f2ce107b | [
"ZPL-2.1"
] | permissive | zopefoundation/z3c.tabular | d9942205aa1af3d62768ae702c403365ec9e7e54 | e222b9cc245e044e4a38d1b64fe1aec79465ff1f | refs/heads/master | 2023-06-21T20:17:56.129955 | 2020-12-16T16:12:51 | 2020-12-16T16:12:51 | 8,727,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id:$
"""
__docformat__ = "reStructuredText"
from zope.browserpage import metaconfigure
from zope.app.testing import setup
import z3c.macro.tales
import z3c.table.testing
###############################################################################
#
# testing setup
#
###############################################################################
def setUp(test):
    """Doctest setUp: build a placeful Zope test harness and register the
    'macro' TALES expression type plus the z3c.table adapters."""
    test.globs = {'root': setup.placefulSetUp(True)}
    metaconfigure.registerType('macro', z3c.macro.tales.MacroExpression)
    z3c.table.testing.setUpAdapters()
z3c.table.testing.setUpAdapters()
def tearDown(test):
    """Doctest tearDown: undo the placeful setup performed in setUp."""
    setup.placefulTearDown()
| [
"marius@gedmin.as"
] | marius@gedmin.as |
3d76d571204087973ff5d24b2ece337edc64f28d | be0a3aa7b83b87c5d2c257b538545bdded39c051 | /Chatbot_KG/model/KE/HolE.py | 7e95c8a6ef8798bcb9b6f12ab9a937dec413c0a4 | [
"Apache-2.0"
] | permissive | water123li/Chatbot_CN | 480e3bc6d6c0d8b6b0823452556acef14df1c2c3 | e63808030c6cc516020075cdcd0c332120a998fc | refs/heads/master | 2022-01-25T10:34:34.726243 | 2019-06-13T10:44:44 | 2019-06-13T10:44:44 | 192,504,292 | 1 | 0 | Apache-2.0 | 2019-06-18T09:01:55 | 2019-06-18T09:01:55 | null | UTF-8 | Python | false | false | 3,681 | py | #coding:utf-8
import numpy as np
import tensorflow as tf
from .Model import Model
class HolE(Model):
    # Circular convolution via FFT: ifft(fft(a) * fft(b)).
    # NOTE(review): appears unused here, and `.real` is not a Tensor attribute
    # in TF1 (tf.real(x) is the op) — confirm before relying on this method.
    def _cconv(self, a, b):
        return tf.ifft(tf.fft(a) * tf.fft(b)).real
    # Circular correlation via FFT: real(ifft(conj(fft(a)) * fft(b))).
    def _ccorr(self, a, b):
        a = tf.cast(a, tf.complex64)
        b = tf.cast(b, tf.complex64)
        return tf.real(tf.ifft(tf.conj(tf.fft(a)) * tf.fft(b)))
    r'''
    HolE employs circular correlations to create compositional representations.
    HolE can capture rich interactions but simultaneously remains efficient to compute.
    '''
    def _calc(self, head, tail, rel):
        # Score is -sigmoid(<r, ccorr(h, t)>); relation embedding is L2-normalized.
        relation_mention = tf.nn.l2_normalize(rel, 1)
        entity_mention = self._ccorr(head, tail)
        return -tf.sigmoid(tf.reduce_sum(relation_mention * entity_mention, 1, keep_dims = True))
    def embedding_def(self):
        #Obtaining the initial configuration of the model
        config = self.get_config()
        #Defining required parameters of the model, including embeddings of entities and relations
        self.ent_embeddings = tf.get_variable(name = "ent_embeddings", shape = [config.entTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.rel_embeddings = tf.get_variable(name = "rel_embeddings", shape = [config.relTotal, config.hidden_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
        self.parameter_lists = {"ent_embeddings":self.ent_embeddings, \
                                "rel_embeddings":self.rel_embeddings, }
    def loss_def(self):
        #Obtaining the initial configuration of the model
        config = self.get_config()
        #To get positive triples and negative triples for training
        #The shapes of pos_h, pos_t, pos_r are (batch_size, 1)
        #The shapes of neg_h, neg_t, neg_r are (batch_size, negative_ent + negative_rel)
        pos_h, pos_t, pos_r = self.get_positive_instance(in_batch = True)
        neg_h, neg_t, neg_r = self.get_negative_instance(in_batch = True)
        #Embedding entities and relations of triples, e.g. pos_h_e, pos_t_e and pos_r_e are embeddings for positive triples
        pos_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, pos_h), [-1, config.hidden_size])
        pos_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, pos_t), [-1, config.hidden_size])
        pos_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, pos_r), [-1, config.hidden_size])
        neg_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, neg_h), [-1, config.hidden_size])
        neg_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, neg_t), [-1, config.hidden_size])
        neg_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, neg_r), [-1, config.hidden_size])
        #Calculating score functions for all positive triples and negative triples
        #The shape of _p_score is (batch_size, 1, 1)
        #The shape of _n_score is (batch_size, negative_ent + negative_rel, 1)
        _p_score = tf.reshape(self._calc(pos_h_e, pos_t_e, pos_r_e), [-1, 1])
        _n_score = tf.reshape(self._calc(neg_h_e, neg_t_e, neg_r_e), [-1, config.negative_rel + config.negative_ent])
        #The shape of p_score is (batch_size, 1)
        #The shape of n_score is (batch_size, 1)
        p_score = _p_score
        n_score = tf.reduce_mean(_n_score, 1, keep_dims = True)
        #Calculating loss to get what the framework will optimize
        # Margin-based ranking loss: hinge on (positive - negative + margin).
        self.loss = tf.reduce_sum(tf.maximum(p_score - n_score + config.margin, 0))
    def predict_def(self):
        # Score the triples supplied for prediction with the same HolE scoring.
        config = self.get_config()
        predict_h, predict_t, predict_r = self.get_predict_instance()
        predict_h_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_h)
        predict_t_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_t)
        predict_r_e = tf.nn.embedding_lookup(self.rel_embeddings, predict_r)
        self.predict = tf.reduce_sum(self._calc(predict_h_e, predict_t_e, predict_r_e), 1, keep_dims = True)
"charlesxu86@163.com"
] | charlesxu86@163.com |
6253aae47154a92d62c55de0c48e638cda492064 | 6969dbf9ff8fabf811efa04cc76207e955c0d481 | /simics/monitorCore/idaFuns.py | 6430e8accb60cd4855aa86f78717c87114b37f66 | [] | no_license | heruix/RESim | c6f5a1919afa6872d3175b5b4012ea2b45438797 | bf514e9c08fced46ee752dd14d498971a059bc16 | refs/heads/master | 2020-06-16T13:40:32.389194 | 2019-07-06T00:14:48 | 2019-07-06T00:14:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,689 | py | import os
import json
class IDAFuns():
    """Lookup table of function boundaries exported from IDA (.funs JSON files).

    self.funs maps an integer function start address to a record dict with
    'start', 'end' and 'name' keys.
    """
    def __init__(self, path, lgr):
        """Load an optional base .funs file.

        path -- JSON file mapping address -> {start, end, name}; a missing
                file simply yields an empty table.
        lgr  -- logger (may be None; kept for parity with callers).
        """
        self.funs = {}
        self.lgr = lgr
        self.did_paths = []
        if os.path.isfile(path):
            with open(path) as fh:
                jfuns = json.load(fh)
            for sfun, rec in jfuns.items():
                self.funs[int(sfun)] = rec

    def getFunPath(self, path):
        """Return the .funs companion path for a binary, following symlinks."""
        fun_path = path + '.funs'
        if not os.path.isfile(fun_path):
            ''' No functions file, check for symbolic links '''
            if os.path.islink(path):
                actual = os.path.join(os.path.dirname(path), os.readlink(path))
                fun_path = actual + '.funs'
        return fun_path

    def add(self, path, offset):
        """Merge functions from path's .funs file, rebasing all addresses by offset.

        Each binary is only processed once; subsequent calls are no-ops.
        """
        if path in self.did_paths:
            return
        self.did_paths.append(path)
        funfile = self.getFunPath(path)
        if not os.path.isfile(funfile):
            return
        with open(funfile) as fh:
            newfuns = json.load(fh)
        for f, rec in newfuns.items():
            fun = int(f) + offset
            self.funs[fun] = {
                'start': fun,
                'end': rec['end'] + offset,
                'name': rec['name'],
            }

    def isFun(self, fun):
        """True if fun is a known function start address."""
        return fun in self.funs

    def getName(self, fun):
        """Return the function name for start address fun, or None if unknown."""
        rec = self.funs.get(fun)
        return rec['name'] if rec is not None else None

    def inFun(self, ip, fun):
        """True if address ip lies within the function starting at fun.

        Fixed: previously an unknown fun fell through and returned an implicit
        None; now it explicitly returns False.
        """
        rec = self.funs.get(fun)
        if rec is None:
            return False
        return rec['start'] <= ip <= rec['end']

    def getFun(self, ip):
        """Return the start address of the function containing ip, or None."""
        for fun, rec in self.funs.items():
            if rec['start'] <= ip <= rec['end']:
                return fun
        return None
"mfthomps@nps.edu"
] | mfthomps@nps.edu |
ffe101fce315842b655c25a42ead6035be8f11af | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_table26.py | b49983db23e74cea0ced01081727ac1e0965388c | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,139 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Reference workbook the generated output is compared against.
        self.set_filename('table26.xlsx')
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with tables."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # Column widths chosen to match the Excel-authored reference file.
        worksheet.set_column('C:D', 10.288)
        worksheet.set_column('F:G', 10.288)
        worksheet.add_table('C2:D3')
        worksheet.add_table('F3:G3', {'header_row': 0})
        # These tables should be ignored since the ranges are incorrect.
        # (Suppress the warnings add_table emits for them.)
        import warnings
        warnings.filterwarnings('ignore')
        worksheet.add_table('I2:J2')
        worksheet.add_table('L3:M3', {'header_row': 1})
        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
7e15463225ac1ec4cf055441229ae9fc9583bf7c | 19a32440205b2caeec67c73c10d917b5fb30a86a | /isi_sdk/models/groups_group_members.py | 7588806de998c9e5618d140042d2af2c4dbe5e33 | [
"MIT",
"Apache-2.0"
] | permissive | marrotte/isilon_sdk_python | 480e84312f5924a506aeb09c9c7cae79a2b9b7f4 | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | refs/heads/master | 2020-03-23T07:31:40.376316 | 2016-06-07T23:44:31 | 2016-06-07T23:44:31 | 141,277,076 | 1 | 0 | MIT | 2018-07-17T11:02:08 | 2018-07-17T11:02:08 | null | UTF-8 | Python | false | false | 3,949 | py | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class GroupsGroupMembers(object):
    """One page of group members plus an optional pagination resume token.

    Generated-model style: attribute metadata is kept in ``swagger_types``
    (attribute name -> declared type) and ``attribute_map`` (attribute name ->
    JSON key), both set per instance; values live in private slots behind
    properties.
    """

    def __init__(self):
        """Initialize an empty model with its attribute metadata."""
        self.swagger_types = {
            'members': 'list[GroupsGroupMember]',
            'resume': 'str'
        }
        self.attribute_map = {
            'members': 'members',
            'resume': 'resume'
        }
        self._members = None
        self._resume = None

    @property
    def members(self):
        """Get the member records in this page (list[GroupsGroupMember])."""
        return self._members

    @members.setter
    def members(self, members):
        """Set the member records in this page."""
        self._members = members

    @property
    def resume(self):
        """Get the resume token for continuing a previous listing call.

        The token must come from the previous call and cannot be combined
        with other options.
        """
        return self._resume

    @resume.setter
    def resume(self, resume):
        """Set the resume token for continuing a previous listing call."""
        self._resume = resume

    def to_dict(self):
        """Serialize the model to a plain dict, expanding child models."""
        def expand(value):
            return value.to_dict() if hasattr(value, "to_dict") else value

        out = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                out[name] = [expand(v) for v in value]
            elif hasattr(value, "to_dict"):
                out[name] = value.to_dict()
            elif isinstance(value, dict):
                out[name] = {k: expand(v) for k, v in value.items()}
            else:
                out[name] = value
        return out

    def to_str(self):
        """Pretty-printed string form of to_dict()."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other

    def __eq__(self, other):
        """Equal when the full attribute dictionaries match."""
        return self.__dict__ == other.__dict__
| [
"dmoxon@isilon.com"
] | dmoxon@isilon.com |
81400f6a60d5ce716e6bfe14b26e3e9e580e1531 | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/tables/4/4_2_0_1_table.py | df23e0fdf6df61f8139bc1cf8b1185d7b5dd5ad9 | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,520 | py | def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Specific humidity', 'units': 'kg kg-1'},
{'abbr': 1, 'code': 1, 'title': 'Relative humidity', 'units': '%'},
{'abbr': 2, 'code': 2, 'title': 'Humidity mixing ratio', 'units': 'kg kg-1'},
{'abbr': 3, 'code': 3, 'title': 'Precipitable water', 'units': 'kg m-2'},
{'abbr': 4, 'code': 4, 'title': 'Vapor pressure', 'units': 'Pa'},
{'abbr': 5, 'code': 5, 'title': 'Saturation deficit', 'units': 'Pa'},
{'abbr': 6, 'code': 6, 'title': 'Evaporation', 'units': 'kg m-2'},
{'abbr': 7, 'code': 7, 'title': 'Precipitation rate', 'units': 'kg m-2 s-1'},
{'abbr': 8, 'code': 8, 'title': 'Total precipitation', 'units': 'kg m-2'},
{'abbr': 9,
'code': 9,
'title': 'Large scale precipitation (non-convective)',
'units': 'kg m-2'},
{'abbr': 10,
'code': 10,
'title': 'Convective precipitation',
'units': 'kg m-2'},
{'abbr': 11, 'code': 11, 'title': 'Snow depth', 'units': 'm'},
{'abbr': 12,
'code': 12,
'title': 'Snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 13,
'code': 13,
'title': 'Water equivalent of accumulated snow depth',
'units': 'kg m-2'},
{'abbr': 14, 'code': 14, 'title': 'Convective snow', 'units': 'kg m-2'},
{'abbr': 15, 'code': 15, 'title': 'Large scale snow', 'units': 'kg m-2'},
{'abbr': 16, 'code': 16, 'title': 'Snow melt', 'units': 'kg m-2'},
{'abbr': 17, 'code': 17, 'title': 'Snow age', 'units': 'day'},
{'abbr': 18, 'code': 18, 'title': 'Absolute humidity', 'units': 'kg m-3'},
{'abbr': 19,
'code': 19,
'title': 'Precipitation type',
'units': 'Code table 4.201'},
{'abbr': 20,
'code': 20,
'title': 'Integrated liquid water',
'units': 'kg m-2'},
{'abbr': 21, 'code': 21, 'title': 'Condensate', 'units': 'kg kg-1'},
{'abbr': 22, 'code': 22, 'title': 'Cloud mixing ratio', 'units': 'kg kg-1'},
{'abbr': 23,
'code': 23,
'title': 'Ice water mixing ratio',
'units': 'kg kg-1'},
{'abbr': 24, 'code': 24, 'title': 'Rain mixing ratio', 'units': 'kg kg-1'},
{'abbr': 25, 'code': 25, 'title': 'Snow mixing ratio', 'units': 'kg kg-1'},
{'abbr': 26,
'code': 26,
'title': 'Horizontal moisture convergence',
'units': 'kg kg-1 s-1'},
{'abbr': 27, 'code': 27, 'title': 'Maximum relative humidity', 'units': '%'},
{'abbr': 28,
'code': 28,
'title': 'Maximum absolute humidity',
'units': 'kg m-3'},
{'abbr': 29, 'code': 29, 'title': 'Total snowfall', 'units': 'm'},
{'abbr': 30,
'code': 30,
'title': 'Precipitable water category',
'units': 'Code table 4.202'},
{'abbr': 31, 'code': 31, 'title': 'Hail', 'units': 'm'},
{'abbr': 32,
'code': 32,
'title': 'Graupel (snow pellets)',
'units': 'kg kg-1'},
{'abbr': 33,
'code': 33,
'title': 'Categorical rain',
'units': 'Code table 4.222'},
{'abbr': 34,
'code': 34,
'title': 'Categorical freezing rain',
'units': 'Code table 4.222'},
{'abbr': 35,
'code': 35,
'title': 'Categorical ice pellets',
'units': 'Code table 4.222'},
{'abbr': 36,
'code': 36,
'title': 'Categorical snow',
'units': 'Code table 4.222'},
{'abbr': 37,
'code': 37,
'title': 'Convective precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 38,
'code': 38,
'title': 'Horizontal moisture divergence',
'units': 'kg kg-1 s-1'},
{'abbr': 39,
'code': 39,
'title': 'Percent frozen precipitation',
'units': '%'},
{'abbr': 40, 'code': 40, 'title': 'Potential evaporation', 'units': 'kg m-2'},
{'abbr': 41,
'code': 41,
'title': 'Potential evaporation rate',
'units': 'W m-2'},
{'abbr': 42, 'code': 42, 'title': 'Snow cover', 'units': '%'},
{'abbr': 43,
'code': 43,
'title': 'Rain fraction of total cloud water',
'units': 'Proportion'},
{'abbr': 44, 'code': 44, 'title': 'Rime factor', 'units': 'Numeric'},
{'abbr': 45,
'code': 45,
'title': 'Total column integrated rain',
'units': 'kg m-2'},
{'abbr': 46,
'code': 46,
'title': 'Total column integrated snow',
'units': 'kg m-2'},
{'abbr': 51, 'code': 51, 'title': 'Total column water', 'units': 'kg m-2'},
{'abbr': 52,
'code': 52,
'title': 'Total precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 53,
'code': 53,
'title': 'Total snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 54,
'code': 54,
'title': 'Large scale precipitation rate',
'units': 'kg m-2 s-1'},
{'abbr': 55,
'code': 55,
'title': 'Convective snowfall rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 56,
'code': 56,
'title': 'Large scale rate water equivalent',
'units': 'kg m-2 s-1'},
{'abbr': 57, 'code': 57, 'title': 'Total snowfall rate', 'units': 'm s-1'},
{'abbr': 58,
'code': 58,
'title': 'Convective snowfall rate',
'units': 'm s-1'},
{'abbr': 59,
'code': 59,
'title': 'Large scale snowfall rate',
'units': 'm s-1'},
{'abbr': 60,
'code': 60,
'title': 'Snow depth water equivalent',
'units': 'kg m-2'},
{'abbr': None, 'code': 255, 'title': 'Missing'})
| [
"baudouin.raoult@ecmwf.int"
] | baudouin.raoult@ecmwf.int |
08e6f1e1db4df41d78df96ea52bdc0967f4d8f87 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/session_persistence.py | 60f23e36214ae6ab55ee3ba9361efc5946182bfb | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,074 | py | # coding: utf-8
import re
import six
class SessionPersistence:
    """Session persistence (sticky session) configuration for an ELB backend pool.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    # Attribute names whose values must be masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'type': 'str',
        'cookie_name': 'str',
        'persistence_timeout': 'int'
    }

    attribute_map = {
        'type': 'type',
        'cookie_name': 'cookie_name',
        'persistence_timeout': 'persistence_timeout'
    }

    def __init__(self, type=None, cookie_name=None, persistence_timeout=None):
        """SessionPersistence - a model defined in huaweicloud sdk.

        :param type: persistence type: SOURCE_IP, HTTP_COOKIE or APP_COOKIE.
        :param cookie_name: cookie name; only meaningful for APP_COOKIE.
        :param persistence_timeout: persistence timeout in minutes.
        """
        self._type = None
        self._cookie_name = None
        self._persistence_timeout = None
        self.discriminator = None

        # `type` is always assigned (required field of this model); the
        # optional fields are assigned only when explicitly provided.
        self.type = type
        if cookie_name is not None:
            self.cookie_name = cookie_name
        if persistence_timeout is not None:
            self.persistence_timeout = persistence_timeout

    @property
    def type(self):
        """Gets the type of this SessionPersistence.

        Session persistence type.  SOURCE_IP: requests from the same source
        IP go to the same backend server.  HTTP_COOKIE: the load balancer
        inserts a cookie into the first response, and later requests that
        carry it go to the backend server that handled the first request.
        APP_COOKIE: same idea, but the backend server generates the cookie.
        For TCP backends only SOURCE_IP takes effect; for HTTP backends only
        HTTP_COOKIE or APP_COOKIE take effect.

        :return: The type of this SessionPersistence.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this SessionPersistence.

        See the `type` property for the accepted values and their meaning.

        :param type: The type of this SessionPersistence.
        :type: str
        """
        self._type = type

    @property
    def cookie_name(self):
        """Gets the cookie_name of this SessionPersistence.

        Name of the cookie; may only be specified when the persistence type
        is APP_COOKIE.

        :return: The cookie_name of this SessionPersistence.
        :rtype: str
        """
        return self._cookie_name

    @cookie_name.setter
    def cookie_name(self, cookie_name):
        """Sets the cookie_name of this SessionPersistence.

        :param cookie_name: The cookie_name of this SessionPersistence.
        :type: str
        """
        self._cookie_name = cookie_name

    @property
    def persistence_timeout(self):
        """Gets the persistence_timeout of this SessionPersistence.

        Persistence timeout in minutes: [1, 60] for TCP/UDP backends,
        [1, 1440] for HTTP backends.  Ignored when type is APP_COOKIE.

        :return: The persistence_timeout of this SessionPersistence.
        :rtype: int
        """
        return self._persistence_timeout

    @persistence_timeout.setter
    def persistence_timeout(self, persistence_timeout):
        """Sets the persistence_timeout of this SessionPersistence.

        :param persistence_timeout: The persistence_timeout of this SessionPersistence.
        :type: int
        """
        self._persistence_timeout = persistence_timeout

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}

        # Walk the declared attributes, recursively converting nested models
        # (anything exposing to_dict) and masking sensitive values.
        # dict.items() replaces the former six.iteritems(); no six needed.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the JSON string representation of the model."""
        # stdlib json suffices here; the generated code previously pulled in
        # the third-party `simplejson` package for the identical call.
        import json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SessionPersistence):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
1143ef9ee7a22fa72650167e37cbfb456f031bf6 | f7c5e3f5834206a7b0d1dadd773d1de032f731e7 | /dmerce2/DMS/DGP/cipher.py | e22f959d5edf51dd21a61bfffe4001a9e3efd429 | [] | no_license | rbe/dmerce | 93d601462c50dfbbf62b577803ae697d3abde333 | 3cfcae894c165189cc3ff61e27ca284f09e87871 | refs/heads/master | 2021-01-01T17:06:27.872197 | 2012-05-04T07:22:26 | 2012-05-04T07:22:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,710 | py | import mainlib
class Cipher:
    "the base class for all blockcipher algorithms"

    # NOTE(review): written for Python 2 -- `/` in __init__ must behave as
    # integer division for __wpb to be usable as a chunk count.  Subclasses
    # are expected to provide KeyExpand, KeyInvert, Encrypt and Decrypt; the
    # `mainlib` helpers (String_Chop, String_Mince, S2L/L2S_Convert, ...)
    # are defined elsewhere in this project -- semantics assumed from their
    # names, TODO confirm against mainlib.

    def __init__(self, blocklen, wordlen, rounds, times):
        # blocklen / wordlen / rounds describe the cipher geometry; `times`
        # is the number of passes of the E-D-E multiple-encryption scheme
        # used by the key-schedule and ECB/CBC methods below.
        self.__blocklen = blocklen
        self.__wordlen = wordlen
        self.__rounds = rounds
        # words per block
        self.__wpb = self.__blocklen / self.__wordlen
        self.__times = times

    def NewData(self, data):
        # Split `data` into blocks of __blocklen, each block into __wpb
        # words, then convert every word from string to integer form.
        x = mainlib.String_Mince(mainlib.String_Chop(data, self.__blocklen), self.__wpb)
        for i in range(0, len(x)):
            for j in range(0, len(x[i])):
                x[i][j] = mainlib.S2L_Convert(x[i][j])
        self.__data = x

    def Fillup(self, data):
        # Pad `data` to a whole number of blocks (inverse of Filldown).
        return mainlib.String_Fillup(data, self.__blocklen)

    def Filldown(self, data):
        # Strip the padding added by Fillup.
        return mainlib.String_Filldown(data)

    def OutFormat(self, output):
        # Flatten the list-of-blocks-of-integer-words back into one string.
        x = ''
        for i in range(0, len(output)):
            for j in range(0, len(output[i])):
                x = x + mainlib.L2S_Convert(output[i][j], self.__wordlen)
        return x

    # Key Schedule
    def E_Key_Create(self, list):
        # Build the encryption key schedule for E-D-E chaining: the first
        # key is expanded normally, then pairs of (inverted, normal)
        # expanded keys for each extra pass.  Assumes len(list) covers
        # indices 0..times -- TODO confirm expected key-list length.
        k = []
        k.append(self.KeyExpand(list[0]))
        for i in range(1, self.__times, 2):
            k.append(self.KeyInvert(self.KeyExpand(list[i])))
            k.append(self.KeyExpand(list[i+1]))
        self.__ekey = k

    def D_Key_Create(self, list):
        # Decryption schedule: same construction on the reversed key list,
        # with the inversion pattern mirrored.
        list.reverse()
        k = []
        k.append(self.KeyInvert(self.KeyExpand(list[0])))
        for i in range(1, self.__times, 2):
            k.append(self.KeyExpand(list[i]))
            k.append(self.KeyInvert(self.KeyExpand(list[i+1])))
        self.__dkey = k

    def IVinit(self, data):
        # Convert an integer IV into a block (list of integer words).
        # NOTE(review): the outer loop recomputes the identical word list
        # __wpb times and only y[0] is kept -- looks redundant; verify.
        y = []
        data = mainlib.L2S_Convert(data, self.__blocklen)
        for i in range(0, self.__wpb):
            x = mainlib.String_Chop(data, self.__wordlen)
            for j in range(0, len(x)):
                x[j] = mainlib.S2L_Convert(x[j])
            y.append(x)
        y = y[0]
        self.__iv = y

    # ECB
    def ECB_Encrypt(self):
        "Electronic Codebook mode encryption"
        # Each block independently: encrypt once, then alternate
        # decrypt/encrypt passes (E-D-E) over the remaining key pairs.
        output = []
        for i in range(0, len(self.__data)):
            x = self.Encrypt(self.__data[i], self.__ekey[0])
            for j in range(1, self.__times, 2):
                x = self.Decrypt(x, self.__ekey[j])
                x = self.Encrypt(x, self.__ekey[j+1])
            output.append(x)
        return output

    def ECB_Decrypt(self):
        "Electronic Codebook mode decryption"
        # Mirror of ECB_Encrypt with the decryption key schedule.
        output = []
        for i in range(0, len(self.__data)):
            x = self.Decrypt(self.__data[i], self.__dkey[0])
            for j in range(1, self.__times, 2):
                x = self.Encrypt(x, self.__dkey[j])
                x = self.Decrypt(x, self.__dkey[j+1])
            output.append(x)
        return output

    # CBC
    def CBC_Encrypt(self):
        "Cipher Block Chaining mode encryption"
        # XOR each plaintext block with the previous ciphertext block
        # (the IV for the first block) before the E-D-E passes.
        output = []
        output.append(self.__iv)
        for i in range(0, len(self.__data)):
            x = []
            for l in range(0, self.__wpb):
                x.append(self.__data[i][l] ^ output[i][l])
            x = self.Encrypt(x, self.__ekey[0])
            for j in range(1, self.__times, 2):
                x = self.Decrypt(x, self.__ekey[j])
                x = self.Encrypt(x, self.__ekey[j+1])
            output.append(x)
        # Drop the IV so only ciphertext blocks are returned.
        output[0:1] = []
        return output

    def CBC_Decrypt(self):
        "Cipher Block Chaining mode decryption"
        # `date` holds IV + ciphertext blocks, so date[i] is the previous
        # ciphertext block to XOR after decrypting self.__data[i].
        output = []
        date = self.__data[:]
        date.insert(0, self.__iv)
        for i in range(0, len(self.__data)):
            x = self.__data[i][:]
            x = self.Decrypt(x, self.__dkey[0])
            for j in range(1, self.__times, 2):
                x = self.Encrypt(x, self.__dkey[j])
                x = self.Decrypt(x, self.__dkey[j+1])
            for l in range(0, self.__wpb):
                x[l] = x[l] ^ date[i][l]
            output.append(x)
        return output

    # CFB -- not implemented.
    def CFBE(self, data, key, iv):
        pass

    def CFBD(self, data, key, iv):
        pass

    # OFB -- not implemented.
    def OFBE(self, data, key, iv):
        pass

    def OFBD(self, data, key, iv):
        pass

    # API
    def Encipher(self, data, key):
        # High-level entry point: pad, load, ECB-encrypt, re-serialize.
        self.E_Key_Create(key)
        data = self.Fillup(data)
        self.NewData(data)
        c = self.ECB_Encrypt()
        return self.OutFormat(c)

    def Decipher(self, data, key):
        # Inverse of Encipher: load, ECB-decrypt, serialize, strip padding.
        self.D_Key_Create(key)
        self.NewData(data)
        p = self.ECB_Decrypt()
        p = self.OutFormat(p)
        p = self.Filldown(p)
        return p

    # Placeholders for further modes, not implemented:
    # Counter
    # BC
    # PCBC
    # CBCC
    # OFBNLF
    # PBC
    # PFB
    # CBCPD
| [
"ralf@art-of-coding.eu"
] | ralf@art-of-coding.eu |
e1323e36e8c66e4c7d890636ed43c37dfb9b447c | 9059d9cbad4188ed2980f551151b9678ffb68b44 | /mycode/13_exception/user_exception.py | c1e32cf9426a407991ac8f3a3d2e38c4390e01b1 | [] | no_license | mhee4321/python_basic | ad0e64fa21ecfab231a6627ba6abeea82d725690 | 86031975a9121efe5785e83f663255a7b4e4ba77 | refs/heads/master | 2023-02-11T20:31:54.353219 | 2021-01-07T05:44:31 | 2021-01-07T05:44:31 | 326,850,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # 사용자 정의 예외 클래스 선언
class NegativePriceException(Exception):
# constructor 선언
def __int__(self):
print("Price can't be Negative")
raise AttributeError
def calc_price(value):
price = value * 100
if price < 0:
# NegativePriceException를 강제로 발생시킨다.
raise NegativePriceException
return price
print(calc_price(10))
print(calc_price(-10)) | [
"nannanru@gmail.com"
] | nannanru@gmail.com |
9e050f1bf998c8bf9bdb136685e56d99f8240ab7 | 8d910dcc2d15f1b21de02b9b46aee23c3f1df4cc | /004-median-of-two-sorted-arrays.py | ee0f710d58279c8e8e36102961dcef78cf9425df | [] | no_license | MonikaBhasin7/leetcode-1 | a7fa6115a7d3b604f8a1abb311d4cdc43182edef | 77ff978aa259bd7e5da0d70037503c83e8cc78cc | refs/heads/master | 2020-08-08T06:39:48.724618 | 2019-10-08T21:48:02 | 2019-10-08T21:48:02 | 213,760,531 | 0 | 0 | null | 2019-10-08T21:41:21 | 2019-10-08T21:41:20 | null | UTF-8 | Python | false | false | 1,816 | py | """
Problem Link: https://leetcode.com/problems/median-of-two-sorted-arrays/
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class Solution:
    def findMedianSortedArrays(self, nums1, nums2):
        """Return the median of two sorted arrays.

        Binary-searches a partition of the shorter array such that every
        element left of the combined partition is <= every element right of
        it, giving O(log(min(m, n))) time.  (Fix: removed a leftover debug
        print of the array lengths.)

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        # Always binary-search the shorter array so partition bounds stay
        # valid and the run time is O(log(min(m, n))).
        if len(nums1) > len(nums2):
            return self.findMedianSortedArrays(nums2, nums1)

        x, y = len(nums1), len(nums2)
        low, high = 0, x
        while low <= high:
            # partitionX + partitionY elements form the combined left half.
            partitionX = (low + high) // 2
            partitionY = (x + y + 1) // 2 - partitionX

            if partitionX < x and nums2[partitionY - 1] > nums1[partitionX]:
                # Too few elements taken from nums1; move the cut right.
                low = partitionX + 1
            elif partitionX > 0 and nums1[partitionX - 1] > nums2[partitionY]:
                # Too many elements taken from nums1; move the cut left.
                high = partitionX - 1
            else:
                # Correct partition: median comes from the boundary values.
                if partitionX == 0:
                    maxLeft = nums2[partitionY - 1]
                elif partitionY == 0:
                    maxLeft = nums1[partitionX - 1]
                else:
                    maxLeft = max(nums1[partitionX - 1], nums2[partitionY - 1])

                # Odd total length: median is the largest left-side element.
                if (x + y) % 2 == 1:
                    return maxLeft

                if partitionX == x:
                    minRight = nums2[partitionY]
                elif partitionY == y:
                    minRight = nums1[partitionX]
                else:
                    minRight = min(nums1[partitionX], nums2[partitionY])

                return (maxLeft + minRight) / 2
"anant.kaushik2@gmail.com"
] | anant.kaushik2@gmail.com |
a85b8eda787f00e981f98efa8c36f34310fba38b | 8370083dbbbd32740ad1862637809396dc7984e2 | /paresh75/a1.py | ecd7b956c3d295ccc274e1b1a60f1a7febd2eb21 | [] | no_license | parshuramsail/PYTHON_LEARN | a919b14aab823e0f5e769d8936ddbfb357133db2 | 8c76720bf73f13cf96930e6d4d5128e6ba9aa535 | refs/heads/main | 2023-07-14T16:25:26.240555 | 2021-08-29T17:10:19 | 2021-08-29T17:10:19 | 401,095,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # READ
#f=open("ram.txt","rt")
#content=f.read(3) # read only 3 characters
#print(content)
#content=f.read(344)
#print(content)
#f.close()
#f=open("ram.txt","rt")
#content=f.read()
## print(line,end="")
#f.close()
#RADLINE()
#f=open("ram.txt","rt")
#print(f.readline()) # to print one line
#print(f.readline())#to print next line
#print(f.readline())#to print next line
#f.close()
#READINES()
# TO STORE LINES IN LIST
#f=open("ram.txt","rt")
##print(f.readlines())
#f.close()
#OUTPUT:
#['ram is good boy\n', 'ram is king of this universe\n', 'ram is very smart\n']
| [
"64275709+parshuramsail@users.noreply.github.com"
] | 64275709+parshuramsail@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.