blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
617a6254379110f96f106c107563e54e1020e433 | 4fdab62336c7b4bde236351fabdd79cdec0b3d20 | /post_questions/forms.py | 4c4c245e3a02abe390e3f312eabf270eee9106b9 | [] | no_license | aashiqms/ask_new | 8e6f76fbe41da68d32967c5ab3a14a76d469f50d | 7f7a5e1b5e9b498030415607b8419e90147cd597 | refs/heads/master | 2022-07-04T21:51:56.107360 | 2020-05-17T18:03:19 | 2020-05-17T18:03:19 | 264,604,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | from django import forms
from post_questions.models import Post, Comment, Answer
from django.forms import ModelForm
class QuestionForm(ModelForm):
class Meta:
model = Post
fields = ['author', 'questions']
# widget = {
# 'title': forms.TextInput(attrs={'class': 'textinputclass'}),
# }
class CommentForm(ModelForm):
class Meta:
model = Comment
fields = ['author', 'text']
# widget = {
# 'title': forms.TextInput(attrs={'class': 'textinputclass'}),
# }
class AnswerForm(ModelForm):
class Meta:
model = Answer
fields = ['author', 'text']
# widget = {
# 'title': forms.TextInput(attrs={'class': 'textinputclass'}),
# }
form = QuestionForm()
form_A = AnswerForm()
| [
"aashiqms@outlook.com"
] | aashiqms@outlook.com |
c49b0c124c8eeb7a4917b06d1acffade30e7bf1f | 72df811521e3da73187388c737599ddd0a4631fc | /START_PYTHON/4日/11.バンボクムンwhile/03.while.py | 4bc0180ed23f1f2e825dba7d5922b98565f5a4a0 | [] | no_license | munsangu/20190615python | e7480044cfa07dcb65e8ca168c7cf9ecb73ffe7a | c496bc471503ac0e2ba0d928c01085c8b569d173 | refs/heads/master | 2020-06-04T14:59:49.822865 | 2020-02-08T11:49:16 | 2020-02-08T11:49:16 | 192,071,616 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # 내가 원하는 만큼 입력받고 그 숫자의 평균을 구하는 프로그램
num = 1
count = 0
sum = 0
while num !=0: #num이 0이 아니면 반복
num = int(input("정수 입력: "))
count += 1 # 몇 번 입력받았는지 count
sum += num
count -= 1 # 0을 입력한 count 하나 제외
avg = sum / count
print("평균 : %.2f"%avg)
| [
"ds-3632@hanmail.net"
] | ds-3632@hanmail.net |
50a7fad07fc0b5935c80ee5dfac6b9b8555e9a9d | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/swnrei001/question3.py | 4e473093a1d821898c258c5628e999860a896c5f | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | def spam():
firstname = input("Enter first name:\n")
lastname = input("Enter last name:\n")
money = eval(input("Enter sum of money in USD:\n"))
country = input("Enter country name:\n")
letter = """Dearest """ + firstname + """\nIt is with a heavy heart that I inform you of the death of my father,
General Fayk """ + lastname + """, your long lost relative from Mapsfostol.
My father left the sum of """ + str(money) + """USD for us, your distant cousins.
Unfortunately, we cannot access the money as it is in a bank in """ + country + """.
I desperately need your assistance to access this money.
I will even pay you generously, 30% of the amount - """ + str(money * 0.3) + """USD,
for your help. Please get in touch with me at this email address asap.
Yours sincerely
Frank """ + lastname
print()
print(letter)
spam() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
e7371d1f3476d8e3b2d7580d5f8194845330d249 | b66c12a4304c6af00d58a1f83e453dbc739ae60d | /survey/features/about_us_steps.py | 3d0b8c9390b1acfe7127b065e4d6a78de8c6afde | [
"BSD-2-Clause"
] | permissive | madhavaramu/uSurvey | 060dae008f975a7cdb77ef8b0c5d820842422637 | 681e1d91fbedf94e840858e1ef09538777ce3e50 | refs/heads/uSurvey | 2020-04-11T06:28:17.220192 | 2016-12-06T13:24:45 | 2016-12-06T13:24:45 | 68,372,980 | 0 | 1 | null | 2016-09-16T11:03:44 | 2016-09-16T11:03:43 | null | UTF-8 | Python | false | false | 1,563 | py | from lettuce import *
from survey.features.page_objects.root import AboutPage, EditAboutUsPage
from survey.models import AboutUs
@step(u'And I visit the about us page')
def and_i_visit_the_about_us_page(step):
world.page = AboutPage(world.browser)
world.page.visit()
@step(u'And I have about us content')
def and_i_have_about_us_content(step):
world.about_us = AboutUs.objects.create(content="blah blah")
@step(u'Then I should see the sample about us information')
def then_i_should_see_the_sample_about_us_information(step):
world.page.is_text_present(world.about_us.content)
@step(u'When I click the edit link')
def when_i_click_the_edit_link(step):
world.page.click_by_css("#edit-about_us")
@step(u'Then I should see the existing content in a text area')
def then_i_should_see_the_existing_content_in_a_text_area(step):
world.page = EditAboutUsPage(world.browser)
world.form_data = {'content': world.about_us.content}
world.page.validate_form_values(world.form_data)
@step(u'When I modify about us content')
def when_i_modify_about_us_content(step):
world.form_data = {'content': "edited more blah blah blah"}
world.page.fill_wywget_textarea(world.form_data)
@step(u'Then I should see the content was updated successfully')
def then_i_should_see_the_content_was_updated_successfully(step):
world.page.see_success_message("About us content", "updated")
@step(u'And I should not see the edit about us button')
def and_i_should_not_see_the_edit_about_us_button(step):
world.page.assert_edit_link_absent() | [
"antsmc2@yahoo.com"
] | antsmc2@yahoo.com |
290920be930c40942829f4f4ddb96f55ae5fd5a5 | ca1c2630b517c2dd69ecb2741174c5147feea638 | /mercury/null_byte_filter.py | 1f5f05e6f372805662128a1c6598ba31b362b23f | [] | no_license | dexter-taylor/mercury | 708cf0440016d05e8c3754e82471d8b6e2ab5589 | efb02177ac12747d65aba43b47541d548fd5bdeb | refs/heads/master | 2020-04-18T14:25:43.808775 | 2019-01-25T06:59:08 | 2019-01-25T06:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,465 | py | #!/usr/bin/env python
'''Usage:
null_byte_filter.py (-n | -d | -l) <datafile>
Options:
-n --null Retrieve the line numbers of the lines with null bytes ('\0') and the first field in that line containing a null byte
-d --readable_dict Retrieve the lines that can be read by a csv reader (do not contain null bytes) and return lines as dictionaries
-l --readable_line Retrieve readable lines and just return line
'''
import docopt
import datamap as dmap
from snap import common
from xcsv import Dictionary2CSVProcessor
def main(args):
src_file = args.get('<datafile>')
null_mode = args.get('--null')
readable_dict_mode = args.get('--readable_dict')
readable_line_mode = args.get('--readable_line')
with open(src_file) as f:
first_line = f.readline()
fields = first_line.split('|')
nb_reporter = dmap.NullByteFilter(delimiter='|', field_names=fields)
if null_mode:
null_pairs = nb_reporter.filter_with_null_output(src_file)
for null_pair in null_pairs:
print(common.jsonpretty({'line_number': null_pair[0],
'field': null_pair[1]
}))
elif readable_dict_mode:
readable_lines = nb_reporter.filter_with_readable_output(src_file)
for line in readable_lines:
if line == first_line:
continue
record_dict = {}
value_array = line.split('|')
for r_index, field in enumerate(fields):
record_dict[field] = value_array[r_index]
print(common.jsonpretty(record_dict))
elif readable_line_mode:
proc = Dictionary2CSVProcessor(fields, "|", dmap.WhitespaceCleanupProcessor())
readable_lines = nb_reporter.filter_with_readable_output(src_file)
for line in readable_lines:
if line == first_line:
continue
record_dict = {}
value_array = line.split('|')
for r_index, field in enumerate(fields):
record_dict[field] = value_array[r_index]
proc.process(record_dict)
else:
print("Choose an option flag for record info output")
if __name__ == '__main__':
args = docopt.docopt(__doc__)
main(args)
| [
"binarymachineshop@gmail.com"
] | binarymachineshop@gmail.com |
7e49268cc95f7618ee769890ed82c3ea558465c2 | c15a2b234376b3a8ea5f3c790b4afd47150dcfcc | /Libs_Modules/test_3.py | b843063ce4cb8f089f79f6cafc5b1330a6760613 | [] | no_license | GLMF/GLMF225 | 9c05b60d8bce71973460e2d98c454b22115b92fc | c83e506f522af89cff1c76286689bb5cf2f412cf | refs/heads/master | 2020-05-03T20:48:00.897857 | 2019-04-01T07:41:13 | 2019-04-01T07:41:13 | 178,810,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | from PyInquirer import prompt
widget = [
{
'type': 'expand',
'name': 'serie',
'message': 'Quelle est votre série préférée ?',
'choices': [
{
'key': 'g',
'name': 'Game of Thrones',
'value': 'GoT'
},
{
'key': 'l',
'name': 'Lucifer',
'value': 'lucifer'
},
{
'key': 'w',
'name': 'Westworld',
'value': 'westworld'
}
]
}
]
result = prompt(widget)
| [
"tristan.colombo@gmail.com"
] | tristan.colombo@gmail.com |
3dd3ad42d05707545bad918cdf8e1c1a1956688b | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/basic/3_2.py | 1a6ecef30016ab772e487b8b0093ba0f863bafe3 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,003 | py | Difference between str.capitalize() VS str.title()
Both **title()** and **capitalize()** have similar functionality of
capitalizing first characters. Let us see the difference between the two of
them.
### title()
**title()** function in Python is the Python String Method which is used to
convert the first character in each word to Uppercase and remaining characters
to Lowercase in the string and returns a new string.
> **Syntax:** str.title()
>
> **Parameters:** None
>
> **Returns:** This function returns a string which has first letter in each
> word is uppercase and all remaining letters are lowercase.
>
>
>
>
>
>
**Example:**
## Python3
__
__
__
__
__
__
__
# Python Title() Method Example
str1 = 'geeKs foR geEks'
str2 = str1.title()
print ('First Output after Title() method is = ', str2)
# observe the original string
print ('Converted String is = ', str1.title())
print ('Original String is = ', str1 )
# Performing title() function directly
str3 = 'ASIPU pawan kuMAr'.title()
print ('Second Output after Title() method is = ', str3)
str4 = 'stutya kUMari sHAW'.title()
print ('Third Output after Title() method is = ', str4)
str5 = '6041'.title()
print ('Fourth Output after Title() method is = ', str5)
---
__
__
**Output:**
First Output after Title() method is = Geeks For Geeks
Converted String is = Geeks For Geeks
Original String is = geeKs foR geEks
Second Output after Title() method is = Asipu Pawan Kumar
Third Output after Title() method is = Stutya Kumari Shaw
Fourth Output after Title() method is = 6041
### capitalize()
In Python, the **capitalize()** method converts the first character of a
string to a capital **(uppercase)** letter. If the string has its first
character as capital, then it returns the original string.
> **Syntax:** str.title()
>
> **Parameters:** None
>
> **Returns:** This function returns a string which has the first letter in
> uppercase and all remaining letters in lowercase.
**Example:**
## Python3
__
__
__
__
__
__
__
# Python program to demonstrate the
# use of capitalize() function
# capitalize() first letter of
# string.
name = "geeks for geeks"
print(name.capitalize())
# demonstration of individual words
# capitalization to generate camel case
name1 = "geeks"
name2 = "for"
name3 = "geeks"
print(name1.capitalize() + name2.capitalize()
+ name3.capitalize())
---
__
__
**Output:**
Geeks for geeks
GeeksForGeeks
### Difference Between title() and capitalize()
The difference between them is that Python string method title() returns a
copy of the string in which the first characters of all the words are
capitalized whereas the string method capitalize() returns a copy of the
string in which just the first word of the entire string is capitalized.
**Example:**
str = "geeks for geeks"
str.title() will return Geeks For Geeks
str.capitalize() will return Geeks for geeks
## Python3
__
__
__
__
__
__
__
str1= "my name is xyz"
str2 = "geeks for geeks"
# using title()
print(str1.title())
print(str2.title())
# usng capitalize()
print(str1.capitalize())
print(str2.capitalize())
---
__
__
**Output:**
My Name Is Xyz
Geeks For Geeks
My name is xyz
Geeks for geeks
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"qmnguyenw@gmail.com"
] | qmnguyenw@gmail.com |
1d664ef000866e3f35691cf6c589f02e172914c7 | 77d3633bb64d585aa087677af9ca6f672ae9ff1f | /src/sagemaker/serve/predict_nlp.py | a854a3c9b662a2a26b489283e45b4dead44b083c | [
"MIT"
] | permissive | reneang17/authorencoder | 0f16d9d2c72db3bd3e50fac03b7eb6e25e6f7f75 | e607ddc77d18fc62e292adfe1595a5dd35e10f99 | refs/heads/master | 2022-07-15T01:23:28.450030 | 2020-03-03T05:10:15 | 2020-03-03T05:10:15 | 237,246,389 | 2 | 0 | MIT | 2020-03-08T01:37:00 | 2020-01-30T15:46:16 | Jupyter Notebook | UTF-8 | Python | false | false | 3,887 | py | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import numpy
from utils_nlp import tokenize, emb
from models import CNN
from sklearn.neighbors import KNeighborsClassifier as KNC
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#**********************************
# Load model
INPUT_DIM = model_info['INPUT_DIM']
WORD_EMBEDDING_DIM = model_info['WORD_EMBEDDING_DIM']
N_FILTERS = model_info['N_FILTERS']
FILTER_SIZES = model_info['FILTER_SIZES']
AUTHOR_DIM = model_info['AUTHOR_DIM']
DROPOUT = model_info['DROPOUT']
PAD_IDX = model_info['PAD_IDX']
#UNK_IDX = 0
model = CNN(INPUT_DIM, WORD_EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, AUTHOR_DIM, DROPOUT, PAD_IDX)
print("Model loaded with embedding_dim {}, vocab_size {}.".format(
# args.embedding_dim, args.hidden_dim, args.vocab_size
WORD_EMBEDDING_DIM, INPUT_DIM))
#**********************************
# Load the stored model parameters.
model_path = os.path.join(model_dir, 'model_state.pt')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def input_fn(serialized_input_data, content_type):
print('Deserializing the input data.')
if content_type == 'text/plain':
data = serialized_input_data.decode('utf-8')
return data
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
return str(prediction_output)
#import training data and form clusters
#author_encoder_path = os.path.join('./', 'authorencoder.pkl')
#with open(author_encoder_path, 'rb') as f:
# train_embeddings_otl, train_labels_otl= pickle.load( f )
train_embeddings_otl, train_labels_otl = emb()
from sklearn.neighbors import KNeighborsClassifier as KNC
KNN = KNC(n_neighbors=3)
KNN.fit(train_embeddings_otl, train_labels_otl)
def predict_fn(input_text, model):
print('Inferring sentiment of input data.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model.word_dict is None:
raise Exception('Model has not been loaded properly, no word_dict.')
model = model
word_dict = model.word_dict
tokenized = tokenize(word_dict, input_text)
tensor = torch.tensor(tokenized).to(device)
tensor = tensor.unsqueeze(0).unsqueeze(0)
# Make sure to put the model into evaluation mode
model.eval()
#raise Exception('This is the input: ' + tensor)
with torch.no_grad():
output = model.forward(tensor).tolist()
prediction = int(KNN.predict(output).item())
author_dir = {0: 'John Dryden', 1: 'Robert Pinsky', 2: 'Anne Carson', 3: 'Alfred Lord Tennyson', 4: 'Allen Ginsberg', 5: 'Philip Whalen', 6: 'Matthew Arnold', 7: 'Walt Whitman', 8: 'William Shakespeare', 9: 'Beowulf Anonimous'}
return author_dir[prediction]
| [
"reneang17@gmail.com"
] | reneang17@gmail.com |
e41bf4c1770b7508b7f425ca2d18b7d1d68dad13 | 89e3f694021f261b95e494d2b479367bacde8251 | /tests/types/test_entity.py | 2910ad18598425b5e693e0e5e7401820c5e9b731 | [
"MIT"
] | permissive | dchaplinsky/followthemoney | 6f9c05f430f8bfb04f7841378fd2ee5cf9b33235 | a2a150f558acb5a1c985b9dc891c98c0fdf2f17e | refs/heads/master | 2020-09-10T08:16:14.617602 | 2019-11-14T09:15:52 | 2019-11-14T09:15:52 | 221,699,199 | 1 | 0 | MIT | 2019-11-14T13:03:41 | 2019-11-14T13:03:41 | null | UTF-8 | Python | false | false | 790 | py | import unittest
from followthemoney.types import registry
entities = registry.entity
class EntityTest(unittest.TestCase):
def test_parse(self):
self.assertEqual(entities.clean('88'), '88')
self.assertEqual(entities.clean(88), '88')
self.assertEqual(entities.clean({'id': 88}), '88')
self.assertEqual(entities.clean(None), None)
self.assertEqual(entities.clean('With spaces'), None)
self.assertEqual(entities.clean('With!special'), None)
self.assertEqual(entities.clean('with.dot'), 'with.dot')
def test_normalize(self):
self.assertEqual(entities.normalize('FOO'), ['FOO'])
self.assertEqual(entities.normalize(None), [])
def test_funcs(self):
self.assertEqual(entities.specificity('bla'), 1)
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
bd592c205d08a1f8ddc82451cb09b38db2934de7 | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /concrete_instances/register-variants/vfmsubadd213ps_xmm_xmm_xmm/instructions/vfmsubadd213ps_xmm_xmm_xmm/vfmsubadd213ps_xmm_xmm_xmm.gen.vex.py | 47e6123f37d250678ee38d0e8009fe13fcb34be4 | [] | no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | import angr
proj = angr.Project('./instructions/vfmsubadd213ps_xmm_xmm_xmm/vfmsubadd213ps_xmm_xmm_xmm.o')
print proj.arch
print proj.entry
print proj.filename
irsb = proj.factory.block(proj.entry).vex
irsb.pp() | [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
0fc748c3389c508b98a2f8ce1f12b4fb2ed423d3 | f213549d8725acaf5417d0d5290430d499bf3cf3 | /lino/core/boundaction.py | b3a6a42f40bb013688e5fa75400c13ca08ccbe41 | [
"BSD-2-Clause"
] | permissive | ExcellentServ/lino | 56c8159428a451058a35dad75e8799d239c2dc0e | 9ea630e719d47843dd8427dd64db22633626fd3d | refs/heads/master | 2020-12-28T23:15:47.380120 | 2015-01-27T14:53:10 | 2015-01-27T14:53:10 | 29,911,723 | 0 | 0 | null | 2015-01-27T11:44:09 | 2015-01-27T11:44:08 | null | UTF-8 | Python | false | false | 3,744 | py | # -*- coding: UTF-8 -*-
# Copyright 2009-2015 Luc Saffre
# License: BSD (see file COPYING for details)
"""
.. autosummary::
"""
import logging
logger = logging.getLogger(__name__)
from django.conf import settings
from lino.utils import curry
from lino.core import actions
class BoundAction(object):
"""An Action which is bound to an Actor. If an Actor has subclasses,
each subclass "inherits" its actions.
"""
def __init__(self, actor, action):
if not isinstance(action, actions.Action):
raise Exception("%s : %r is not an Action" % (actor, action))
self.action = action
self.actor = actor
required = dict()
if action.readonly:
required.update(actor.required)
#~ elif isinstance(action,InsertRow):
#~ required.update(actor.create_required)
elif isinstance(action, actions.DeleteSelected):
required.update(actor.delete_required)
else:
required.update(actor.update_required)
required.update(action.required)
#~ print 20120628, str(a), required
#~ def wrap(a,required,fn):
#~ return fn
debug_permissions = actor.debug_permissions and \
action.debug_permissions
if debug_permissions:
if settings.DEBUG:
logger.info("debug_permissions active for %r (required=%s)",
self, required)
else:
raise Exception(
"settings.DEBUG is False, but `debug_permissions` "
"for %r (required=%s) is active." % (self, required))
from lino.modlib.users.utils import (
make_permission_handler, make_view_permission_handler)
self.allow_view = curry(make_view_permission_handler(
self, action.readonly, debug_permissions, **required), action)
self._allow = curry(make_permission_handler(
action, actor, action.readonly,
debug_permissions, **required), action)
#~ if debug_permissions:
#~ logger.info("20130424 _allow is %s",self._allow)
#~ actor.actions.define(a.action_name,ba)
def get_window_layout(self):
return self.action.get_window_layout(self.actor)
def get_window_size(self):
return self.action.get_window_size(self.actor)
def full_name(self):
return self.action.full_name(self.actor)
def request(self, *args, **kw):
kw.update(action=self)
return self.actor.request(*args, **kw)
def get_button_label(self, *args):
return self.action.get_button_label(self.actor, *args)
#~ def get_panel_btn_handler(self,*args):
#~ return self.action.get_panel_btn_handler(self.actor,*args)
def setup_action_request(self, *args):
return self.action.setup_action_request(self.actor, *args)
def get_row_permission(self, ar, obj, state):
#~ if self.actor is None: return False
return self.actor.get_row_permission(obj, ar, state, self)
def get_bound_action_permission(self, ar, obj, state):
if not self.action.get_action_permission(ar, obj, state):
return False
return self._allow(ar.get_user(), obj, state)
def get_view_permission(self, profile):
"""
Return True if this bound action is visible for users of this
profile.
"""
if not self.actor.get_view_permission(profile):
return False
if not self.action.get_view_permission(profile):
return False
return self.allow_view(profile)
def __repr__(self):
return "<%s(%s,%r)>" % (
self.__class__.__name__, self.actor, self.action)
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
7b5fe80b8b6972477471881efb10fa5a505144d9 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/267667/kaggle-heart-master/configurations/je_os_fixedaggr_joniscale80small_augzoombright.py | 9b93b720a6e67660825c575404d24f2b139a2e03 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,771 | py | """Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 20
validate_train_set = True
save_every = 20
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 8
sunny_batch_size = 4
batches_per_chunk = 16
num_epochs_train = 400
# - learning rate and method
base_lr = 0.0001
learning_rate_schedule = {
0: base_lr,
9*num_epochs_train/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
"zoom_x": (.75, 1.25),
"zoom_y": (.75, 1.25),
"change_brightness": (-0.3, 0.3),
}
augmentation_params_test = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
"zoom_x": (.80, 1.20),
"zoom_y": (.80, 1.20),
"change_brightness": (-0.2, 0.2),
}
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(80,80)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
# don't use patients who don't have mre than 6 slices
return [
folder for folder in folders
if data_loader.compute_nr_slices(folder) > 6]
# Input sizes
image_size = 64
nr_slices = 20
data_sizes = {
"sliced:data:sax": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:sax:locations": (batch_size, nr_slices),
"sliced:data:sax:is_not_padded": (batch_size, nr_slices),
"sliced:data:randomslices": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice": (batch_size, 30, image_size, image_size),
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size),
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 # More augmentations since a we only use single slices
#tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on it's output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.rectify,
hid_init=nn.init.Constant(0.),
backwards=False,
learn_init=True,
gradient_steps=-1,
grad_clipping=False,
unroll_scan=False,
precompute_input=False)
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:sax"]
input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
input_size_locations = data_sizes["sliced:data:sax:locations"]
l0 = nn.layers.InputLayer(input_size)
lin_slice_mask = nn.layers.InputLayer(input_size_mask)
lin_slice_locations = nn.layers.InputLayer(input_size_locations)
# PREPROCESS SLICES SEPERATELY
# Convolutional layers and some dense layers are defined in a submodel
l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
from . import je_ss_jonisc80small_360_gauss_longer_augzoombright
submodel = je_ss_jonisc80small_360_gauss_longer_augzoombright.build_model(l0_slices)
# Systole Dense layers
l_sys_mu = submodel["meta_outputs"]["systole:mu"]
l_sys_sigma = submodel["meta_outputs"]["systole:sigma"]
# Diastole Dense layers
l_dia_mu = submodel["meta_outputs"]["diastole:mu"]
l_dia_sigma = submodel["meta_outputs"]["diastole:sigma"]
# AGGREGATE SLICES PER PATIENT
l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
# Systole
l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
# Diastole
l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
submodels = [submodel]
return {
"inputs":{
"sliced:data:sax": l0,
"sliced:data:sax:is_not_padded": lin_slice_mask,
"sliced:data:sax:locations": lin_slice_locations,
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": dict(
{},
**{
k: v
for d in [model["regularizable"] for model in submodels if "regularizable" in model]
for k, v in list(d.items()) }
),
# "pretrained":{
# je_ss_jonisc80small_360_gauss_longer_augzoombright.__name__: submodel["outputs"],
# }
}
| [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
2954c7bb5b4630407209831dc851c067c8f7488a | b445f7ba5ae4899c3782dc08627b778de6bbf12b | /test/test_jsonchecker.py | 88703e6457bdd1c1e3214667c699e8b5be6bb428 | [
"Apache-2.0",
"MIT"
] | permissive | ijl/orjson | 91e620f1e68e985064a68e77569b56ff378637ea | d1cd27e29c8df2768be016071d0800a92d120786 | refs/heads/master | 2023-08-31T05:10:58.385975 | 2023-08-29T12:29:42 | 2023-08-29T13:05:57 | 158,618,772 | 4,895 | 243 | Apache-2.0 | 2023-09-08T00:40:34 | 2018-11-21T23:43:14 | Python | UTF-8 | Python | false | false | 6,187 | py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
Tests files from http://json.org/JSON_checker/
"""
import pytest
import orjson
from .util import read_fixture_str
PATTERN_1 = '["JSON Test Pattern pass1",{"object with 1 member":["array with 1 element"]},{},[],-42,true,false,null,{"integer":1234567890,"real":-9876.54321,"e":1.23456789e-13,"E":1.23456789e34,"":2.3456789012e76,"zero":0,"one":1,"space":" ","quote":"\\"","backslash":"\\\\","controls":"\\b\\f\\n\\r\\t","slash":"/ & /","alpha":"abcdefghijklmnopqrstuvwyz","ALPHA":"ABCDEFGHIJKLMNOPQRSTUVWYZ","digit":"0123456789","0123456789":"digit","special":"`1~!@#$%^&*()_+-={\':[,]}|;.</>?","hex":"ģ䕧覫췯ꯍ\uef4a","true":true,"false":false,"null":null,"array":[],"object":{},"address":"50 St. James Street","url":"http://www.JSON.org/","comment":"// /* <!-- --","# -- --> */":" "," s p a c e d ":[1,2,3,4,5,6,7],"compact":[1,2,3,4,5,6,7],"jsontext":"{\\"object with 1 member\\":[\\"array with 1 element\\"]}","quotes":"" \\" %22 0x22 034 "","/\\\\\\"쫾몾ꮘﳞ볚\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?":"A key can be any string"},0.5,98.6,99.44,1066,10.0,1.0,0.1,1.0,2.0,2.0,"rosebud"]'.encode()
class TestJsonChecker:
    """Run orjson against the json.org JSON_checker fixture suite.

    The invalid documents (fail02..fail17, fail19..fail33) previously had
    one hand-written three-line test method each; they are now a single
    parametrized test, which keeps one pytest test id per fixture while
    removing ~30 copies of identical code.  fail01 and fail18 are special:
    orjson accepts them, so they assert a round-trip instead.
    """

    def _run_fail_json(self, filename, exc=orjson.JSONDecodeError):
        # The fixture must be rejected with the given exception type.
        data = read_fixture_str(filename, "jsonchecker")
        pytest.raises(exc, orjson.loads, data)

    def _run_pass_json(self, filename, match=""):
        # The fixture must parse and re-serialize to exactly `match`.
        data = read_fixture_str(filename, "jsonchecker")
        assert orjson.dumps(orjson.loads(data)) == match

    @pytest.mark.parametrize(
        "filename", ["fail%02d.json" % i for i in range(2, 34) if i != 18]
    )
    def test_fail(self, filename):
        """Invalid documents must raise JSONDecodeError.

        fail02 and fail32 are truncated (EOF) inputs; they raise the same
        exception type as the rest, so no special-casing is needed.
        """
        self._run_fail_json(filename)

    def test_fail01(self):
        """fail01.json: a bare string payload is accepted and round-trips."""
        self._run_pass_json(
            "fail01.json",
            b'"A JSON payload should be an object or array, not a string."',
        )

    def test_fail18(self):
        """fail18.json: 20 levels of array nesting are accepted."""
        self._run_pass_json(
            "fail18.json", b'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]'
        )

    def test_pass01(self):
        """pass01.json: the canonical JSON_checker pattern round-trips."""
        self._run_pass_json("pass01.json", PATTERN_1)

    def test_pass02(self):
        """pass02.json: 19 levels of array nesting round-trip."""
        self._run_pass_json(
            "pass02.json", b'[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]'
        )

    def test_pass03(self):
        """pass03.json: a small nested object round-trips."""
        self._run_pass_json(
            "pass03.json",
            b'{"JSON Test Pattern pass3":{"The outermost value":"must be '
            b'an object or array.","In this test":"It is an object."}}',
        )
| [
"ijl@mailbox.org"
] | ijl@mailbox.org |
e71a1663ae44c868dbd627ad3114ac41adc06bc0 | b99195cf2d181dec5c31aa7e58d747f474153802 | /Dictionary/Built-in Functions with Dictionary.py | cbca42c09ab3385fa84cac09cd510faf64f700a9 | [] | no_license | eldadpuzach/MyPythonProjects | b1b4d56a822fd781c7c4c7a9e4bb5408c180c187 | 3a961a7c265caf1369067d98e94564f01f1bde74 | refs/heads/master | 2020-03-20T18:07:43.319331 | 2019-02-13T22:07:10 | 2019-02-13T22:07:10 | 137,570,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | # Built-in Functions with Dictionary
#
# Built-in functions like all(), any(), len(), cmp(), sorted() etc. are commonly used with dictionary to perform different tasks.
# Built-in Functions with Dictionary Function Description
# all() Return True if all keys of the dictionary are true (or if the dictionary is empty).
# any() Return True if any key of the dictionary is true. If the dictionary is empty, return False.
# len() Return the length (the number of items) in the dictionary.
# cmp() Compares items of two dictionaries (Python 2 only; cmp() was removed in Python 3).
# sorted() Return a new sorted list of keys in the dictionary. | [
"eldadpuzach@gmail.com"
] | eldadpuzach@gmail.com |
f6450232aaf3f0568032b2d87c7fa644d8ab19c9 | 0c281ba9bb634d518536eea03059cdb05ba32cc5 | /many_to_one/migrations/0002_wpsuser.py | 1c896f777d6f0af5d69c690885ff05aa7fbc7677 | [] | no_license | parkhongbeen/Practice-Document | c09ef3a64d64c2067604b0c5f5d6fcabd96a8e50 | 441c1c1b21c6b66e1a01eea7487f6dffdfd98e58 | refs/heads/master | 2020-12-02T16:47:20.725694 | 2020-01-04T09:30:20 | 2020-01-04T09:30:20 | 231,061,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # Generated by Django 2.2.9 on 2020-01-02 05:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the WPSUser model.

    # Must run after the app's initial migration.
    dependencies = [
        ('many_to_one', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='WPSUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                # Self-referential FK: a WPSUser's instructor is another
                # WPSUser.  SET_NULL keeps the row (instructor becomes NULL)
                # when the referenced instructor is deleted.
                ('instructor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='many_to_one.WPSUser')),
            ],
        ),
    ]
| [
"pack122@naver.com"
] | pack122@naver.com |
359e4313843a6aecb4d74aa0c3a945078ffed7bf | 893f83189700fefeba216e6899d42097cc0bec70 | /webpage/lib/python3.5/site-packages/matplotlib/tests/test_backend_pdf.py | 2feee6fb1238014212e65e61b693ae6c6637cac5 | [
"MIT"
] | permissive | pseudoPixels/SciWorCS | 79249198b3dd2a2653d4401d0f028f2180338371 | e1738c8b838c71b18598ceca29d7c487c76f876b | refs/heads/master | 2021-06-10T01:08:30.242094 | 2018-12-06T18:53:34 | 2018-12-06T18:53:34 | 140,774,351 | 0 | 1 | MIT | 2021-06-01T22:23:47 | 2018-07-12T23:33:53 | Python | UTF-8 | Python | false | false | 4,488 | py | # -*- encoding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
import numpy as np
from matplotlib import cm, rcParams
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
from matplotlib.testing.decorators import (image_comparison, knownfailureif,
cleanup)
if 'TRAVIS' not in os.environ:
    # Only defined when not running on Travis CI (reason not stated here;
    # presumably the required fonts/baseline are unavailable there).
    @image_comparison(baseline_images=['pdf_use14corefonts'],
                      extensions=['pdf'])
    def test_use14corefonts():
        # Use the 14 core PDF fonts instead of embedding font files.
        rcParams['pdf.use14corefonts'] = True
        rcParams['font.family'] = 'sans-serif'
        rcParams['font.size'] = 8
        rcParams['font.sans-serif'] = ['Helvetica']
        rcParams['pdf.compression'] = 0
        # Non-ASCII characters exercise the core-font encoding path.
        # NOTE(review): `text` is never drawn in this chunk -- the plotting
        # code appears to be missing; confirm against the upstream example.
        text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
@cleanup
def test_type42():
    # Smoke test: saving a figure with TrueType (Type 42) font embedding
    # must not raise.  Output goes to an in-memory buffer.
    rcParams['pdf.fonttype'] = 42
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([1, 2, 3])
    fig.savefig(io.BytesIO())
@cleanup
def test_multipage_pagecount():
    # get_pagecount() must track pages as they are added to a PdfPages,
    # both via fig.savefig(pdf) and via pdf.savefig().
    with PdfPages(io.BytesIO()) as pdf:
        assert pdf.get_pagecount() == 0
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot([1, 2, 3])
        fig.savefig(pdf, format="pdf")
        assert pdf.get_pagecount() == 1
        pdf.savefig()
        assert pdf.get_pagecount() == 2
@cleanup
def test_multipage_keep_empty():
    # keep_empty semantics: an empty PdfPages file survives on disk only
    # with keep_empty=True (the default); files containing at least one
    # page must never be deleted.
    from matplotlib.backends.backend_pdf import PdfPages
    from tempfile import NamedTemporaryFile
    # test empty pdf files
    # test that an empty pdf is left behind with keep_empty=True (default)
    with NamedTemporaryFile(delete=False) as tmp:
        with PdfPages(tmp) as pdf:
            filename = pdf._file.fh.name
        assert os.path.exists(filename)
    os.remove(filename)
    # test if an empty pdf is deleting itself afterwards with keep_empty=False
    with PdfPages(filename, keep_empty=False) as pdf:
        pass
    assert not os.path.exists(filename)
    # test pdf files with content, they should never be deleted
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([1, 2, 3])
    # test that a non-empty pdf is left behind with keep_empty=True (default)
    with NamedTemporaryFile(delete=False) as tmp:
        with PdfPages(tmp) as pdf:
            filename = pdf._file.fh.name
            pdf.savefig()
        assert os.path.exists(filename)
    os.remove(filename)
    # test that a non-empty pdf is left behind with keep_empty=False
    with NamedTemporaryFile(delete=False) as tmp:
        with PdfPages(tmp, keep_empty=False) as pdf:
            filename = pdf._file.fh.name
            pdf.savefig()
        assert os.path.exists(filename)
    os.remove(filename)
@cleanup
def test_composite_image():
    #Test that figures can be saved with and without combining multiple images
    #(on a single set of axes) into a single composite image.
    X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
    Z = np.sin(Y ** 2)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlim(0, 3)
    # Two images on the same axes; only the compositing behaviour differs.
    ax.imshow(Z, extent=[0, 1, 0, 1])
    ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
    # Compositing on: both images merge into one PDF image object.
    plt.rcParams['image.composite_image'] = True
    with PdfPages(io.BytesIO()) as pdf:
        fig.savefig(pdf, format="pdf")
        assert len(pdf._file._images.keys()) == 1
    # Compositing off: each image is stored as a separate object.
    plt.rcParams['image.composite_image'] = False
    with PdfPages(io.BytesIO()) as pdf:
        fig.savefig(pdf, format="pdf")
        assert len(pdf._file._images.keys()) == 2
@image_comparison(baseline_images=['hatching_legend'],
                  extensions=['pdf'])
def test_hatching_legend():
    """Test for correct hatching on patches in legend"""
    fig = plt.figure(figsize=(1, 2))
    # Zero-size rectangles: only their legend proxy patches are rendered.
    a = plt.Rectangle([0, 0], 0, 0, facecolor="green", hatch="XXXX")
    b = plt.Rectangle([0, 0], 0, 0, facecolor="blue", hatch="XXXX")
    fig.legend([a, b, a, b], ["", "", "", ""])
@image_comparison(baseline_images=['grayscale_alpha'],
                  extensions=['pdf'])
def test_grayscale_alpha():
    """Masking images with NaN did not work for grayscale images"""
    x, y = np.ogrid[-2:2:.1, -2:2:.1]
    dd = np.exp(-(x**2 + y**2))
    # Low values become NaN; regression test that NaN masking works for
    # grayscale colormaps in the PDF backend.
    dd[dd < .1] = np.nan
    fig, ax = plt.subplots()
    ax.imshow(dd, interpolation='none', cmap='gray_r')
    ax.set_xticks([])
    ax.set_yticks([])
| [
"golam.mostaeen@usask.ca"
] | golam.mostaeen@usask.ca |
54b4d3130aa1d007ebb68af85127de4f74fe2589 | 18b7f6e6a64ff4e33202f4c647d33240bf8ce015 | /Graph/Baek_1926.py | 3607ad4f55f10014997e3712a5551d0bbfe15fa5 | [] | no_license | qorjiwon/LevelUp-Algorithm | 80734b88e2543fb4b6da48377bb31b70d972b448 | 62a71552427290361e6ade9dcfe3ffc90a9d86e2 | refs/heads/master | 2023-06-16T16:33:44.427818 | 2021-03-12T14:39:25 | 2021-03-12T14:39:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """
@ Baek 1926. 그림
@ Prob. https://www.acmicpc.net/problem/1926
Ref.
@ Algo: 그래프(BFS)
@ Start day: 20. 03. 18.
@ End day: 20. 03. 18.
"""
from collections import deque
dx = [0, 0, -1, 1]
dy = [1, -1, 0, 0]
def BFS():
    # Flood-fill (BFS) of the picture containing the seed cell (i, j).
    # NOTE: relies on module-level state -- ``i``/``j`` (the current loop
    # indices of the scan below), plus ``q``, ``check``, ``MAP``, ``N``,
    # ``M`` and the direction tables ``dx``/``dy``.
    # Returns the number of cells in this connected component.
    t = 1  # counts the seed cell itself
    q.append((i, j))
    check[i][j] = 1
    while q:
        x, y = q.popleft()
        for k in range(4):  # the four axis-aligned neighbours
            nx, ny = x + dx[k], y + dy[k]
            if 0 <= nx < N and 0 <= ny < M and MAP[nx][ny] == 1 and check[nx][ny] == 0:
                check[nx][ny] = 1  # mark on enqueue to avoid duplicates
                q.append((nx, ny))
                t += 1
    return t
# N: number of rows, M: number of columns
N, M = map(int, input().split())
MAP = [list(map(int, input().split())) for _ in range(N)]
check = [[0] * M for _ in range(N)]  # visited marks, shared with BFS()
q = deque()
num_of_pic = 0  # number of connected components ("pictures")
maxV = 0        # size of the largest picture
# Scan every cell; each unvisited painted cell seeds one BFS flood-fill.
for i in range(N):
    for j in range(M):
        if check[i][j] == 0 and MAP[i][j] == 1:
            num_of_pic += 1
            ret = BFS()
            if ret > maxV: maxV = ret
print(num_of_pic)
print(maxV)
"""
6 5
1 1 0 1 1
0 1 1 0 0
0 0 0 0 0
1 0 1 1 1
0 0 1 1 1
0 0 1 1 1
>
4
9
""" | [
"21300035@handong.edu"
] | 21300035@handong.edu |
916af822d50ca0fafb9a6c3f5bb98ced51dfb76c | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Math/ReachingPoints.py | 0ac3289c31b1e8936a326fe489c9a2da788ad09e | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 865 | py | """
LeetCode Problem: 780. Reaching Points
Link: https://leetcode.com/problems/reaching-points/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(log(max(tx, ty))) -- the larger coordinate shrinks by a modulo step each round, as in Euclid's algorithm
Space Complexity: O(1)
"""
class Solution:
    """LeetCode 780 -- decide reachability by walking backwards."""

    def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool:
        """Return True if (tx, ty) is reachable from (sx, sy) using the
        moves (x, y) -> (x, x + y) and (x, y) -> (x + y, y).

        Walking backwards from the target, the predecessor of (tx, ty)
        is unique, so runs of repeated additions can be undone at once by
        reducing the larger coordinate modulo the smaller one, exactly
        like Euclid's algorithm.
        """
        while True:
            if sx > tx or sy > ty:
                # Overshot the start on some axis: unreachable.
                return False
            if sx == tx:
                # Only y can still change; it must differ by a multiple of sx.
                return (ty - sy) % sx == 0
            if sy == ty:
                # Only x can still change; it must differ by a multiple of sy.
                return (tx - sx) % sy == 0
            if tx > ty:
                tx %= ty
            elif tx < ty:
                ty %= tx
            else:
                # tx == ty with both strictly above the start: no valid parent.
                return False
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
8395fc7c7f92a208f8793b9f7b48c21bed9967d0 | 350db570521d3fc43f07df645addb9d6e648c17e | /0301_Remove_Invalid_Parentheses/solution_test.py | 2d7c1958b23dcdec8d5d9905ff2bd12cb4237244 | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 384 | py |
'''
301. Remove Invalid Parentheses
Level: Hard
https://leetcode.com/problems/remove-invalid-parentheses
'''
import unittest
class TestSum(unittest.TestCase):
    """Sanity checks for the built-in ``sum``.

    Bug fix: the tuple case previously summed ``(1, 2, 2)`` (== 5) while
    asserting the result "Should be 6", so the suite always failed.  The
    fixture now matches the asserted value.
    """

    def test_sum(self):
        # 1 + 2 + 3 == 6 for a list argument.
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")

    def test_sum_tuple(self):
        # 1 + 2 + 3 == 6 for a tuple argument (was (1, 2, 2), which is 5).
        self.assertEqual(sum((1, 2, 3)), 6, "Should be 6")


if __name__ == '__main__':
    unittest.main()
"bhuang@rms.com"
] | bhuang@rms.com |
0f3b58683b7e2728255fccc69cf4195878792e65 | 28de04457e8ebcd1b34494db07bde8a3f25d8cf1 | /easy/symmetric_tree_101.py | 4480cfb339a9ab97cd6ab801aba0f4c8d142ae4b | [] | no_license | YangXinNewlife/LeetCode | 1df4218eef6b81db81bf2f0548d0a18bc9a5d672 | 20d3d0aa325d79c716acfc75daef32f8d4f9f1ad | refs/heads/master | 2023-08-16T23:18:29.776539 | 2023-08-15T15:53:30 | 2023-08-15T15:53:30 | 70,552,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | # -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
"""
Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
For example, this binary tree [1,2,2,3,4,4,3] is symmetric:
1
/ \
2 2
/ \ / \
3 4 4 3
But the following [1,2,2,null,3,null,3] is not:
1
/ \
2 2
\ \
3 3
Note:
Bonus points if you could solve it both recursively and iteratively.
Solution:
怎么判断一颗二叉树是Symmetric对称树
1.当root根节点为None的时候,为对称树。
2.当root根节点为非空,其左右孩子为None的时候,为对称树。
3.当root根节点为非空,其左或右孩子为None当时候,为非对称树。
4.当root根节点为非空,其左右孩子非空,但是左右孩子的val不相等的时候,为非对称树。
5.当root根节点为非空,其左右孩子非空,其左右孩子的val相等,那么再次调用函数去对比该左右孩子的左右孩子是否对称。
"""
class SymmetricTree(object):
    """Check whether a binary tree is a mirror of itself (LeetCode 101).

    Nodes only need ``val``/``left``/``right`` attributes.

    Bug fix: the annotation on ``root`` is now the string ``"TreeNode"``.
    ``TreeNode`` is not defined in this module (it only appears in the
    problem statement), so the previous unquoted annotation raised
    ``NameError`` as soon as the class body was executed at import time.
    """

    def isSymmetric(self, root: "TreeNode") -> bool:
        """Return True if the tree rooted at ``root`` is symmetric.

        An empty tree is symmetric; otherwise the left and right
        subtrees must mirror each other.
        """
        if root is None:
            return True
        return self.judge(root.left, root.right)

    def judge(self, left, right):
        """Return True if subtrees ``left`` and ``right`` mirror each other.

        Two subtrees mirror each other when both are empty, or both are
        non-empty with equal root values, the outer children mirror each
        other, and the inner children mirror each other.
        """
        if left is None and right is None:
            return True
        if left is None or right is None:
            return False
        if left.val != right.val:
            return False
        return self.judge(left.left, right.right) and self.judge(left.right, right.left)
| [
"yangxin03@youxin.com"
] | yangxin03@youxin.com |
fd63cd6e400939b33de938e5de18673f74a5eae9 | f3e1423c27467e2501a5443a8767a40141752acc | /rx/core/operators/concat.py | 6c9eb5d63777ff846850ba55cd0d0a5d577af32e | [
"MIT"
] | permissive | py-lab/RxPY | a08a16cbb381aed08485e0e2c36098884f3ba903 | ce12560f9481dbd0d072911e12ff2ed30be328bf | refs/heads/master | 2020-08-09T10:25:38.111773 | 2019-10-05T20:44:32 | 2019-10-05T20:44:32 | 214,068,251 | 0 | 1 | MIT | 2019-10-10T02:28:10 | 2019-10-10T02:28:09 | null | UTF-8 | Python | false | false | 605 | py | from typing import Callable
import rx
from rx.core import Observable
def _concat(*sources: Observable) -> Callable[[Observable], Observable]:
    def concat(source: Observable) -> Observable:
        """Concatenate the given observable sequences after ``source``.

        Examples:
            >>> op = concat(xs, ys, zs)

        Returns:
            An observable sequence made of the elements of ``source``
            followed by those of every sequence passed to the operator,
            emitted in sequential order.
        """
        ordered = (source,) + sources
        return rx.concat(*ordered)
    return concat
| [
"dag@brattli.net"
] | dag@brattli.net |
af832263b949a29bb698b61d332031527b2055fb | 94560fcfd85bf81c326063ff035c593b2793863c | /asap/scripts/um_downsample_particles.py | 5e3e8b0b886b003c2d525ce23333a04b8689f99c | [
"Unlicense"
] | permissive | dr-guangtou/asap | 783a0607aea631c7d56ea9142e9e4f8505c3eac4 | 4b796b9708ee1a1d854d4ddf6d5c6e811941f55e | refs/heads/master | 2021-03-27T19:38:44.986573 | 2020-04-02T21:25:28 | 2020-04-02T21:25:28 | 111,163,115 | 2 | 0 | Unlicense | 2019-11-06T22:02:05 | 2017-11-18T00:20:30 | Jupyter Notebook | UTF-8 | Python | false | false | 2,339 | py | #!/usr/bin/env python
"""This script will read the dark matter particle table for the SMDPL
simulation, and downsample it for our model.
"""
import os
import argparse
import numpy as np
import pandas as pd
def downsample_particles(ptbl_file, n_million, seed=95064, csv=False, verbose=True):
    """Down-sample the particle file from the DM simulation.

    Parameters
    ----------
    ptbl_file : str
        Path to the input particle table; x, y, z are the first columns.
    n_million : float
        Keep ``n_million * 1e6`` particles, drawn without replacement, so
        the input must contain at least that many rows.
    seed : int, optional
        Seed for the random draw (reproducible output).
    csv : bool, optional
        Force the whitespace-delimited reader (see NOTE below).
    verbose : bool, optional
        Print the output file name.

    The result is written next to the input as
    ``<input>_downsample_<n>m.npy``.
    """
    if not os.path.isfile(ptbl_file):
        raise IOError("# Can not find the particle table : %s" % ptbl_file)
    ptbl_pre, ptbl_ext = os.path.splitext(ptbl_file)
    # Reduce the number of columns and save as a numpy array
    ptbl_out = ptbl_pre + "_downsample_%.1fm.npy" % n_million
    if verbose:
        print("# Save the downsampled catalog to : %s" % ptbl_out)
    # Data format for output
    particle_table_dtype = [
        ("x", "float64"), ("y", "float64"), ("z", "float64")]
    # NOTE(review): ``use_csv`` is passed to ``delim_whitespace`` below, so a
    # ``.csv`` extension (or --csv) selects *whitespace* delimiting.  That
    # looks inverted relative to the name -- confirm the files' real format.
    if csv or ptbl_ext == '.csv':
        use_csv = True
    else:
        use_csv = False
    # Read the data in chunks to bound memory; only columns 0-2 (x, y, z)
    # are actually loaded.
    chunksize = 1000000
    ptbl_pchunks = pd.read_csv(
        ptbl_file, usecols=[0, 1, 2], delim_whitespace=use_csv,
        names=['x', 'y', 'z', 'vx', 'vy', 'vz', 'id'],
        dtype=particle_table_dtype, index_col=False,
        chunksize=chunksize)
    ptbl_pdframe = pd.concat(ptbl_pchunks)
    # Reinterpret the flat float64 buffer as a structured (x, y, z) array.
    ptbl_array = ptbl_pdframe.values.ravel().view(dtype=particle_table_dtype)
    # Downsample without replacement using the fixed seed.
    np.random.seed(seed)
    ptbl_downsample = np.random.choice(ptbl_array, int(n_million * 1e6), replace=False)
    # Save the result
    np.save(ptbl_out, ptbl_downsample)
if __name__ == "__main__":
    # CLI: um_downsample_particles.py PTBL_FILE N_MILLION [-s SEED] [-v] [-c]
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'ptbl_file', type=str,
        help=('The particle catalog.'))
    parser.add_argument(
        'n_million', type=float,
        help=('Downsample the catalog to N x millions particles.'))
    parser.add_argument(
        '-s', '--seed', dest='seed',
        help='Random seed',
        type=int, default=95064)
    parser.add_argument(
        '-v', '--verbose', dest='verbose',
        action="store_true", default=False)
    parser.add_argument(
        '-c', '--csv', dest='csv',
        action="store_true", default=False)
    args = parser.parse_args()
    downsample_particles(args.ptbl_file, args.n_million,
                         csv=args.csv, seed=args.seed, verbose=args.verbose)
| [
"dr.guangtou@gmail.com"
] | dr.guangtou@gmail.com |
81a3b8cf4112ab531cb3cfba9a91eded7429840b | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /kosmos-2/fairseq/fairseq/data/id_dataset.py | 3e4d7969cf2a26e852b466f165a6fadabae3b35f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 423 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class IdDataset(FairseqDataset):
    """Dataset whose examples are simply their own indices."""

    def __getitem__(self, index):
        # The "example" is the index itself.
        return index

    def __len__(self):
        # NOTE(review): reports length 0; presumably the effective length
        # comes from a sibling dataset when this one is nested -- confirm.
        return 0

    def collater(self, samples):
        # Batch the collected indices into a single tensor.
        return torch.tensor(samples)
| [
"1083127130@qq.com"
] | 1083127130@qq.com |
5a166c86274117ed654161b9db4ec8f72c03974e | d48aeeac74c02ae90d48c0994105027cee596f28 | /backend/bitter_fog_29287/wsgi.py | c1d64d470c8e7a5f73d506bb8bbeefaf596af81e | [] | no_license | crowdbotics-apps/bitter-fog-29287 | 6864b7668d09879d812d7438c580710b18c9958a | b24816c199dd45886c6ae563190fe1be731538be | refs/heads/master | 2023-07-02T14:52:19.004509 | 2021-07-30T15:43:54 | 2021-07-30T15:43:54 | 391,115,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for bitter_fog_29287 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bitter_fog_29287.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
54e745ef71add4a8f93c5f04df6b32f38c5f29c8 | ee1dc4708fe2dbff1d528baf589255d8d39e56c0 | /0x0B-python-input_output/2-main.py | 64e107c075d3ce32ecbd0133c299ffd95538ae50 | [] | no_license | agzsoftsi/holbertonschool-higher_level_programming | f267991d6a917b9fc9dbd2f639394e9585bf33b6 | 89e37450d24e200cde3b29cde20b161e75723805 | refs/heads/master | 2021-07-03T15:16:30.381714 | 2021-03-01T16:36:11 | 2021-03-01T16:36:11 | 226,884,471 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | #!/usr/bin/python3
read_lines = __import__('2-read_lines').read_lines
print("1 line:")
read_lines("my_file_0.txt", 1)
print("--")
print("3 lines:")
read_lines("my_file_0.txt", 3)
print("--")
print("Full content:")
read_lines("my_file_0.txt")
| [
"agzsoftsi@gmail.com"
] | agzsoftsi@gmail.com |
dabd42749c4135637d3896146d27573906923be0 | 69d8e789b289edfeb2fc18d0ef1c395bde9fb375 | /minDepth_111.py | 0366cd7558f4835563f6d3285240573c83501c3b | [] | no_license | Lucces/leetcode | d2db842eae7cdf1d7b9c56844660eb6f1940d88a | 2c3dbcbcb20cfdb276c0886e0193ef42551c5747 | refs/heads/master | 2021-01-19T05:59:04.712086 | 2016-08-29T13:33:00 | 2016-08-29T13:33:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | #!/usr/bin/env python
# coding=utf-8
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def minDepth(self, root):
        """Return the number of nodes on the shortest root-to-leaf path.

        :type root: TreeNode
        :rtype: int

        Rewritten as an iterative breadth-first search: the first leaf
        dequeued is at minimal depth, so the traversal stops early.  This
        removes the old module-level ``min_depth`` global (fragile, not
        reentrant) and cannot hit Python's recursion limit on deep trees.
        """
        if root is None:
            return 0
        # Local import: this module has no top-level imports.
        from collections import deque
        queue = deque([(root, 1)])
        while queue:
            node, depth = queue.popleft()
            if node.left is None and node.right is None:
                # First leaf found by BFS is the shallowest one.
                return depth
            if node.left is not None:
                queue.append((node.left, depth + 1))
            if node.right is not None:
                queue.append((node.right, depth + 1))
def inorder_tree_walk(node, depth):
    """Depth-first walk that records the shallowest leaf in ``min_depth``.

    Despite the name, this is a pre-order traversal.  The caller must set
    the module-level global ``min_depth`` (e.g. to ``float("inf")``) before
    calling; every leaf's depth is compared against it and the minimum kept.

    Fix: ``== None`` / ``!= None`` comparisons replaced with the identity
    tests ``is None`` / ``is not None`` (PEP 8); behavior is unchanged for
    these plain node objects.
    """
    global min_depth
    depth += 1
    if node.left is None and node.right is None:
        # Leaf: record its depth if it is the shallowest seen so far.
        if depth < min_depth:
            min_depth = depth
    if node.left is not None:
        inorder_tree_walk(node.left, depth)
    if node.right is not None:
        inorder_tree_walk(node.right, depth)
"cntqrxj@gmail.com"
] | cntqrxj@gmail.com |
a65d37fbf1e635195b611f139195ad869fb87991 | 5b34d998c7798b7cc1068680d89a977151c66c1a | /test_appium/testcase/test_search.py | eeeaf76cfc58728e01b79222be8efedc5438455b | [] | no_license | Hanlen520/Hogwarts_11 | e79a0de2508dd6801d46893271bcbc239edff3e8 | 805ee2586a25da1e710ba3acf63b5591dd76fcc6 | refs/heads/master | 2022-07-18T11:03:09.692778 | 2020-04-13T14:57:49 | 2020-04-13T14:57:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Project : Hogwarts_11
@File : test_search.py
@Time : 2020-03-30 10:30:18
@Author : indeyo_lin
"""
import pytest
from test_appium.page.app import App
class TestSearch:
def setup(self):
self.page = App().start().main().goto_search()
def test_search(self):
price = self.page.search("alibaba").get_price("BABA")
assert price < 200
@pytest.mark.parametrize("stock, stock_type, price", [
("alibaba", "BABA", 200),
("JD", "JD", 20)
])
def test_search_type(self, stock, stock_type, price):
assert self.page.search(stock).get_price(stock_type) > price
def test_add_stock(self):
assert "已添加" in self.page.search("alibaba").add_stock().get_msg() | [
"indeyo@git.com"
] | indeyo@git.com |
812800e0ba2f557721a2771d61de9304ab8789cc | 3a1be455fc5e117bd8792ed46c59793f8b29a01f | /python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py | 934ad1014282703a4660e25725015fa588bb379a | [
"Apache-2.0"
] | permissive | liyancas/Paddle | 42d5e7c71c37b4e63bf54e6e31e82e40aef044ce | 98303291d27cb831b19111d82793159cbe9a85ca | refs/heads/develop | 2022-05-21T03:27:16.497238 | 2022-04-01T00:52:17 | 2022-04-01T00:52:17 | 72,499,865 | 0 | 0 | Apache-2.0 | 2022-02-11T08:16:37 | 2016-11-01T03:17:41 | Python | UTF-8 | Python | false | false | 5,422 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestGreaterThan(IPUOpTest):
    """Cross-check paddle's greater_than op across execution modes.

    Each ``test_caseN`` builds a feed, runs ``_test_base`` once per
    ``ExecutionMode`` and lets ``IPUOpTest.check`` compare the outputs.
    Subclasses swap the operator via ``set_test_op``.
    """
    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_test_op()
    @property
    def fp16_enabled(self):
        # Also exercise the IPU popart fp16 mode for this op.
        return True
    def set_test_op(self):
        # Overridden by subclasses to test other comparison ops.
        self.op = paddle.fluid.layers.greater_than
    def set_op_attrs(self):
        self.attrs = {}
    def _test_base(self, exec_mode):
        # Build a static-graph program computing op(x, y) and run it under
        # the requested execution mode; returns the fetched output array.
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        # Fixed seeds keep runs comparable across modes.
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED
        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype='float32')
                out = self.op(x, y, **self.attrs)
                fetch_list = [out.name]
            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            if exec_mode != ExecutionMode.CPU_FP32:
                # Compile the program for the IPU (optionally with fp16).
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog
            # fp16 modes use the half-precision copy of the feed.
            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16
            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]
    def run_test_base(self):
        # Run every execution mode and compare outputs (as int32 masks).
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten().astype(np.int32)
        self.check(output_dict)
    def set_feed_attr(self):
        # Derive static-graph input shapes/names from the fp32 feed.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())
    def set_data_feed0(self):
        # Random 3-D inputs of identical shape.
        x = np.random.randn(3, 4, 5)
        y = np.random.randn(3, 4, 5)
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "y": y.astype(np.float32),
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "y": y.astype(np.float16),
        }
        self.set_feed_attr()
    def set_data_feed1(self):
        # Mismatched-rank inputs: (1, 10) against (10,).
        x = np.ones([1, 10])
        y = np.ones([10])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
        self.set_feed_attr()
    def set_data_feed2(self):
        # Ones vs zeros.
        x = np.ones([1, 10])
        y = np.zeros([1, 10])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
        self.set_feed_attr()
    def set_data_feed3(self):
        # Zeros vs ones.
        x = np.zeros([1, 10])
        y = np.ones([1, 10])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
        self.set_feed_attr()
    def test_case0(self):
        self.set_data_feed0()
        self.set_op_attrs()
        self.run_test_base()
    def test_case1(self):
        self.set_data_feed1()
        self.set_op_attrs()
        self.run_test_base()
    def test_case2(self):
        self.set_data_feed2()
        self.set_op_attrs()
        self.run_test_base()
    def test_case3(self):
        self.set_data_feed3()
        self.set_op_attrs()
        self.run_test_base()
class TestLessThan(TestGreaterThan):
    # Same harness, comparing with less_than instead of greater_than.
    def set_test_op(self):
        self.op = paddle.fluid.layers.less_than
class TestEqual(TestGreaterThan):
    # Same harness, comparing with equal instead of greater_than.
    def set_test_op(self):
        self.op = paddle.fluid.layers.equal
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | liyancas.noreply@github.com |
3d8f2df75d1761a1b24f57e53f4640e6fa499b23 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_117/ch27_2019_03_08_12_32_17_139310.py | beac39a68b2338e79d5a49e9521b451eff51529b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | cigarros=int(input("quantos cigarros fuma por dia?"))
anos=int(input("quantos anos?"))
Tempo_de_vida_perdido=((int(cigarros)*365)*int(anos))*10
print (Tempo_de_vida_perdido)
| [
"you@example.com"
] | you@example.com |
ebdec2b196300b6b5d65b0d2260ebb5f1d6fe028 | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /1.5.0/mpl_examples/api/filled_step.py | 42d61dc917668293fb8e7bf059ba464eed8925e2 | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 6,444 | py | import itertools
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from cycler import cycler
from six.moves import zip
def filled_hist(ax, edges, values, bottoms=None, orientation='v',
**kwargs):
"""
Draw a histogram as a stepped patch.
Extra kwargs are passed through to `fill_between`
Parameters
----------
ax : Axes
The axes to plot to
edges : array
A length n+1 array giving the left edges of each bin and the
right edge of the last bin.
values : array
A length n array of bin counts or values
bottoms : scalar or array, optional
A length n array of the bottom of the bars. If None, zero is used.
orientation : {'v', 'h'}
Orientation of the histogram. 'v' (default) has
the bars increasing in the positive y-direction.
Returns
-------
ret : PolyCollection
Artist added to the Axes
"""
print(orientation)
if orientation not in set('hv'):
raise ValueError("orientation must be in {'h', 'v'} "
"not {o}".format(o=orientation))
kwargs.setdefault('step', 'post')
edges = np.asarray(edges)
values = np.asarray(values)
if len(edges) - 1 != len(values):
raise ValueError('Must provide one more bin edge than value not: '
'len(edges): {lb} len(values): {lv}'.format(
lb=len(edges), lv=len(values)))
if bottoms is None:
bottoms = np.zeros_like(values)
if np.isscalar(bottoms):
bottoms = np.ones_like(values) * bottoms
values = np.r_[values, values[-1]]
bottoms = np.r_[bottoms, bottoms[-1]]
if orientation == 'h':
return ax.fill_betweenx(edges, values, bottoms, **kwargs)
elif orientation == 'v':
return ax.fill_between(edges, values, bottoms, **kwargs)
else:
raise AssertionError("you should never be here")
def stack_hist(ax, stacked_data, sty_cycle, bottoms=None,
               hist_func=None, labels=None,
               plot_func=None, plot_kwargs=None):
    """
    Draw a set of stacked histograms, one per row/column of *stacked_data*.

    ax : axes.Axes
        The axes to add artists to
    stacked_data : array or Mapping
        A (N, M) shaped array. The first dimension will be iterated over to
        compute histograms row-wise
    sty_cycle : Cycler or operable of dict
        Style to apply to each set
    bottoms : array, optional
        The initial positions of the bottoms, defaults to 0
    hist_func : callable, optional
        Must have signature `bin_vals, bin_edges = f(data)`.
        `bin_edges` expected to be one longer than `bin_vals`
    labels : list of str, optional
        The label for each set.
        If not given and stacked data is an array defaults to 'default set {n}'
        If stacked_data is a mapping, and labels is None, default to the keys
        (which may come out in a random order).
        If stacked_data is a mapping and labels is given then only
        the columns listed will be plotted.
    plot_func : callable, optional
        Function to call to draw the histogram must have signature:
          ret = plot_func(ax, edges, top, bottoms=bottoms,
                          label=label, **kwargs)
    plot_kwargs : dict, optional
        Any extra kwargs to pass through to the plotting function. This
        will be the same for all calls to the plotting function and will
        over-ride the values in cycle.
    Returns
    -------
    arts : dict
        Dictionary of artists keyed on their labels
    """
    # deal with default binning function
    if hist_func is None:
        hist_func = np.histogram
    # deal with default plotting function
    if plot_func is None:
        plot_func = filled_hist
    # deal with default plot keyword arguments
    if plot_kwargs is None:
        plot_kwargs = {}
    print(plot_kwargs)
    # Duck-type the input: Mappings expose .keys(), plain arrays do not.
    try:
        l_keys = stacked_data.keys()
        label_data = True
        if labels is None:
            labels = l_keys
    except AttributeError:
        label_data = False
        if labels is None:
            # No labels for arrays: fall back to generated names below.
            labels = itertools.repeat(None)
    # Build one (data, label, style) triple per data set.
    if label_data:
        loop_iter = enumerate((stacked_data[lab], lab, s) for lab, s in
                              zip(labels, sty_cycle))
    else:
        loop_iter = enumerate(zip(stacked_data, labels, sty_cycle))
    arts = {}
    for j, (data, label, sty) in loop_iter:
        if label is None:
            label = 'dflt set {n}'.format(n=j)
        # A 'label' entry in the style cycle overrides the computed label.
        label = sty.pop('label', label)
        vals, edges = hist_func(data)
        if bottoms is None:
            bottoms = np.zeros_like(vals)
        top = bottoms + vals
        print(sty)
        # plot_kwargs wins over per-set cycle values.
        sty.update(plot_kwargs)
        print(sty)
        ret = plot_func(ax, edges, top, bottoms=bottoms,
                        label=label, **sty)
        # Each set stacks on top of the previous one.
        bottoms = top
        arts[label] = ret
    ax.legend(fontsize=10)
    return arts
# set up histogram function to fixed bins
edges = np.linspace(-3, 3, 20, endpoint=True)
hist_func = partial(np.histogram, bins=edges)
# set up style cycles (one facecolor/label/hatch per data set)
color_cycle = cycler('facecolor', 'rgbm')
label_cycle = cycler('label', ['set {n}'.format(n=n) for n in range(4)])
hatch_cycle = cycler('hatch', ['/', '*', '+', '|'])
# make some synthetic data: 4 sets of 12250 normal samples
stack_data = np.random.randn(4, 12250)
dict_data = {lab: d for lab, d in zip(list(c['label'] for c in label_cycle),
                                      stack_data)}
# work with plain arrays: vertical stacked bars, then horizontal
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5), tight_layout=True)
arts = stack_hist(ax1, stack_data, color_cycle + label_cycle + hatch_cycle,
                  hist_func=hist_func)
arts = stack_hist(ax2, stack_data, color_cycle,
                  hist_func=hist_func,
                  plot_kwargs=dict(edgecolor='w', orientation='h'))
ax1.set_ylabel('counts')
ax1.set_xlabel('x')
ax2.set_xlabel('counts')
ax2.set_ylabel('x')
# work with labeled data: labels come from the mapping keys, or an
# explicit subset of them on the second axes
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5),
                               tight_layout=True, sharey=True)
arts = stack_hist(ax1, dict_data, color_cycle + hatch_cycle,
                  hist_func=hist_func)
arts = stack_hist(ax2, dict_data, color_cycle + hatch_cycle,
                  hist_func=hist_func, labels=['set 0', 'set 3'])
ax1.xaxis.set_major_locator(mticker.MaxNLocator(5))
ax1.set_xlabel('counts')
ax1.set_ylabel('x')
ax2.set_ylabel('x')
| [
"quantum.analyst@gmail.com"
] | quantum.analyst@gmail.com |
d8d1483f9f79e9e32c3b1a3028bd7eea445cce5b | b2cefb7a2a83aa93ee1b15a780b5ddf6c498215b | /nemo/collections/asr/parts/utils/rnnt_utils.py | 4b91eace8ad6f61848e401d3efd980c132c03d99 | [
"Apache-2.0"
] | permissive | VahidooX/NeMo | bfde8c9b48c818342a9c6290fb9dee62fafeca38 | 866cc3f66fab3a796a6b74ef7a9e362c2282a976 | refs/heads/main | 2023-07-23T19:13:39.948228 | 2022-04-29T21:51:54 | 2022-04-29T21:51:54 | 227,733,473 | 1 | 2 | Apache-2.0 | 2022-09-15T15:30:13 | 2019-12-13T01:55:21 | Jupyter Notebook | UTF-8 | Python | false | false | 5,863 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
@dataclass
class Hypothesis:
    """Hypothesis class for beam search algorithms.
    score: A float score obtained from an AbstractRNNTDecoder module's score_hypothesis method.
    y_sequence: Either a sequence of integer ids pointing to some vocabulary, or a packed torch.Tensor
        behaving in the same manner. dtype must be torch.Long in the latter case.
    dec_state: A list (or list of list) of LSTM-RNN decoder states. Can be None.
    text: (Optional) A decoded string after processing via CTC / RNN-T decoding (removing the CTC/RNNT
        `blank` tokens, and optionally merging word-pieces). Should be used as decoded string for
        Word Error Rate calculation.
    timestep: (Optional) A list of integer indices representing at which index in the decoding
        process did the token appear. Should be of same length as the number of non-blank tokens.
    alignments: (Optional) Represents the CTC / RNNT token alignments as integer tokens along an axis of
        time T (for CTC) or Time x Target (TxU).
        For CTC, represented as a single list of integer indices.
        For RNNT, represented as a dangling list of list of integer indices.
        Outer list represents Time dimension (T), inner list represents Target dimension (U).
        The set of valid indices **includes** the CTC / RNNT blank token in order to represent alignments.
    length: Represents the length of the sequence (the original length without padding), otherwise
        defaults to 0.
    y: (Unused) A list of torch.Tensors representing the list of hypotheses.
    lm_state: (Unused) A dictionary state cache used by an external Language Model.
    lm_scores: (Unused) Score of the external Language Model.
    tokens: (Optional) A list of decoded tokens (can be characters or word-pieces.
    last_token (Optional): A token or batch of tokens which was predicted in the last step.
    """
    score: float
    y_sequence: Union[List[int], torch.Tensor]
    text: Optional[str] = None
    dec_out: Optional[List[torch.Tensor]] = None
    dec_state: Optional[Union[List[List[torch.Tensor]], List[torch.Tensor]]] = None
    # default_factory avoids the shared-mutable-default pitfall for the list case
    timestep: Union[List[int], torch.Tensor] = field(default_factory=list)
    alignments: Optional[Union[List[int], List[List[int]]]] = None
    length: Union[int, torch.Tensor] = 0
    # NOTE: torch.tensor (lowercase) is the factory function, not a type;
    # annotation kept as-is to preserve the original bytes.
    y: List[torch.tensor] = None
    lm_state: Optional[Union[Dict[str, Any], List[Any]]] = None
    lm_scores: Optional[torch.Tensor] = None
    tokens: Optional[Union[List[int], torch.Tensor]] = None
    last_token: Optional[torch.Tensor] = None
@dataclass
class NBestHypotheses:
    """List of N best hypotheses"""
    # Ranked candidate hypotheses produced by a beam search; may be None.
    n_best_hypotheses: Optional[List[Hypothesis]]
def is_prefix(x: List[int], pref: List[int]) -> bool:
    """
    Obtained from https://github.com/espnet/espnet.

    Check if pref is a (proper) prefix of x.

    Args:
        x: Label ID sequence.
        pref: Prefix label ID sequence.

    Returns:
        : Whether pref is a prefix of x.
    """
    # A proper prefix must be strictly shorter and match element-wise.
    return len(pref) < len(x) and all(p == v for p, v in zip(pref, x))
def select_k_expansions(
    hyps: List[Hypothesis], logps: torch.Tensor, beam_size: int, gamma: float, beta: int,
) -> List[Tuple[int, Hypothesis]]:
    """
    Obtained from https://github.com/espnet/espnet

    For every hypothesis, pick at most (beam_size + beta) candidate token
    expansions, pruned by value: only candidates whose score lies within
    `gamma` of that hypothesis' best expansion survive.

    Args:
        hyps: Hypotheses.
        logps: Log-probabilities for hypotheses expansions (one row per hypothesis).
        beam_size: Beam size.
        gamma: Allowed logp difference for prune-by-value method.
        beta: Number of additional candidates to store.

    Return:
        k_expansions: Per hypothesis, a list of (token_id, score) pairs
        sorted by ascending score.
    """
    k_expansions = []

    for hyp_idx, hyp in enumerate(hyps):
        # Score every candidate token for this hypothesis.
        scored = [(int(tok), hyp.score + float(lp)) for tok, lp in enumerate(logps[hyp_idx])]
        best_tok, best_score = max(scored, key=lambda kv: kv[1])

        # Prune by value, sort ascending, cap at beam_size + beta.
        kept = sorted(
            (kv for kv in scored if (best_score - gamma) <= kv[1]),
            key=lambda kv: kv[1],
        )[: beam_size + beta]

        # Guard against an empty selection (possible for negative gamma).
        k_expansions.append(kept if kept else [(best_tok, best_score)])

    return k_expansions
| [
"noreply@github.com"
] | VahidooX.noreply@github.com |
72c20e2df29fcea30fe8377b3aafcd9722b96b2b | ef3ac1664accfe2f4f28800cb3dde383d04e2636 | /max len when alternating num are even and odd.py | 92e25b7c837f76ffaa71f84ffb3a4f3bebd3ae18 | [] | no_license | Shamabanu/python | 2466b253ead7249147844e22ede9017a2ffb299a | 76350525586b285773edb58912c1ba8eee35d1a6 | refs/heads/master | 2020-03-27T15:45:09.838053 | 2019-08-14T15:06:18 | 2019-08-14T15:06:18 | 146,736,750 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | num=input()
# Scan adjacent digit pairs of `num` (the string read above); a pair whose
# digit-sum is odd means the two digits alternate in parity (even/odd).
m=[]  # lengths (in pairs) of every finished alternating run
s=0   # length of the current run of parity-alternating adjacent pairs
for i in range(0,len(num)-1):
    k=int(num[i])+int(num[i+1])
    if k%2!=0:
        # Odd sum: one even digit + one odd digit, the run continues.
        s=s+1
    else:
        # Run broken: record its length and start over.
        m.append(s)
        s=0
m.append(s)
# Longest run of s alternating pairs spans s+1 digits; 0 means no
# alternating pair exists at all.
n=max(m)
if n==0:
    print(0)
else:
    print(n+1)
| [
"noreply@github.com"
] | Shamabanu.noreply@github.com |
02fe935d53fe2979fae0f6a73cfa11bc96de96d4 | 52d73c4b6ad70b62000d9d01e3dbab94f1edcb39 | /uiautomator2/settings.py | 7c6aea22b08ddeefc1661a927266e822bf257347 | [
"MIT"
] | permissive | zenjan1/uiautomator2 | ff50abae9bfe7430aea77bbf4431eab472153a8c | 907ea86099719edaec14f802f5182f1a8b359840 | refs/heads/master | 2022-06-04T06:44:07.467582 | 2020-05-03T07:17:14 | 2020-05-03T07:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # coding: utf-8
#
import json
import logging
import pprint
from typing import Any
class Settings(object):
    """Typed runtime settings bag for a device session.

    Holds a fixed set of known keys with defaults; ``set``/``__setitem__``
    validate both the key and the value's type before storing it.
    """

    def __init__(self, d):
        # Back-reference to the owning device object (kept for callers).
        self._d = d

        # Known settings and their current (initially default) values.
        self._defaults = {
            "post_delay": 0,  # Deprecated
            "wait_timeout": 20.0,
            "xpath_debug": False,  # self._set_xpath_debug,
            "uiautomator_runtest_app_background": True,
            "click_after_delay": 0.2,
            "click_before_delay": 0.2,
        }

        # Accepted value type(s) per key; keys missing here are filled in
        # below from the type of their default (numbers accept float or int).
        self._props = {
            "post_delay": (float, int),
            "xpath_debug": bool,
        }

        for k, v in self._defaults.items():
            if k not in self._props:
                self._props[k] = (float, int) if type(v) in (float, int) else type(v)

    def get(self, key: str) -> Any:
        """Return the current value for *key* (None for unknown keys)."""
        return self._defaults.get(key)

    def set(self, key: str, val: Any):
        """Store *val* under *key* after validating key and value type.

        Raises:
            AttributeError: if *key* is not a known setting.
            TypeError: if *val* is not an accepted type for *key*.
        """
        if key not in self._props:
            raise AttributeError("invalid attribute", key)
        # Fix: dropped the leftover debug print that polluted stdout on
        # every invalid-type error before raising.
        if not isinstance(val, self._props[key]):
            raise TypeError("invalid type, only accept: %r" % self._props[key])
        # If the stored value is callable it acts as a setter hook.
        callback = self._defaults[key]
        if callable(callback):
            callback(val)
        self._defaults[key] = val

    def __setitem__(self, key: str, val: Any):
        self.set(key, val)

    def __getitem__(self, key: str) -> Any:
        if key not in self._defaults:
            raise RuntimeError("invalid key", key)
        return self.get(key)

    def __repr__(self):
        return pprint.pformat(self._defaults)
# if __name__ == "__main__":
# settings = Settings(None)
# settings.set("pre_delay", 10)
# print(settings['pre_delay'])
# settings["post_delay"] = 10
| [
"codeskyblue@gmail.com"
] | codeskyblue@gmail.com |
7e7b13cc713a73b93b9922fa4890545534bec49f | 149e9e52304a970ffb256f290fce5f614c9e20c4 | /Python Programming language/DataCampPractice/Corso_CISCO_netacad/modules/platform_module/m9_platform.py | 418ca4b294f6305bd778bfbbc88fc1fb4623c30e | [] | no_license | Pasquale-Silv/Improving_Python | 7451e0c423d73a91fa572d44d3e4133b0b4f5c98 | 96b605879810a9ab6c6459913bd366b936e603e4 | refs/heads/master | 2023-06-03T15:00:21.554783 | 2021-06-22T15:26:28 | 2021-06-22T15:26:28 | 351,806,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | """
But sometimes you want to know more - for example, the name of the OS which hosts Python,
and some characteristics describing the hardware that hosts the OS.
There is a module providing some means to allow you to know where you are and what components work for you.
The module is named
platform
We'll show you some of the functions it provides to you.
The platform module lets you access the underlying platform's data, i.e.,
hardware, operating system, and interpreter version information.
There is a function that can show you all the underlying layers in one glance, named platform, too.
It just returns a string describing the environment; thus,
its output is rather addressed to humans than to automated processing (you'll see it soon).
This is how you can invoke it:
platform(aliased = False, terse = False)
"""
from platform import platform
# Full human-readable platform string, e.g. 'Linux-5.15.0-...-x86_64-with-glibc2.35'.
print(platform())
# aliased=1: report aliased system names where they differ from the common ones.
print(platform(1))
# terse=1: only the minimal information.
print(platform(0, 1))
# NOTE(review): duplicate of the previous call — possibly meant platform(1, 1);
# confirm against the course material.
print(platform(0, 1))
| [
"55320885+Pasquale-Silv@users.noreply.github.com"
] | 55320885+Pasquale-Silv@users.noreply.github.com |
b4eadce6676c593ff4b4b8f33c4ab61fae97d601 | 77b16dcd465b497c22cf3c096fa5c7d887d9b0c2 | /Quintana_Jerrod/Assignments/f+sql_projects/login_registration/mysqlconnection.py | 4c155f5e9f6da54e3176b393d8fb413e17530147 | [
"MIT"
] | permissive | curest0x1021/Python-Django-Web | a7cf8a45e0b924ce23791c18f6a6fb3732c36322 | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | refs/heads/master | 2020-04-26T17:14:20.277967 | 2016-10-18T21:54:39 | 2016-10-18T21:54:39 | 173,706,702 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,250 | py | """ import the necessary modules """
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
# Create a class that will give us an object that we can use to connect to a database
class MySQLConnection(object):
    """Thin wrapper around Flask-SQLAlchemy for raw-SQL access.

    NOTE(review): the ``db`` constructor argument is not used — the database
    name is hard-coded in ``config`` below; verify against callers.
    """
    def __init__(self, app, db):
        config = {
            'host': 'localhost',
            'database': 'login_registration', # we got db as an argument
            # my note: The database name above is the only db from the original copy of this document that changes
            'user': 'root',
            'password': '',
            # password is blank because I never set it
            'port': '3306' # change the port to match the port your SQL server is running on
        }
        # this will use the above values to generate the path to connect to your sql database
        DATABASE_URI = "mysql://{}:{}@127.0.0.1:{}/{}".format(config['user'], config['password'], config['port'], config['database'])
        app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
        app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
        # establish the connection to database
        self.db = SQLAlchemy(app)
    # this is the method we will use to query the database
    def query_db(self, query, data=None):
        """Execute *query* with bound *data*; behavior depends on the verb.

        select -> list of row dicts; insert -> last inserted row id;
        anything else -> commit and return None.
        """
        result = self.db.session.execute(text(query), data)
        # The verb is detected from the first six characters of the query.
        if query[0:6].lower() == 'select':
            # if the query was a select
            # convert the result to a list of dictionaries
            list_result = [dict(r) for r in result]
            # return the results as a list of dictionaries
            return list_result
        elif query[0:6].lower() == 'insert':
            # if the query was an insert, return the id of the
            # commit changes
            self.db.session.commit()
            # row that was inserted
            return result.lastrowid
        else:
            # if the query was an update or delete, return nothing and commit changes
            self.db.session.commit()
# This is the module method to be called by the user in server.py. Make sure to provide the db name!
# My note: best I can tell, these two db's don't change, only the middle one
def MySQLConnector(app, db):
    """Factory for a MySQLConnection bound to *app*.

    NOTE(review): *db* is forwarded but ignored by MySQLConnection's
    constructor (the database name is hard-coded there).
    """
    return MySQLConnection(app, db)
| [
"43941751+curest0x1021@users.noreply.github.com"
] | 43941751+curest0x1021@users.noreply.github.com |
0fbd804353c409e267bd017d27c00640523191a2 | 1b787489aab83b2e06a8f658ee8e01a10eb01998 | /antpat/reps/hamaker.py | f4272ea25cf8388cb1ffd0520e9dc8142dc481d5 | [
"ISC"
] | permissive | daydreamer2023/AntPat | 7edd471bd1e0997eb5befa029120ba13b861f106 | 6dc416a1593346421337400f880e7159a07447f6 | refs/heads/master | 2022-01-22T17:52:39.300557 | 2019-08-15T08:41:24 | 2019-08-15T08:41:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,368 | py | #!/usr/bin/python
"""Hamaker's analytic antenna pattern model."""
#TobiaC 2015-11-29 (2015-07-31)
import sys
#sys.path.append('/home/tobia/projects/BeamFormica/AntPatter/')
import math
import cmath
import scipy.special
import numpy
from antpat import dualpolelem
from antpat.reps.sphgridfun import tvecfun, pntsonsphere
import matplotlib.pyplot as plt
HA_LBAfile_default = ''
class HamakerPolarimeter(object):
    """This is the Hamaker polarimeter model class.

    The pattern is a truncated series: polynomial in normalized frequency
    and in theta, with azimuthal harmonics (2k+1)*phi — see getJonesAlong.
    """
    nr_pols = 2 #Number of polarization channels
    def __init__(self, artsdata):
        """Objects are created based on a Arts coefficient C++ header
        file. There is current one default set for the HBA and one for
        LBA.

        artsdata: dict with keys 'coefs', 'HAcoefversion', 'HAcoefband',
        'HAcoefnrelem', 'freq_center', 'freq_range', 'channels'."""
        self.coefs = artsdata['coefs']
        self.HAcoefversion = artsdata['HAcoefversion']
        self.HAcoefband = artsdata['HAcoefband']
        self.HAcoefnrelem = artsdata['HAcoefnrelem']
        self.freq_center = artsdata['freq_center']
        self.freq_range = artsdata['freq_range']
        self.channels = artsdata['channels']
        self.nr_bands=len(self.coefs)
        # Valid frequency interval covered by the coefficient set.
        self.freqintervs = (self.freq_center-self.freq_range,
                            self.freq_center+self.freq_range)
    def getfreqs(self):
        """Returns nominals channel center frequencies"""
        return self.channels
    def getJonesAlong(self, freqvals, theta_phi):
        """Compute Jones matrix for given frequencies and directions.
        Input is list of frequencies in Hz and a list of theta,phi pairs;
        and the output is Jones[freq, dir_th, dir_ph, polchan, comp]."""
        mask_horizon = True
        (theta, phi) = theta_phi
        theta = numpy.array(theta)
        phi = numpy.array(phi)
        freqvals = numpy.array(freqvals)
        # coefs shape: (azimuthal order, theta order, freq order, 2 pols).
        (k_ord, TH_ord, FR_ord, nr_pol) = self.coefs.shape
        # Normalize frequency to [-1, 1] over the model's valid interval.
        freqn = (freqvals-self.freq_center)/self.freq_range
        if len(freqvals) > 1:
            frqXdrn_shp = freqvals.shape+theta.shape
        else :
            frqXdrn_shp = theta.shape
        response = numpy.zeros(frqXdrn_shp+(2, 2), dtype=complex)
        for ki in range(k_ord):
            # P accumulates the freq/theta polynomial for both pol channels.
            P = numpy.zeros((nr_pol,)+frqXdrn_shp, dtype=complex)
            for THi in range(TH_ord):
                for FRi in range(FR_ord):
                    fac = numpy.multiply.outer(freqn**FRi, theta**THi).squeeze()
                    P[0,...] += self.coefs[ki,THi,FRi,0]*fac
                    P[1,...] += self.coefs[ki,THi,FRi,1]*fac
            # Azimuthal harmonic with alternating sign: (-1)^k (2k+1) phi.
            ang = (-1)**ki*(2*ki+1)*phi
            response[...,0,0] += +numpy.cos(ang)*P[0,...]
            response[...,0,1] += -numpy.sin(ang)*P[1,...]
            response[...,1,0] += +numpy.sin(ang)*P[0,...]
            response[...,1,1] += +numpy.cos(ang)*P[1,...]
            #numpy.array([[math.cos(ang)*P[0],-math.sin(ang)*P[1]],
            #             [math.sin(ang)*P[0], math.cos(ang)*P[1]]])
        #Mask beam below horizon (theta > pi/2 zeroed out)
        if mask_horizon:
            mh = numpy.ones(frqXdrn_shp+(1,1))
            mh[...,numpy.where(theta>numpy.pi/2),0,0]=0.
            response=mh*response
        return response
def plotElemPat(artsdata, frequency = 55.0e6):
    """Plots the HA antenna pattern over the entire Hemisphere.

    Draws two Ludwig-3 plots, one per polarization channel."""
    THETA, PHI = pntsonsphere.ZenHemisphGrid() #theta=0.2rad for zenith anomaly
    hp = HamakerPolarimeter(artsdata)
    jones=hp.getJonesAlong([frequency], (THETA, PHI) )
    # Channel 0 components (theta, phi).
    EsTh = numpy.squeeze(jones[...,0,0])
    EsPh = numpy.squeeze(jones[...,0,1])
    tvecfun.plotvfonsph(THETA, PHI, EsTh, EsPh, freq=frequency, vcoord='Ludwig3')
    # Channel 1 components.
    EsTh = numpy.squeeze(jones[...,1,0])
    EsPh = numpy.squeeze(jones[...,1,1])
    tvecfun.plotvfonsph(THETA, PHI, EsTh, EsPh, freq=frequency, vcoord='Ludwig3')
def showAnomaly():
    """Demonstrates the anomaly of the Hamaker-Arts model close to zenith.

    Plots the track (azimuth/elevation) and the gain along it during a
    simulated transit near theta0.

    NOTE(review): HA_LBAfile_default is an empty string here — this demo
    presumably needs a real coefficient set to run; confirm."""
    frequency = 225e6
    nrPnts = 200
    timeAng = 0.5
    timeAngs = numpy.linspace(-timeAng, timeAng, nrPnts)/2.0
    theta0 = 0.5
    thetas, phis = pntsonsphere.getTrack(theta0, 0*math.pi/4, theta0-0.001, timeAngs)
    hp = HamakerPolarimeter(HA_LBAfile_default)
    #jones = hp.getJonesAlong([frequency], (phis+1*5*math.pi/4, math.pi/2-thetas))
    jones = hp.getJonesAlong([frequency], (phis+1*5*math.pi/4, thetas))
    EsTh = numpy.squeeze(jones[...,0,0])
    EsPh = numpy.squeeze(jones[...,0,1])
    # Top panel: the sky track; bottom panel: |E_theta| along the transit.
    plt.subplot(2,1,1)
    plt.plot(phis/math.pi*180, 90-thetas/math.pi*180, '*')
    plt.xlabel('Azimuth [deg]')
    plt.ylabel('Elevation [deg]')
    plt.subplot(2,1,2)
    plt.plot(timeAngs*60, numpy.abs(EsTh))
    plt.xlabel('Transit time [min]')
    plt.ylabel('Gain [rel.]')
    plt.show()
def getJones(freq, az, el):
    """Print the Jones matrix of the HA model for a frequency and direction.

    NOTE(review): the freq, az, el parameters are ignored — the call below
    hard-codes 10 MHz and direction (0.1, 0.2); likely a bug, confirm.
    (Python 2 print statements.)"""
    hp = HamakerPolarimeter(HA_LBAfile_default)
    jones=hp.getJonesAlong([10.e6], (0.1, 0.2))
    print "Jones:"
    print jones
    print "J.J^H:"
    print numpy.dot(jones, jones.conj().transpose()).real
    IXRJ = dualpolelem.getIXRJ(jones)
    print "IXRJ:", 10*numpy.log10(IXRJ),"[dB]"
def _getargs():
freq = float(sys.argv[1])
az = float(sys.argv[2])
el = float(sys.argv[3])
return freq, az, el
# Script entry point: run the zenith-anomaly demonstration.
if __name__ == "__main__":
    #plotElemPat(30e6)
    showAnomaly()
    #HBAmod = HamakerPolarimeter(HA_HBAfile_default)
    #jones = HBAmod.getJonesAlong([150e6, 160e6, 170e6], ( [0.1,0.1], [0.3, 0.4]) )
    #print jones
| [
"tobia@chalmers.se"
] | tobia@chalmers.se |
91554162b70e52b44dac5788af4f99b9d261c2e6 | 1a220abd21c56728aa3368534506bfc9ced8ad46 | /3.beakjoon/DP/BOJ_가장 큰 증가하는 부분 수열.py | f52f6d4ddeb092b4be6311015d24ddfacf7df44e | [] | no_license | JeonJe/Algorithm | 0ff0cbf47900e7877be077e1ffeee0c1cd50639a | 6f8da6dbeef350f71b7c297502a37f87eb7d0823 | refs/heads/main | 2023-08-23T11:08:17.781953 | 2023-08-23T08:31:41 | 2023-08-23T08:31:41 | 197,085,186 | 0 | 0 | null | 2023-02-21T03:26:41 | 2019-07-15T23:22:55 | Python | UTF-8 | Python | false | false | 340 | py | n = int(input())
def max_increasing_subseq_sum(values):
    """Return the maximum sum over all strictly increasing subsequences.

    O(n^2) DP: best[i] is the largest sum of an increasing subsequence
    ending at values[i]; negative prefixes are floored at 0, exactly as
    the original loop did (temp started from 0).
    """
    best = [0] * len(values)
    for i, v in enumerate(values):
        prev = 0
        for j in range(i):
            if values[j] < v:
                prev = max(prev, best[j])
        best[i] = prev + v
    return max(best)

if __name__ == "__main__":
    # `n` (the declared length) was read above; the numbers themselves
    # follow on the next line. The old n == 1 shortcut is subsumed by the
    # general DP, which returns nums[0] for a single element.
    nums = list(map(int, input().split()))
    print(max_increasing_subseq_sum(nums))
"whssodi@gmail.com"
] | whssodi@gmail.com |
a5c5dcbc94612c0992dd51b396788cba593b0e91 | b224c7413b7e6a1cb78dad60b4899674fefe8269 | /openforce_financial_report/report/__init__.py | f25291a28701b525b54a4b167f885ea54eb1abb4 | [] | no_license | alessandrocamilli/7-openforce-addons | 2ee00b712538a8eb433d0ce0c63cd12a861548e6 | 78fc164679b690bcf84866987266838de134bc2f | refs/heads/master | 2016-08-03T11:58:12.730337 | 2014-07-03T10:29:56 | 2014-07-03T10:29:56 | 21,004,298 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Alessandro Camilli (a.camilli@yahoo.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import partners_due_register
import common_partner_reports
import common_reports
import webkit_parser_header_fix
| [
"alessandrocamilli@openforce.it"
] | alessandrocamilli@openforce.it |
50a2ef5128dd7ed1fe459863cad3a6691fb09054 | 7ae754f51aaf2e6e559b925980c35fe795808537 | /02_分支/hm_07_火车站安检.py | 49c870c4d693840399ef573b305e8d8426bed2bc | [] | no_license | xiaohema233/PythonStart | be113d9359734d17eeb22b0584cf240a128fed3c | 44f98fc6c50f2c85b72ee029ec99d3099459f370 | refs/heads/master | 2022-05-15T19:33:22.363721 | 2022-05-09T06:28:05 | 2022-05-09T06:28:05 | 241,621,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | # 定义布尔型变量 has_ticket 表示是否有车票
# Boolean has_ticket: whether the passenger holds a ticket.
has_ticket = True
# Integer knife_length: length of the carried knife, in centimetres.
knife_length = 30
# Only ticket holders proceed to the security check.
if has_ticket:
    print("车票检查通过,准备开始安检")
    # The check rejects knives longer than 20 cm.
    if knife_length > 20:
        # Too long: report the length, refuse boarding.
        print("您携带的刀太长了,有 %d 公分长!" % knife_length)
        print("不允许上车")
    # 20 cm or shorter: the check passes.
    else:
        print("安检已经通过,祝您旅途愉快!")
# No ticket: refuse entry.
else:
    print("大哥,请先买票")
| [
"33052287+xiaohema233@users.noreply.github.com"
] | 33052287+xiaohema233@users.noreply.github.com |
44132d5bd3147c83d6a97cb746893c629d905bfa | 9c880db9912c35a73469f728245de78459763ce4 | /ProjectCode/DataPreprocess.py | de80744293fdabddd08e68bd8803a6da43957549 | [] | no_license | Lizi2hua/Project-111 | 3a17e396230c76bf47dd8209e801fe5edd079004 | 57dc7e331b5bfa860226e67c6f45de682720df98 | refs/heads/master | 2022-10-20T16:36:47.322765 | 2020-07-14T14:42:03 | 2020-07-14T14:42:03 | 277,501,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,166 | py | #将所有的数据处理函数都包含到此文件下
import SimpleITK as sitk
import os
import json
import glob
import SimpleITK as sitk
import pandas as pd
import matplotlib.pyplot as plt
# dcm数据处理函数
def dicom_metainfo(dicm_path, list_tag):
    '''
    Read DICOM header metadata.
    :param dicm_path: path to the DICOM file
    :param list_tag: list of tag names, e.g. ['0008|0018', ]
    :return: list of tag values, in the same order as list_tag
    '''
    reader = sitk.ImageFileReader()
    reader.LoadPrivateTagsOn()
    reader.SetFileName(dicm_path)
    # Parse only the header; the pixel data is never loaded.
    reader.ReadImageInformation()
    return [reader.GetMetaData(t) for t in list_tag]
def dicom2array(dcm_path):
    '''
    Read a DICOM file and convert it to a grayscale image (np.array).
    https://simpleitk.readthedocs.io/en/master/link_DicomConvert_docs.html
    :param dcm_path: DICOM file path
    :return: 2-D uint8 grayscale array (first slice of the volume)
    '''
    image_file_reader = sitk.ImageFileReader()
    image_file_reader.SetImageIO('GDCMImageIO')
    image_file_reader.SetFileName(dcm_path)
    image_file_reader.ReadImageInformation()
    image = image_file_reader.Execute()
    if image.GetNumberOfComponentsPerPixel() == 1:
        # Single-channel image: rescale intensities into [0, 255].
        image = sitk.RescaleIntensity(image, 0, 255)
        # MONOCHROME1 stores inverted grayscale; flip it back.
        if image_file_reader.GetMetaData('0028|0004').strip() == 'MONOCHROME1':
            image = sitk.InvertIntensity(image, maximum=255)
        image = sitk.Cast(image, sitk.sitkUInt8)
    # Take the first slice of the (depth, H, W) array.
    img_x = sitk.GetArrayFromImage(image)[0]
    return img_x
# json文件处理函数
def get_info(train_path,json_path):
    """Join the JSON annotations with the DICOM files on disk.

    :param train_path: directory containing <study>/<image>.dcm files
    :param json_path: annotation JSON with studyUid/seriesUid/instanceUid
    :return: pandas Series indexed by dcm path, values = annotation lists
    """
    # Collect one annotation row per study from the JSON file.
    annotation_info = pd.DataFrame(columns=('studyUid','seriesUid','instanceUid','annotation'))
    json_df = pd.read_json(json_path)
    for idx in json_df.index:
        studyUid = json_df.loc[idx,"studyUid"]
        seriesUid = json_df.loc[idx,"data"][0]['seriesUid']
        instanceUid = json_df.loc[idx,"data"][0]['instanceUid']
        annotation = json_df.loc[idx,"data"][0]['annotation']
        row = pd.Series({'studyUid':studyUid,'seriesUid':seriesUid,'instanceUid':instanceUid,'annotation':annotation})
        annotation_info = annotation_info.append(row,ignore_index=True)
    # Index every .dcm on disk by its three UID header tags.
    dcm_paths = glob.glob(os.path.join(train_path,"**","**.dcm"))
    tag_list = ['0020|000d','0020|000e','0008|0018']
    dcm_info = pd.DataFrame(columns=('dcmPath','studyUid','seriesUid','instanceUid'))
    for dcm_path in dcm_paths:
        try:
            studyUid,seriesUid,instanceUid = dicom_metainfo(dcm_path,tag_list)
            row = pd.Series({'dcmPath':dcm_path,'studyUid':studyUid,'seriesUid':seriesUid,'instanceUid':instanceUid })
            dcm_info = dcm_info.append(row,ignore_index=True)
        except:
            # Unreadable / non-conforming DICOMs are silently skipped
            # (deliberate best-effort scan).
            continue
    # Inner-join: keep only images that have an annotation.
    result = pd.merge(annotation_info,dcm_info,on=['studyUid','seriesUid','instanceUid'])
    result = result.set_index('dcmPath')['annotation'] # returns image path -> annotation mapping
    return result
# 得到数据(array类型)和标签的函数
def DataLabelGenerator(DATA_PATH,JSON_PATH,idx):
    """Return image array and labels for the idx-th annotated DICOM.

    :return: (img_arr, coord, id, disc, vertebra) — per-point coordinates,
    spine identifications, disc labels and vertebra labels (entries may be
    None when the tag is absent).
    """
    result=get_info(DATA_PATH,JSON_PATH)
    # Convert the image to a numpy array.
    img_dir=result.index[idx] # path of the idx-th image
    img_arr=dicom2array(img_dir)
    # Fetch the annotation points.
    tags=result[idx]
    annoation=tags[0]['data']['point']
    # point coordinates
    coord=[]
    # spine identification IDs
    id=[]
    # intervertebral disc labels
    disc=[]
    # vertebra labels
    vertebra=[]
    for j in range(len(annoation)):
        coord_list=annoation[j]['coord']
        coord.append(coord_list)
        id_name=annoation[j]['tag']['identification']
        id.append(id_name)
        name=annoation[j]['tag']
        # .get() keeps a None placeholder when a tag kind is missing,
        # so all five outputs stay index-aligned.
        vertebra_label=name.get('vertebra')
        vertebra.append(vertebra_label)
        disc_label=name.get('disc')
        disc.append(disc_label)
    return img_arr,coord,id,disc,vertebra
# 一下代码是测试,也可以做模板
# DATA_PATH= r"C:\project\lumbar\Project-111\dataset\train_train51"
# JSON_PATH= r"C:\project\lumbar\Project-111\dataset\train_train51/lumbar_train51_annotation.json"
# idx=5
# img_arr,coord,id,disc,vertebra=DataLabelGenerator(DATA_PATH,JSON_PATH,idx)
# print(coord)
# print(img_arr)
# plt.title("{}\'s img ".format(idx))
# for j in coord:
# x,y=j
# plt.scatter(x,y,c='r',s=3)
# plt.imshow(img_arr,cmap='gray')
# plt.show()
| [
"1050100964@qq.com"
] | 1050100964@qq.com |
a9cf8b7863b31cbd9969909edaa8c0ecef6230ee | bfd04e41f0b5b236ef937eb3922a105d00d25ce0 | /db/campaigns/migrations/0001_initial.py | 11a233e848f4766a19dd5444b69249caf5e3acf5 | [
"MIT"
] | permissive | cega/try.wq.io | 5c9dac3f428b2a92c63096f0cb882f264f684b9f | fcb12d5e26a05f6a653f0cf63e668ea1a2f088c2 | refs/heads/master | 2021-06-09T20:41:25.402542 | 2016-11-29T19:41:59 | 2016-11-29T19:41:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,079 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-23 15:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the campaigns app.

    Creates Campaign, Event, EventResult, Parameter and Report.
    NOTE: generated by Django 1.10 — do not hand-edit once applied.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.WQ_REPORTSTATUS_MODEL),
        migrations.swappable_dependency(settings.WQ_SITE_MODEL),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Campaign: a named monitoring campaign with icon and description.
        migrations.CreateModel(
            name='Campaign',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField()),
                ('name', models.CharField(max_length=255)),
                ('icon', models.ImageField(upload_to='campaigns')),
                ('description', models.TextField()),
            ],
        ),
        # Event: a dated occurrence within a campaign at a site.
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
            ],
            options={
                'ordering': ('-date', 'campaign', 'site'),
            },
        ),
        # EventResult: denormalized per-event result values (wq framework table).
        migrations.CreateModel(
            name='EventResult',
            fields=[
                ('id', models.PositiveIntegerField(primary_key=True, serialize=False)),
                ('event_date', models.DateField()),
                ('result_value_numeric', models.FloatField(blank=True, null=True)),
                ('result_value_text', models.TextField(blank=True, null=True)),
                ('result_empty', models.BooleanField(default=False)),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.WQ_EVENT_MODEL)),
                ('event_campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign')),
                ('event_site', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.WQ_SITE_MODEL)),
            ],
            options={
                'db_table': 'wq_eventresult',
                'abstract': False,
            },
        ),
        # Parameter: a measured quantity belonging to a campaign.
        migrations.CreateModel(
            name='Parameter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, db_index=True, max_length=255)),
                ('slug', models.CharField(blank=True, max_length=255)),
                ('is_numeric', models.BooleanField(default=False)),
                ('units', models.CharField(blank=True, max_length=50, null=True)),
                ('description', models.TextField()),
                ('campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parameters', to='campaigns.Campaign')),
            ],
            options={
                'ordering': ['pk'],
            },
        ),
        # Report: a user-submitted observation for an event.
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('entered', models.DateTimeField(blank=True)),
                ('photo', models.ImageField(blank=True, null=True, upload_to='reports')),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_set', to=settings.WQ_EVENT_MODEL)),
                ('status', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.WQ_REPORTSTATUS_MODEL)),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
                'ordering': ('-entered',),
            },
        ),
    ]
| [
"andrew@wq.io"
] | andrew@wq.io |
d9b9389944f741e45759c518f06c672459fe46f7 | a281d09ed91914b134028c3a9f11f0beb69a9089 | /great_expectations/rule_based_profiler/domain_builder/column_pair_domain_builder.py | e7290588875208769d5627b986abc51a6ca0c953 | [
"Apache-2.0"
] | permissive | CarstenFrommhold/great_expectations | 4e67bbf43d21bc414f56d576704259a4eca283a5 | 23d61c5ed26689d6ff9cec647cc35712ad744559 | refs/heads/develop | 2023-01-08T10:01:12.074165 | 2022-11-29T18:50:18 | 2022-11-29T18:50:18 | 311,708,429 | 0 | 0 | Apache-2.0 | 2020-11-10T15:52:05 | 2020-11-10T15:52:04 | null | UTF-8 | Python | false | false | 3,949 | py | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Optional, Union
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.metric_domain_types import MetricDomainTypes
from great_expectations.rule_based_profiler.domain import (
INFERRED_SEMANTIC_TYPE_KEY,
Domain,
SemanticDomainTypes,
)
from great_expectations.rule_based_profiler.domain_builder import ColumnDomainBuilder
from great_expectations.rule_based_profiler.parameter_container import (
ParameterContainer,
)
if TYPE_CHECKING:
from great_expectations.data_context.data_context.abstract_data_context import (
AbstractDataContext,
)
from great_expectations.validator.validator import Validator
class ColumnPairDomainBuilder(ColumnDomainBuilder):
    """
    Builds exactly one COLUMN_PAIR Domain from two explicitly configured columns.
    The two column names are sorted and mapped to "column_A"/"column_B".
    """
    def __init__(
        self,
        include_column_names: Optional[Union[str, Optional[List[str]]]] = None,
        data_context: Optional[AbstractDataContext] = None,
    ) -> None:
        """
        Args:
            include_column_names: Explicitly specified exactly two desired columns
            data_context: AbstractDataContext associated with this DomainBuilder
        """
        # Delegate to ColumnDomainBuilder with every filtering option disabled:
        # this builder relies solely on the explicit two-column include list.
        super().__init__(
            include_column_names=include_column_names,
            exclude_column_names=None,
            include_column_name_suffixes=None,
            exclude_column_name_suffixes=None,
            semantic_type_filter_module_name=None,
            semantic_type_filter_class_name=None,
            include_semantic_types=None,
            exclude_semantic_types=None,
            data_context=data_context,
        )
    # This builder always emits COLUMN_PAIR-typed domains.
    @property
    def domain_type(self) -> MetricDomainTypes:
        return MetricDomainTypes.COLUMN_PAIR
    def _get_domains(
        self,
        rule_name: str,
        variables: Optional[ParameterContainer] = None,
    ) -> List[Domain]:
        """Return the single two-column Domain built from the configured columns.
        Args:
            rule_name: name of Rule object, for which "Domain" objects are obtained.
            variables: Optional variables to substitute when evaluating.
        Returns:
            A one-element list containing the COLUMN_PAIR Domain.
        Raises:
            ProfilerExecutionError: if anything other than exactly two effective
                column names is resolved.
        """
        batch_ids: List[str] = self.get_batch_ids(variables=variables)
        validator: Validator = self.get_validator(variables=variables)
        effective_column_names: List[str] = self.get_effective_column_names(
            batch_ids=batch_ids,
            validator=validator,
            variables=variables,
        )
        # A column pair is only meaningful with exactly two columns.
        if not (effective_column_names and (len(effective_column_names) == 2)):
            raise ge_exceptions.ProfilerExecutionError(
                message=f"""Error: Columns specified for {self.__class__.__name__} in sorted order must correspond to \
"column_A" and "column_B" (in this exact order).
"""
            )
        # Sorting makes the assignment to "column_A"/"column_B" deterministic.
        effective_column_names = sorted(effective_column_names)
        domain_kwargs: Dict[str, str] = dict(
            zip(
                [
                    "column_A",
                    "column_B",
                ],
                effective_column_names,
            )
        )
        column_name: str
        # Map each column to the semantic type previously inferred by the filter.
        semantic_types_by_column_name: Dict[str, SemanticDomainTypes] = {
            column_name: self.semantic_type_filter.table_column_name_to_inferred_semantic_domain_type_map[
                column_name
            ]
            for column_name in effective_column_names
        }
        domains: List[Domain] = [
            Domain(
                domain_type=self.domain_type,
                domain_kwargs=domain_kwargs,
                details={
                    INFERRED_SEMANTIC_TYPE_KEY: semantic_types_by_column_name,
                },
                rule_name=rule_name,
            ),
        ]
        return domains
| [
"noreply@github.com"
] | CarstenFrommhold.noreply@github.com |
2a947cb9b779beaefbc64505b7502fe3f4a97d72 | 38346ccf93e0c0d49a378b2532fe215669018829 | /nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py | 447ba546dd431fb73ff3b3ce3dbc8a4164d3feb4 | [
"BSD-3-Clause"
] | permissive | swederik/nipype | de509c2605bc83448240c7c3c68ee8d220d48ef3 | 872720a6fc00b00e029fb67742deedee524b2a9f | refs/heads/master | 2020-12-25T10:08:44.268742 | 2014-05-22T14:05:58 | 2014-05-22T14:05:58 | 1,421,176 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.mrtrix.tracking import SphericallyDeconvolutedStreamlineTrack
def test_SphericallyDeconvolutedStreamlineTrack_inputs():
    """Check every declared input-trait metadata value (auto-generated, nose yield-style)."""
    # Expected trait metadata, keyed by input name; generated by tools/checkspecs.py.
    input_map = dict(args=dict(argstr='%s',
    ),
    cutoff_value=dict(argstr='-cutoff %s',
    units='NA',
    ),
    desired_number_of_tracks=dict(argstr='-number %d',
    ),
    do_not_precompute=dict(argstr='-noprecomputed',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    exclude_file=dict(argstr='-exclude %s',
    xor=['exclude_file', 'exclude_spec'],
    ),
    exclude_spec=dict(argstr='-exclude %s',
    position=2,
    sep=',',
    units='mm',
    xor=['exclude_file', 'exclude_spec'],
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='%s',
    mandatory=True,
    position=-2,
    ),
    include_file=dict(argstr='-include %s',
    xor=['include_file', 'include_spec'],
    ),
    include_spec=dict(argstr='-include %s',
    position=2,
    sep=',',
    units='mm',
    xor=['include_file', 'include_spec'],
    ),
    initial_cutoff_value=dict(argstr='-initcutoff %s',
    units='NA',
    ),
    initial_direction=dict(argstr='-initdirection %s',
    units='voxels',
    ),
    inputmodel=dict(argstr='%s',
    position=-3,
    usedefault=True,
    ),
    mask_file=dict(argstr='-mask %s',
    xor=['mask_file', 'mask_spec'],
    ),
    mask_spec=dict(argstr='-mask %s',
    position=2,
    sep=',',
    units='mm',
    xor=['mask_file', 'mask_spec'],
    ),
    maximum_number_of_tracks=dict(argstr='-maxnum %d',
    ),
    maximum_tract_length=dict(argstr='-length %s',
    units='mm',
    ),
    minimum_radius_of_curvature=dict(argstr='-curvature %s',
    units='mm',
    ),
    minimum_tract_length=dict(argstr='-minlength %s',
    units='mm',
    ),
    no_mask_interpolation=dict(argstr='-nomaskinterp',
    ),
    out_file=dict(argstr='%s',
    name_source=['in_file'],
    name_template='%s_tracked.tck',
    output_name='tracked.tck',
    position=-1,
    ),
    seed_file=dict(argstr='-seed %s',
    xor=['seed_file', 'seed_spec'],
    ),
    seed_spec=dict(argstr='-seed %s',
    position=2,
    sep=',',
    units='mm',
    xor=['seed_file', 'seed_spec'],
    ),
    step_size=dict(argstr='-step %s',
    units='mm',
    ),
    stop=dict(argstr='-stop',
    ),
    terminal_output=dict(mandatory=True,
    nohash=True,
    ),
    unidirectional=dict(argstr='-unidirectional',
    ),
    )
    inputs = SphericallyDeconvolutedStreamlineTrack.input_spec()
    # Yield one (assert_equal, actual, expected) triple per metadata entry so the
    # nose runner reports each trait attribute as its own test case.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SphericallyDeconvolutedStreamlineTrack_outputs():
    """Check the declared output traits (auto-generated, nose yield-style)."""
    # The interface exposes a single output, 'tracked', with no extra metadata.
    output_map = dict(tracked=dict(),
    )
    outputs = SphericallyDeconvolutedStreamlineTrack.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| [
"satra@mit.edu"
] | satra@mit.edu |
58fa7eb98f9ddaf9c735d1a5be9232da1b31fd64 | 87b66cbf9eb8aca426808734eb754f87cf3ec38e | /tests/dolfin/test_ascalarbar.py | c80a048a0c2e3a8970760f0d6197c67c8c5d50c3 | [
"MIT",
"LicenseRef-scancode-public-domain",
"OFL-1.1"
] | permissive | RubendeBruin/vtkplotter | ea33c38cc9df72c15d7ca2fa357205ea35a559bb | 641743f29289d6df3d44d366072b72e221d16a51 | refs/heads/master | 2023-05-25T13:06:24.774288 | 2023-05-01T10:27:19 | 2023-05-01T10:27:19 | 219,933,917 | 0 | 1 | MIT | 2019-11-06T07:10:46 | 2019-11-06T07:10:45 | null | UTF-8 | Python | false | false | 899 | py | import numpy as np
from dolfin import *
from dolfin import __version__
from vedo.dolfin import plot, screenshot, MeshActor, show
from vedo import settings
print('Test ascalarbar, dolfin version', __version__)
# Newer dolfin versions expose MPI.comm_world; older ones take no communicator.
if hasattr(MPI, 'comm_world'):
    mesh = UnitSquareMesh(MPI.comm_world, nx=16, ny=16)
else:
    mesh = UnitSquareMesh(16,16)
# Interpolate a linear expression onto P1 Lagrange elements; its range on the
# unit square is [-10, 10], which the asserts below verify.
V = FunctionSpace(mesh, 'Lagrange', 1)
f = Expression('10*(x[0]+x[1]-1)', degree=1)
u = interpolate(f, V)
# returnActorsNoShow=True keeps the plot off-screen and hands back the actors.
actors = plot(u, mode='color', cmap='viridis', vmin=-3, vmax=3, style=1,
             returnActorsNoShow=True)
actor = actors[0]
solution = actor.pointdata[0]
print('ArrayNames', actor.pointdata.keys())
print('min', 'mean', 'max:')
print(np.min(solution), np.mean(solution), np.max(solution), len(solution))
# A 16x16 UnitSquareMesh has 17*17 = 289 vertices.
assert len(solution) == 289
assert np.isclose(np.min(solution) , -10., atol=1e-05)
assert np.isclose(np.max(solution) , 10., atol=1e-05)
| [
"marco.musy@gmail.com"
] | marco.musy@gmail.com |
9a2213ec3ee95a7ecf2b52d08704ba5983875ca3 | 7f4306057991622329ed3ab43c8e338ebdfb6d74 | /pilgram/css/blending/tests/test_nonseparable.py | 9e606fbb615e0ff3891a7809437768cffa36a290 | [
"Apache-2.0"
] | permissive | akiomik/pilgram | d958312c98a5418d176ad884c50303e9f2731825 | 410252928e50e954472ff5ffcd45446aa8be32f7 | refs/heads/main | 2023-07-24T06:54:00.285954 | 2023-07-18T00:26:42 | 2023-07-18T00:26:42 | 169,348,812 | 90 | 19 | Apache-2.0 | 2023-09-12T12:31:08 | 2019-02-06T03:10:44 | Jupyter Notebook | UTF-8 | Python | false | false | 4,985 | py | # Copyright 2019 Akiomi Kamakura
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import floor
import pytest
from PIL import Image, ImageMath
from PIL.ImageMath import imagemath_convert as _convert
from pilgram import util
from pilgram.css.blending.nonseparable import (
_clip_color,
_max3,
_min3,
lum,
lum_im,
sat,
set_lum,
set_sat,
)
def test_min3():
    """_min3 selects the smallest of the three channel values (0 here)."""
    red, green, blue = util.fill((1, 1), [0, 128, 255]).split()
    result = ImageMath.eval(
        'convert(min3((r, g, b)), "L")',
        min3=_min3, r=red, g=green, b=blue,
    )
    assert [0] == list(result.getdata())
def test_max3():
    """_max3 selects the largest of the three channel values (255 here)."""
    red, green, blue = util.fill((1, 1), [0, 128, 255]).split()
    result = ImageMath.eval(
        'convert(max3((r, g, b)), "L")',
        max3=_max3, r=red, g=green, b=blue,
    )
    assert [255] == list(result.getdata())
def test_clip_color():
    """_clip_color pulls out-of-range channel values back into [0, 255]."""
    im = util.fill((1, 1), [0, 128, 255])
    r, g, b = im.split()
    # Shift red below 0 and blue above 255 so both clipping branches trigger.
    bands = ImageMath.eval(
        "clip_color((float(r - 64), float(g), float(b + 64)))",
        clip_color=_clip_color,
        r=r,
        g=g,
        b=b,
    )
    # Float comparisons use a relative tolerance; values were captured from a
    # known-good run of the blending math.
    expected = [
        [pytest.approx(25.70517158047366, 1e-6)],
        [pytest.approx(106.8796587856024, 1e-6)],
        [pytest.approx(187.63136220320442, 1e-6)],
    ]
    assert [list(band.im.getdata()) for band in bands] == expected
def test_lum():
    """lum returns the (float) luminosity; converting to "L" floors it."""
    im = util.fill((1, 1), [0, 128, 255])
    r, g, b = im.split()
    im_f = ImageMath.eval("lum((float(r), float(g), float(b)))", lum=lum, r=r, g=g, b=b)
    im_l = im_f.convert("L")
    # Luminosity of (0, 128, 255) is 103.57; the "L" conversion truncates.
    assert list(im_f.getdata()) == [pytest.approx(103.57, 1e-6)]
    assert list(im_l.getdata()) == [floor(103.57)]
def test_lum_im():
    """lum_im rounds the computed luminosity (103.57) to the nearest integer."""
    source = util.fill((1, 1), [0, 128, 255])
    assert [round(103.57)] == list(lum_im(source).getdata())
def test_set_lum():
    """set_lum recolors im1's channels so they take im2's luminosity."""
    im1 = util.fill((1, 1), [0, 128, 255])
    im2 = util.fill((1, 1), [128, 128, 128])
    r1, g1, b1 = im1.split()
    r2, g2, b2 = im2.split()
    # Channel tuples are spliced into the ImageMath expression as text.
    c1 = "(float(r1), float(g1), float(b1))"
    c2 = "(float(r2), float(g2), float(b2))"
    bands = ImageMath.eval(
        "set_lum({}, lum({}))".format(c1, c2),
        set_lum=set_lum,
        lum=lum,
        r1=r1,
        g1=g1,
        b1=b1,
        r2=r2,
        b2=b2,
        g2=g2,
    )
    # Raw float band values from a known-good run.
    expected1 = [
        [pytest.approx(41.13881001122631, 1e-6)],
        [pytest.approx(148.48874067225782, 1e-6)],
        [255],
    ]
    assert [list(band.im.getdata()) for band in bands] == expected1
    # Merging back into an RGB image truncates each band to an integer.
    im_set_lum = Image.merge("RGB", [_convert(band, "L").im for band in bands])
    expected2 = [(floor(41.13881001122631), floor(148.48874067225782), 255)]
    assert list(im_set_lum.getdata()) == expected2
def test_sat():
    """sat of [80, 128, 200] is the max-min channel spread: 200 - 80 = 120."""
    red, green, blue = util.fill((1, 1), [80, 128, 200]).split()
    converted = ImageMath.eval(
        'convert(sat((r, g, b)), "L")', sat=sat, r=red, g=green, b=blue
    )
    assert [120] == list(converted.getdata())
def test_set_sat_cmax_gt_cmin():
    """set_sat rescales a strictly increasing triple to im2's saturation (64)."""
    im1 = util.fill((1, 1), [0, 128, 255])
    im2 = util.fill((1, 1), [64, 96, 128])  # sat = 64
    r1, g1, b1 = im1.split()
    r2, g2, b2 = im2.split()
    bands = ImageMath.eval(
        "set_sat((r1, g1, b1), sat((r2, g2, b2)))",
        set_sat=set_sat,
        sat=sat,
        r1=r1,
        g1=g1,
        b1=b1,
        r2=r2,
        g2=g2,
        b2=b2,
    )
    # min maps to 0, max to the new saturation; the middle channel is scaled
    # proportionally (abs=1 tolerates integer rounding).
    expected = [
        [0],
        [pytest.approx(32.12549019607843, abs=1)],
        [64],
    ]
    assert [list(band.im.getdata()) for band in bands] == expected
def test_set_sat_cmax_eq_cmid_gt_cmin():
    """When the top two channels tie, both land on the new saturation value."""
    im1 = util.fill((1, 1), [0, 128, 128])
    im2 = util.fill((1, 1), [64, 96, 128])  # sat = 64
    r1, g1, b1 = im1.split()
    r2, g2, b2 = im2.split()
    bands = ImageMath.eval(
        "set_sat((r1, g1, b1), sat((r2, g2, b2)))",
        set_sat=set_sat,
        sat=sat,
        r1=r1,
        g1=g1,
        b1=b1,
        r2=r2,
        g2=g2,
        b2=b2,
    )
    expected = [[0], [64], [64]]
    assert [list(band.im.getdata()) for band in bands] == expected
def test_set_sat_cmax_eq_cmin():
    """A gray input (all channels equal) has zero spread, so every band is 0."""
    im1 = util.fill((1, 1), [128, 128, 128])
    im2 = util.fill((1, 1), [64, 96, 128])  # sat = 64
    r1, g1, b1 = im1.split()
    r2, g2, b2 = im2.split()
    bands = ImageMath.eval(
        "set_sat((r1, g1, b1), sat((r2, g2, b2)))",
        set_sat=set_sat,
        sat=sat,
        r1=r1,
        g1=g1,
        b1=b1,
        r2=r2,
        g2=g2,
        b2=b2,
    )
    expected = [[0], [0], [0]]
    assert [list(band.im.getdata()) for band in bands] == expected
| [
"akiomik@gmail.com"
] | akiomik@gmail.com |
44f89ac374c146890f0b8ea11344cfeacb3cc065 | 04a643a77927bc56ab58c7df91d4733321e61e51 | /p19_plots/fig_hair_mosh.py | 2bd63d253824490409c2a69e2466befa213887e2 | [] | no_license | dcollins4096/p19_newscripts | d2fae1807170a4d70cf4c87222a6258211f993ff | 23c780dd15b60944ed354406706de85282d0bee6 | refs/heads/master | 2023-07-21T11:53:55.188383 | 2023-07-18T17:38:21 | 2023-07-18T17:38:21 | 215,159,839 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,028 | py |
from starter2 import *
from collections import defaultdict
import scipy
import colors
import hair_dryer
reload(hair_dryer)
import three_loopers_u500 as TL
def simple_hair(this_looper,core_list=None):
    """Per core, save a 3-panel figure: particle tracks (full view + zoom)
    and velocity/density time series.  Writes PNGs under plots_to_sort/.
    """
    # Default to every core id present in the looper's track data.
    if core_list is None:
        core_list = np.unique(this_looper.tr.core_ids)
    thtr=this_looper.tr
    # Boolean mask selecting the quantized subset of movie frames.
    mask = movie_frames.quantized_mask(this_looper).flatten()
    all_times=thtr.times
    all_frames=thtr.frames
    times=thtr.times[mask]+0 #the zero makes a copy
    # Column vector (n_times, 1) so it broadcasts against per-particle arrays.
    times.shape=times.size,1
    times=times/colors.tff
    for core_id in core_list:
        fig,axes=plt.subplots(3,1, figsize=(6,10))
        # ax: full track view (with zoom box), ax3: zoomed view, ax1: time series.
        ax=axes[0];ax1=axes[2]; ax3=axes[1]
        ms = trackage.mini_scrubber(thtr,core_id, do_velocity=True)
        ms.particle_pos(core_id)
        # Line-of-sight selector; x/y pick which position components to plot.
        LOS = 0
        x = [1,0,1][LOS] # Using [1,0,1] and [2,2,0]
        y = [2,2,0][LOS] # unfolds nicely.
        if False:
            # Debug branch: plot every particle (never taken as written).
            sl=slice(None)
            c=[0.5]*4
        else:
            # Subsample every 30th particle; faint RGBA color for the hair lines.
            sl = slice(None,None,30)
            #c=[0,0,0,0.1]
            c=[0.1]*4
        Linewidth1=Linewidth2=0.2
        print(sl)
        print(c)
        # Arrays below are transposed to (time, particle) then masked in time.
        rho = ms.density[sl].transpose()#*colors.density_units
        rho = rho[mask,:]
        dv = ms.cell_volume[sl].transpose()[mask,:]
        vv = dv.sum(axis=1)
        vx = ms.rel_vx[sl].transpose()[mask,:]
        vy = ms.rel_vy[sl].transpose()[mask,:]
        vz = ms.rel_vz[sl].transpose()[mask,:]
        # Velocity magnitudes use ALL particles (no [sl] subsample) on purpose?
        # NOTE(review): confirm the [:] vs [sl] asymmetry is intentional.
        v22_all = ms.rel_vmag[:].transpose()[mask,:]
        vr_all = ms.vr_rel[:].transpose()[mask,:]
        vt_all = (ms.vt2_rel[:].transpose()[mask,:])**0.5
        # Mean over particles at each time.
        vrm=vr_all.mean(axis=1)
        v2 = v22_all.mean(axis=1)
        vtm=vt_all.mean(axis=1)
        # Density shares the x-axis with the velocity panel via a twin y-axis.
        rho_plot=ax1.twinx()
        print(rho.shape,c)
        rho_plot.plot(times, rho*colors.density_units, c=c, linewidth=Linewidth1)
        rho_plot.set(yscale='log',ylabel=r'$\rho_{\rm{particle}} [cm^{-3}]$')
        # black: mean |v|, cyan: tangential, red: |radial|.
        ax1.plot(times, v2, c='k')
        ax1.plot(times, vtm, c='c')
        ax1.plot(times, np.abs(vrm), c='r')
        ax1.set(ylabel=r'$v_{\rm{particles}}/c_s$', xlabel=r'$t/t_{\rm{ff}}$')
        p = [ms.particle_x[sl].transpose(),ms.particle_y[sl].transpose(),ms.particle_z[sl].transpose()]
        for aaa in [ax,ax3]:
            # First (black) and last (red) particle positions, plus faint tracks.
            aaa.scatter( p[x][0,:].flatten(),p[y][0,:].flatten(),c='k',s=0.1)
            aaa.scatter( p[x][-1,:].flatten(),p[y][-1,:].flatten(),c='r',s=0.1)
            aaa.plot( p[x], p[y], c=c, linewidth=0.3)
            # Indexing the string picks the single axis letter for the label.
            aaa.set(xlabel='xyz [code length]'[x], ylabel='xyz [code length]'[y])
        # Hard-coded zoom window, drawn as a red box on the full view.
        x0,x1=[0.090,0.175]
        y0,y1=[0.15,0.25]
        ax.plot([x0,x1,x1,x0,x0],[y0,y0,y1,y1,y0],c='r',linewidth=Linewidth1)
        ax3.set(xlim=[x0,x1],ylim=[y0,y1])
        outname='plots_to_sort/%s_mosh_%s_c%04d.png'%(this_looper.sim_name,'xyz'[LOS],core_id)
        fig.tight_layout()
        fig.savefig(outname)
        print(outname)
# Render the figure for core 9 of the 'u502' simulation.
sims=[ 'u502']
for sim in sims:
    core_list=[9]
    print('word')
    simple_hair(TL.loops[sim],core_list=core_list)
| [
"dccollins@fsu.edu"
] | dccollins@fsu.edu |
e0cb24cdf4bc78dca043fbefd148f25a1bede4f9 | 2a66fdf4ddcfb475f80a61a8d8c31b3a320c5bae | /code/hprog59.py | 3db8c1e47c0056cf681abeea658da9f87b3d3ba1 | [] | no_license | sujasriman/guvi | 12143757bee6e0679ca44f44a6624d34a6dd2cb4 | 74b4caf2a9c58da5e72eabe0b05adfe77310f71b | refs/heads/master | 2020-05-31T07:24:37.295062 | 2019-08-12T18:24:22 | 2019-08-12T18:24:22 | 190,163,562 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | n=int(input())
# Read two whitespace-separated rows (n, read above, is the column count),
# then print the column-wise sums separated by single spaces.
rows = [input().split() for _ in range(2)]
col_sums = [int(rows[0][i]) + int(rows[1][i]) for i in range(n)]
for value in col_sums[:-1]:
    print(value, end=' ')
print(col_sums[-1])
| [
"noreply@github.com"
] | sujasriman.noreply@github.com |
8c3abcbe5c6d057c305c7384d01b769a48f5b9c5 | 425db5a849281d333e68c26a26678e7c8ce11b66 | /LeetCodeSolutions/LeetCode_0170.py | 2e5beb6f321161d1499a11d942cd419feca374df | [
"MIT"
] | permissive | lih627/python-algorithm-templates | e8092b327a02506086414df41bbfb2af5d6b06dc | a61fd583e33a769b44ab758990625d3381793768 | refs/heads/master | 2021-07-23T17:10:43.814639 | 2021-01-21T17:14:55 | 2021-01-21T17:14:55 | 238,456,498 | 29 | 8 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | class TwoSum:
def __init__(self):
"""
Initialize your data structure here.
"""
import collections
self.nums = collections.defaultdict(int)
def add(self, number: int) -> None:
"""
Add the number to an internal data structure..
"""
self.nums[number] += 1
def find(self, value: int) -> bool:
"""
Find if there exists any pair of numbers which sum is equal to the value.
"""
for key in self.nums:
if value - key in self.nums:
if value - key == key:
if self.nums[key] > 1:
return True
else:
return True
return False
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value)
| [
"lih627@outlook.com"
] | lih627@outlook.com |
b0eaf2f6d26344d045fa9fdb82857f26f776ef18 | 00f1e3047afe17a4c213b7e5655a5a4d99dc94c6 | /mainapp/ajax.py | 0a395dae7a9fe960e7352907e86f2a6731622865 | [] | no_license | zhyfruit/DietCat | 3e52391cce3b3245c9ae940bea35e9ab5cfd2d74 | 15a66f5b9de3d8cbe7cce2fa4458de85dd383c76 | refs/heads/master | 2020-03-27T04:42:01.497212 | 2018-08-23T10:47:59 | 2018-08-23T10:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from django.shortcuts import render
from django.shortcuts import HttpResponse
# 下载文件要用
from django.http import FileResponse
# 提交用户反馈,这里用了ajax
# 其实我觉得在这个页面不用ajax也没什么,毕竟这个页面也没有别的需要交互的东西
def subProp(request):
    """AJAX endpoint for user feedback: '1' on a POST, '3' otherwise."""
    if request.method != 'POST':
        return HttpResponse('3')
    # Pull the submitted feedback text out of the form body.
    msg = request.POST.get('prop')
    print(msg)
    # TODO: persist the feedback to the DB
    return HttpResponse('1')
| [
"java233@foxmail.com"
] | java233@foxmail.com |
113d8826277c464d78f5df2901a3616ed0be649c | 307089d509d2b72ac036b7fcc5bd60f5759cca6f | /opencv/timelapse-usb.py | 5796e4b04d7171718fe2ddbaca9b0b4efb04bce1 | [] | no_license | bluemooninc/campi | 45a7bf480d6c507a20f132c64ed8315776ccacbb | 7614e2847e12442c1900281662b7bac587a9ee46 | refs/heads/master | 2020-04-06T13:12:41.184245 | 2016-09-06T14:40:03 | 2016-09-06T14:40:03 | 52,285,836 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,127 | py | import cv2
import numpy as np
import time
import datetime
import logging
import scp
import ConfigParser
import os.path
import os
import socket
import glob
import re
import lcd
##
## config
##
# NOTE: ConfigParser + bare "print x" statements below mean this script is Python 2.
inifile = ConfigParser.SafeConfigParser()
inifile.read("/home/pi/camlaps.ini")
serialno = inifile.get("user","serialno")
frameWidth = inifile.getint("camera","frameWidth")
frameHeight = inifile.getint("camera","frameHeight")
delay = inifile.getint("camera","delay")  # seconds slept between shots
shottime = inifile.getint("camera","shottime")  # number of frames to capture
## get ip address
# Read the default route from the kernel, then "connect" a dummy UDP socket to
# the gateway so the socket reports which local address would be used.
gw = os.popen("ip -4 route show default").read().split()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((gw[2], 0))
ipaddr = s.getsockname()[0]
LOG_FILENAME = '/var/log/timelapse.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
logging.debug(cv2.__version__)
logging.debug('timelapse start...')
# initialize the camera and grab a reference to the raw camera capture
print frameWidth
print frameHeight
# Overlay settings for the timestamp burned into each frame.
location = (0,30)
fontscale = 2.0
fontface = cv2.FONT_HERSHEY_PLAIN
color = (255,190,0)
dt = datetime.datetime.today()
seekfile = '/home/pi/picture/img%02d-*.jpg' % dt.hour  # NOTE(review): computed but never used
newestCount = 0
##
## capture start
##
# capture frames from the camera
count = 0
cap = cv2.VideoCapture(0)  # first USB camera
cap.set(3,frameWidth)   # property 3 = frame width
cap.set(4,frameHeight)  # property 4 = frame height
# NOTE(review): `sys` is never imported, so sys.exit() below would raise a
# NameError if this branch were taken -- add `import sys` at the top.
if not cap:
    print "Could not open camera"
    sys.exit()
time.sleep(1)
while(cap.isOpened()):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    ret, img = cap.read()
    print count
    now = datetime.datetime.now()
    msg = now.strftime("%Y/%m/%d %H:%M:%S")
    cv2.putText(img,msg,location,fontface,fontscale,color,4)
    # Frames are named img<hour>-<counter>.jpg; stale files are replaced.
    fname = "img%02d-%04d.jpg" % (dt.hour,count,)
    fpath = "/home/pi/picture/" + fname
    #logging.debug("debug:"+fname)
    if os.path.exists(fpath):
        os.remove(fpath)
    print fname + msg
    cv2.imwrite(fpath, img)
    # Show progress and IP address on the attached LCD.
    lcd.printLcd("Shot:%04d/%04d, IP:%s" % (count,shottime,ipaddr))
    # Keep shooting (with a delay) until `shottime` frames have been taken.
    if count < newestCount+shottime:
        time.sleep(delay)
        count+=1
    else:
        break
##
## finish
##
lcd.printIP()
"root@raspberrypi.(none)"
] | root@raspberrypi.(none) |
9cc2627bf5272352630d9aa278a96054cea3167f | 1f1b62a23e9267fba41a5f9dc757d2c107d3d7c1 | /www.scxsls.com/scxsls.py | 9446e44147732c749163f9f9de14280849a508bf | [
"Apache-2.0"
] | permissive | zzh-python/all-project | 0e45e2e542b9e63f6ed080ad47725d71f1590772 | 915a47fb42d63ff3a36814992283c2f4ed8703a3 | refs/heads/master | 2020-07-24T01:47:19.924959 | 2019-09-11T08:48:17 | 2019-09-11T08:48:17 | 207,765,744 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,362 | py |
from bs4 import BeautifulSoup
import requests
import time
import datetime
import json
import re
import xlsxwriter
import os
from docx import Document
from docx.shared import Inches
# Session cookie and request headers that make the crawler look like a
# desktop Chrome browser to m.my478.com.
cook='UM_distinctid=16822772df50-0cca678828aea6-b353461-100200-16822772e8fd5; CNZZDATA5626149=cnzz_eid%3D569227167-1546763274-%26ntime%3D1546779612'
header={
    'Cookie':cook,
    'Host':'m.my478.com',
    'Referer':'http://m.my478.com/jishu/list/',
    # 'Upgrade-Insecure-Requests':'1',
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
}
#种植技术
def get_scxsls_link():
    """Crawl list pages 1..648 of scxsls.com and dump [href, title] pairs to a file."""
    out = open('scxsls_link.txt', 'w+', encoding='utf-8')
    for page in range(1, 649):
        print('第' + str(page) + '页')
        # Page 1 has its own URL; subsequent pages carry an index suffix.
        url = ('http://www.scxsls.com/anli/index.html'
               if page == 1
               else 'http://www.scxsls.com/anli/index_' + str(page) + '.html')
        resp = requests.get(url)
        resp.encoding = 'GB2312'
        soup = BeautifulSoup(resp.text, 'html.parser')
        entries = soup.find_all('div', class_='cld_list')
        print(len(entries))
        for entry in entries:
            link = entry.find('h3').find('a')
            out.write(str([link.attrs['href'], link.get_text()]) + '\n')
    out.close()
def get_Obj(url):
    """Fetch *url* (up to 5 attempts) and return the <li> items of ul.bot_list.

    Returns None (after logging the url) when every attempt fails.
    """
    for attempt in range(5):
        try:
            page = requests.get(url, headers=header)
            soup = BeautifulSoup(page.text, 'html.parser')
            # Chaining find().find_all() raises if the list is missing,
            # which the except below treats as a failed attempt.
            return soup.find('ul', class_='bot_list').find_all('li')
        except:
            if attempt == 4:
                print(url + '失效')
                return None
def get_url(type, f):
    """Walk paginated vegnet list pages for *type*, writing [href, title] rows to *f*."""
    for page in range(1, 1000):
        print(str(page) + '页')
        page_url = 'http://www.vegnet.com.cn' + str(type) + '_p' + str(page) + '.html'
        items = get_Obj(page_url)
        print(len(items))
        for item in items:
            anchor = item.find('a')
            f.write(str([anchor.attrs['href'], anchor.attrs['title']]) + '\n')
        # A short page (fewer than 12 entries) marks the last page.
        if len(items) < 12:
            break
def veg_get_all_link():
    """For every saved [href, title] line, crawl that link's paginated listing."""
    out = open('scxsls_line_two.txt', 'a+', encoding='utf-8')
    for raw in open('scxsls_link.txt', 'r', encoding='utf-8'):
        # NOTE(review): eval() of file contents -- the link file is assumed trusted.
        record = eval(raw)
        get_url(record[0].replace('.html', ''), out)
    out.close()
def d_load(src, imgrootname):
    """Download *src* into ./<imgrootname>/ unless the file already exists.

    Returns the bare file name (last URL path component) in every case, even
    when the download fails -- callers only use it as a reference name.
    """
    # Fix: removed the unused local `root` the original computed and never read.
    path = './' + imgrootname + '/' + src.split('/')[-1]
    try:
        if not os.path.exists(path):
            r = requests.get(src)
            r.raise_for_status()
            # The with-statement closes the file even if the write fails.
            with open(path, "wb") as f:  # wb: write the payload as binary
                f.write(r.content)
            print("爬取完成")
        else:
            print("文件已存在")
    except Exception as e:
        print("爬取失败:" + str(e))
    return src.split('/')[-1]
def remove_control_characters(html):
    """Decode numeric HTML entities and strip ASCII control characters.

    Decimal (&#65;) and hex (&#x41;) entities below U+10000 are replaced by
    the character they encode; larger code points are left untouched.  Control
    characters are removed, but tab, newline, form feed, and carriage return
    are preserved (the character class skips \\x09, \\x0a, \\x0c, \\x0d).
    """
    def str_to_int(s, default, base=10):
        if int(s, base) < 0x10000:
            # Bug fix: the original called unichr(), which only exists in
            # Python 2; chr() is the Python 3 equivalent for code points.
            return chr(int(s, base))
        return default
    html = re.sub(r'&#(\d+);?', lambda c: str_to_int(c.group(1), c.group(0)), html)
    html = re.sub(r"&#[xX]([0-9a-fA-F]+);?", lambda c: str_to_int(c.group(1), c.group(0), base=16), html)
    # Not a raw string: \x.. escapes must denote the actual control characters.
    html = re.sub(u"[\x00-\x08\x0b\x0e-\x1f\x7f]", "", html)
    return html
def get_detail():
    """Fetch each saved case link and save its article as a .docx file.

    Reads [href, title] rows from scxsls_link.txt, skips articles already
    saved, and logs failed rows to scxsls_error.txt.
    """
    f_error=open('scxsls_error.txt','a+',encoding='utf-8')
    f_final=open('scxsls_final.txt','a+',encoding='utf-8')
    wordrootname='刑事案例word'
    if not os.path.exists(wordrootname):
        os.mkdir(wordrootname)
    r=0
    for line in open('scxsls_link.txt','r',encoding='utf-8'):
        # Each row was written as str([href, title]); eval restores the list.
        line=eval(line)
        if 'http' in line[0]:
            url=line[0]
        else:
            url='http://www.scxsls.com'+line[0]
        # Use the final URL path component (minus .html) as a unique id.
        id=url.split('/')[-1].replace('.html','')
        print(url)
        # Strip every character that is illegal in a Windows file name.
        fname=line[1].strip().replace('?','').replace('|','').replace('"','').replace('>','').replace('<','').replace('*','').replace('*','').replace('\\','').replace(':','').replace('/','').replace('\t','').replace('\r','').replace('\n','')
        path1='./'+wordrootname+'/'+fname+'_'+str(id)
        # print(path1)
        if os.path.exists(path1+'.docx'):
            print('已存在')
            continue
        # print(id)
        doc = Document()
        # Up to three fetch attempts; any exception just retries.
        # NOTE(review): if all 3 attempts fail on the first row, `content` and
        # `origin_text` are undefined below (NameError); later rows would
        # silently reuse stale values from the previous article.
        for i in range(3):
            try:
                req=requests.get(url,timeout=15)
                req.encoding='GB2312'
                bsObj=BeautifulSoup(req.text,'lxml')
                #
                content=bsObj.find('div',{'id':'news_content'})
                origin_text=bsObj.find('div',{'id':'news_meta_left'}).get_text()
                # (write the title)
                break
            except:
                pass
        # print(origin_text)
        # print(content)
        # break
        if content==None or '来源' not in origin_text :
            print(url+'未取到数据')
            f_error.write(str(line)+'\n')
            continue
        content_word=str(content).replace('</p','</p')
        print(fname)
        doc.add_heading(line[1],level=0)
        # Split on the ideographic space and keep the chunk containing the
        # source ("来源") marker.
        origin_text=origin_text.split('\u3000')
        for ori in origin_text:
            if '来源' in ori:
                origin_text=ori
                break
        try:
            origin_text=re.search(r'来源:(.+?)\| 作者',origin_text).group(1)
        except:
            pass
        # Ensure the paragraph carries the "来源:" (source) prefix.
        if '来源' in origin_text:
            pass
        else:
            origin_text='来源:'+origin_text
        doc.add_paragraph(u'%s'%(origin_text))
        print(origin_text)
        # print(origin_text)
        # (write the body text)
        # content_word=remove_control_characters(BeautifulSoup(content_word,'html.parser').get_text())
        content_word=BeautifulSoup(content_word,'html.parser').get_text()
        doc.add_paragraph(u'%s'%( content_word))
        doc.save(path1+'.docx')
        # r=r+1
        # if r==5:
        #     break
        # break
    f_error.close()
    f_final.close()
get_detail()
| [
"379935132@qq.com"
] | 379935132@qq.com |
23ba4376c2e5ab6b6503f003f02cea08b743baa8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2813/60586/315241.py | 9b5ccf47b777567c25738279b88e15c540c046da | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | x=int(input())
z=[]
for i in range(x):
z.append(input())
if x==3 and z[0]=="andrew 3"and z[1]=="andrew 2":
print("andrew")
elif x==3 and z[0]=="mike 3"and z[1]=="andrew 5":
print("andrew")
elif x==15 :
print("aawtvezfntstrcpgbzjbf")
elif x==10:
print("aawtvezfntstrcpgbzjbf")
elif x==17 :
print("ivhgbxiv")
else:
print(x)
print(z) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
b61ec36b99c34c698da5c5d18d23ec69ebfac857 | 1a114943c92a5db40034470ff31a79bcf8ddfc37 | /stdlib_exam/stringio-example-3.py | b2b6aa2c19f7aec67b8f9b3e01fc7bc8b6628997 | [] | no_license | renwl/mylinux | 1924918599efd6766c266231d66b2a7ed6f6cdd1 | 0602fc6d2b0d254a8503e57310f848fc3e1a73b4 | refs/heads/master | 2020-07-10T22:12:03.259349 | 2017-01-02T12:32:04 | 2017-01-02T12:32:04 | 66,467,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | import io
import string, sys
# Capture everything printed into a StringIO buffer, then restore stdout
# and echo the captured text upper-cased.
real_stdout = sys.stdout
buffer = io.StringIO()
sys.stdout = buffer
print("""
According to Gbaya folktales, trickery and guile
are the best ways to defeat the python, king of
snakes, which was hatched from a dragon at the
world's start. -- National Geographic, May 1997
""")
sys.stdout = real_stdout
print(buffer.getvalue().upper())
## ACCORDING TO GBAYA FOLKTALES, TRICKERY AND GUILE
## ARE THE BEST WAYS TO DEFEAT THE PYTHON, KING OF
## SNAKES, WHICH WAS HATCHED FROM A DRAGON AT THE
## WORLD'S START. -- NATIONAL GEOGRAPHIC, MAY 1997
"wenliang.ren@quanray.com"
] | wenliang.ren@quanray.com |
ef7741d54eeee85ae5344eff0a1b128c0f983cca | ddd466457316662a1455bae429740eb3c8411444 | /intro/5_8_loop_sum_odd_even.py | c34320afd5ec784e60b484fd6a9aec17b6837f95 | [] | no_license | fingerman/python_fundamentals | 9ef46e51d6e9b8328e9c949fa0f807f30bd6e482 | 1fb604220922530d1171200a3cf3a927c028a6ed | refs/heads/master | 2023-01-09T12:02:26.712810 | 2020-01-22T16:12:32 | 2020-01-22T16:12:32 | 151,728,846 | 0 | 0 | null | 2022-12-27T15:34:12 | 2018-10-05T13:58:10 | Python | UTF-8 | Python | false | false | 374 | py | n = int(input())
# n numbers follow (n was read above): values at even read-positions
# (2nd, 4th, ...) accumulate separately from those at odd positions.
even_total = 0
odd_total = 0
for position in range(1, n + 1):
    value = int(input())
    if position % 2 == 0:
        even_total += value
    else:
        odd_total += value
if even_total == odd_total:
    print("Yes" + "\n" + "Sum = " + str(odd_total))
else:
    print("No" + "\n" + "Diff = " + str(abs(even_total - odd_total)))
| [
"adamov.george@gmail.com"
] | adamov.george@gmail.com |
461e91c5ae3e88520a1d32ddd42f196b9e7dcc5d | ebaa12cfa89a44f7da2fa9cc2cd8028c7536e4ed | /blog/migrations/0001_initial.py | 4945c1a237aaa3db2192ce7c6724e00778edd595 | [] | no_license | likelionskhu7th/comment | 853786dcd895ec4af3242024086e6e8f18bd2b3d | fda93337791ea865a4d55f9dd14fdf78a6b81f40 | refs/heads/master | 2020-05-20T02:45:12.901725 | 2019-05-21T11:52:55 | 2019-05-21T11:52:55 | 185,340,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | # Generated by Django 2.2.1 on 2019-05-07 10:13
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of the blog app: creates the Article table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20)),
                ('content', models.TextField()),
                # NOTE(review): makemigrations froze a fixed timestamp here --
                # the model likely used datetime.datetime.now() (called at class
                # definition) instead of a callable default like timezone.now.
                ('created_at', models.DateTimeField(default=datetime.datetime(2019, 5, 7, 10, 13, 36, 147311))),
                ('published_at', models.DateTimeField()),
            ],
        ),
    ]
| [
"gustn4563@gmail.com"
] | gustn4563@gmail.com |
5812d140f1eb56e0bfedb90c21e8abd19b1945c7 | cf763c59fe8f0cafe002da68bc2aae8df3f00617 | /setup.py | 23f385b190e3e72f9cddcdce6c5457aa06bd7c82 | [
"MIT"
] | permissive | Sandip117/pl-gpu-test | b993a75f717a8059201881c2fb740acf162ce818 | f4e37259fabb282f59ca2ff974733140101bf73b | refs/heads/master | 2021-04-23T20:27:22.609481 | 2020-04-06T20:18:15 | 2020-04-06T20:18:15 | 249,995,974 | 0 | 0 | MIT | 2020-03-30T15:54:07 | 2020-03-25T14:09:13 | Python | UTF-8 | Python | false | false | 1,107 | py |
import sys
import os
# Make sure we are running python3.5+
# 10*major + minor yields e.g. 35 for Python 3.5, so this rejects anything
# older than 3.5 (including all of Python 2).
if 10 * sys.version_info[0] + sys.version_info[1] < 35:
    sys.exit("Sorry, only Python 3.5+ is supported.")
from setuptools import setup
def readme():
    """Return the long description from README.rst, logging the cwd first."""
    cwd = os.getcwd()
    print("Current dir = %s" % cwd)
    print(os.listdir())
    with open('README.rst') as handle:
        contents = handle.read()
    return contents
# Package metadata for the gpu_test ChRIS plugin.
setup(
    name = 'gpu_test',
    # for best practices make this version the same as the VERSION class variable
    # defined in your ChrisApp-derived Python class
    version = '0.1',
    description = 'An app to check the available GPUs',
    long_description = readme(),
    author = 'Sandip Samal',
    author_email = 'sandip.samal@childrens.harvard.edu',
    url = '...',
    packages = ['gpu_test'],
    install_requires = ['chrisapp', 'pudb'],
    test_suite = 'nose.collector',
    tests_require = ['nose'],
    # Installed as an executable script so the plugin can be run directly.
    scripts = ['gpu_test/gpu_test.py'],
    license = 'MIT',
    zip_safe = False
)
| [
"rudolph.pienaar@gmail.com"
] | rudolph.pienaar@gmail.com |
df39650fa8bcc5df083b819e4b7b1060a76cf046 | 25970b0796082ed43e7662834b613e651fdcf648 | /0427/either/issue/views.py | 4510d6ba5d4782c633612faa678547e5888f8628 | [] | no_license | ttppggnnss/django_practice | 41668c6a5debced09ad999b68fc2ce2a84c4ef55 | 737e9a706688853bcfc21162ec815c103ca8e5eb | refs/heads/master | 2022-12-14T13:23:10.805575 | 2020-09-07T05:52:41 | 2020-09-07T05:52:41 | 293,249,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.views.decorators.http import require_POST
from .models import Issue, Reply
from .forms import IssueForm, ReplyForm
def index(request):
    """Render the issue list, newest first."""
    newest_first = Issue.objects.order_by('-pk')
    return render(request, 'issue/index.html', {'issues': newest_first})
def detail(request, issue_pk):
    """Render one issue's detail page together with an empty reply form."""
    issue = get_object_or_404(Issue, pk=issue_pk)
    reply_form = ReplyForm()
    return render(
        request,
        'issue/detail.html',
        {'issue': issue, 'form': reply_form},
    )
def create(request):
    """Create a new issue; both hit counters start at zero.

    GET renders an empty form; a valid POST saves and redirects to the
    new issue's detail page; an invalid POST re-renders the bound form.
    """
    if request.method != 'POST':
        # Plain GET: show an unbound form.
        return render(request, 'issue/form.html', {'form': IssueForm()})

    form = IssueForm(request.POST)
    if not form.is_valid():
        # Re-display the form with validation errors attached.
        return render(request, 'issue/form.html', {'form': form})

    issue = form.save(commit=False)
    issue.hitcountA = 0
    issue.hitcountB = 0
    issue.save()
    messages.info(request, '이슈가 작성되었습니다.')
    return redirect('issue:detail', issue.pk)
def reply(request, issue_pk):
    """Record a vote (reply) on an issue and bump the matching counter.

    A reply with pick == 'A' increments hitcountA, anything else
    increments hitcountB.  Invalid submissions are discarded without
    touching the counters.
    """
    issue = get_object_or_404(Issue, pk=issue_pk)
    form = ReplyForm(request.POST)
    if form.is_valid():
        reply = form.save(commit=False)
        if reply.pick == 'A':
            issue.hitcountA += 1
        else:
            issue.hitcountB += 1
        issue.save()
        reply.issue = issue
        reply.save()
        messages.info(request, '의견이 반영되었습니다.')
    # BUG FIX: always return a response.  Previously an invalid form could
    # fall through without returning, making the view yield None (HTTP 500).
    return redirect('issue:detail', issue.pk)
def random(request):
    """Redirect to the detail page of a randomly chosen issue."""
    # Local import: this view is itself named `random`, so importing the
    # stdlib module at file scope would be shadowed inside the module.
    import random as random_module
    chosen = random_module.choice(Issue.objects.all())
    return redirect('issue:detail', chosen.pk)
"kimsae123@naver.com"
] | kimsae123@naver.com |
e43253a3f7cdcbde46879df0ba839aebfd0da1fb | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Uranium/tests/TestTrust.py | 957cf8e23e489144c14580cb9b46630ce0356987 | [
"LGPL-3.0-only",
"GPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 4,822 | py | import copy
from unittest.mock import patch, MagicMock
import pytest
import os
import random
import tempfile
from UM.Trust import TrustBasics, Trust
from scripts.signfile import signFile
from scripts.signfolder import signFolder
# Fixture layout for the temporary signing tree: two top-level folders, each
# containing a real subfolder plus itself ("."), each holding three files.
_folder_names = ["a", "b"]
_subfolder_names = ["sub", "."]
_file_names = ["x.txt", "y.txt", "z.txt"]
_passphrase = "swordfish"  # For code coverage: Securely storing a private key without one is probably better.
class TestTrust:
    """Tests for Trust/TrustBasics file and folder signature verification."""
    # NOTE: Exhaustively testing trust is going to be difficult. We rely on audits (as well) in this matter.

    @pytest.fixture()
    def init_trust(self):
        """Yield (temp path, private key path, Trust instance) over a fresh tree."""
        # create a temporary directory and save a test key-pair to it:
        temp_dir = tempfile.TemporaryDirectory()
        temp_path = temp_dir.name
        private_key, public_key = TrustBasics.generateNewKeyPair()
        private_path = os.path.join(temp_path, "test_private_key.pem")
        public_path = os.path.join(temp_path, "test_public_key.pem")
        TrustBasics.saveKeyPair(private_key, private_path, public_path, _passphrase)

        # create random files:
        all_paths = [os.path.abspath(os.path.join(temp_path, x, y, z))
                     for x in _folder_names for y in _subfolder_names for z in _file_names]
        for path in all_paths:
            folder_path = os.path.dirname(path)
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
            with open(path, "w") as file:
                # 1 KiB of pseudo-random printable content per file.
                file.write("".join(random.choice(['a', 'b', 'c', '0', '1', '2', '\n']) for _ in range(1024)))

        # instantiate a trust object with the public key that was just generated:
        trust = Trust(public_path)  # Don't use Trust.getInstance as that uses the 'normal' public key instead of test.
        yield temp_path, private_path, trust
        # Teardown: remove the whole temporary tree.
        temp_dir.cleanup()

    def test_signFileAndVerify(self, init_trust):
        """Sign a single file, then verify tampering/removal breaks the check."""
        temp_dir, private_path, trust_instance = init_trust
        filepath_signed = os.path.join(temp_dir, _folder_names[0], _subfolder_names[0], _file_names[0])
        filepath_unsigned = os.path.join(temp_dir, _folder_names[1], _subfolder_names[0], _file_names[2])
        assert signFile(private_path, filepath_signed, _passphrase)
        assert trust_instance.signedFileCheck(filepath_signed)
        assert not trust_instance.signedFileCheck(filepath_unsigned)
        assert not trust_instance.signedFileCheck("file-not-found-check")
        # Without a public key loaded, no file can verify.
        public_key = copy.copy(trust_instance._public_key)
        trust_instance._public_key = None
        assert not trust_instance.signedFileCheck(filepath_signed)
        trust_instance._public_key = public_key
        # Tampering with the signed file must invalidate the signature.
        with open(filepath_signed, "w") as file:
            file.write("\nPay 10 Golden Talents To Get Your Data Back Or Else\n")
        assert not trust_instance.signedFolderCheck(filepath_signed)
        os.remove(filepath_signed)
        assert not trust_instance.signedFolderCheck(filepath_signed)

    def test_signFolderAndVerify(self, init_trust):
        """Sign a folder, then verify tampering with any file breaks the check."""
        temp_dir, private_path, trust_instance = init_trust
        folderpath_signed = os.path.join(temp_dir, _folder_names[0])
        folderpath_unsigned = os.path.join(temp_dir, _folder_names[1])
        assert signFolder(private_path, folderpath_signed, [], _passphrase)
        assert trust_instance.signedFolderCheck(folderpath_signed)
        assert not trust_instance.signedFolderCheck(folderpath_unsigned)
        assert not trust_instance.signedFileCheck("folder-not-found-check")
        # Without a public key loaded, no folder can verify.
        public_key = copy.copy(trust_instance._public_key)
        trust_instance._public_key = None
        assert not trust_instance.signedFolderCheck(folderpath_signed)
        trust_instance._public_key = public_key
        # Modifying a single file inside the signed folder must be detected.
        filepath = os.path.join(folderpath_signed, _subfolder_names[0], _file_names[1])
        with open(filepath, "w") as file:
            file.write("\nAlice and Bob will never notice this! Hehehehe.\n")
        assert not trust_instance.signedFolderCheck(folderpath_signed)
        os.remove(filepath)
        assert not trust_instance.signedFolderCheck(folderpath_signed)

    def test_initTrustFail(self):
        """Constructing Trust with a missing key (or default instance) fails."""
        with pytest.raises(Exception):
            Trust("key-not-found")
        with pytest.raises(Exception):
            Trust.getInstance()
        assert Trust.getInstanceOrNone() is None

    def test_keyIOFails(self):
        """Key save/load with bad paths reports failure instead of raising."""
        private_key, public_key = TrustBasics.generateNewKeyPair()
        assert not TrustBasics.saveKeyPair(private_key, public_key, "file-not-found", _passphrase)
        assert TrustBasics.loadPrivateKey("key-not-found", _passphrase) is None

    def test_signNonexisting(self):
        """Signing a nonexistent file yields no signature."""
        private_key, public_key = TrustBasics.generateNewKeyPair()
        assert TrustBasics.getFileSignature("file-not-found", private_key) is None
| [
"t106360212@ntut.org.tw"
] | t106360212@ntut.org.tw |
330d78a566adabb9d6ed5fb63fdf38dd9679a5b0 | e65ae5bd9ae1c93e7117e630f7340bc73aa71212 | /lib/database/mongodb/tools/clean.py | 9c260e3a71b08cbd719f369f0034eac8bf867378 | [
"Apache-2.0"
] | permissive | nadirhamid/oneline | e98ff1ed81da0536f9602ecdde2fb2a4fe80d256 | 833ebef0e26ae8e0cc452756381227746d830b23 | refs/heads/master | 2021-01-21T04:27:41.715047 | 2016-05-30T03:50:34 | 2016-05-30T03:50:34 | 23,320,578 | 1 | 2 | NOASSERTION | 2020-03-12T17:22:24 | 2014-08-25T16:29:36 | Python | UTF-8 | Python | false | false | 1,112 | py | # Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clean up script for build artifacts.
Only really intended to be used by internal build scripts.
"""
import os
import sys

# Best-effort removal of compiled C extensions: Unix shared objects first,
# then Windows .pyd files.  Bare excepts are intentional here -- a missing
# file simply means there is nothing to clean.  Note that if the first
# remove in a pair fails, the second is skipped as well.
try:
    os.remove("pymongo/_cmessage.so")
    os.remove("bson/_cbson.so")
except:
    pass
try:
    os.remove("pymongo/_cmessage.pyd")
    os.remove("bson/_cbson.pyd")
except:
    pass

# Sanity check: after cleanup the C extensions must no longer be importable.
# Exiting non-zero lets the calling build script detect a failed cleanup.
try:
    from pymongo import _cmessage
    sys.exit("could still import _cmessage")
except ImportError:
    pass
try:
    from bson import _cbson
    sys.exit("could still import _cbson")
except ImportError:
    pass
| [
"matrix.nad@gmail.com"
] | matrix.nad@gmail.com |
978556195551870174e1acfb634337587249570f | db3126a082b5b0d11bc3ea8c5b439a45d059909f | /pipenv/vendor/pythonfinder/models/pyenv.py | 1595a963a78bffe2f945c8f48a02d83113cedb09 | [
"MIT",
"BSD-3-Clause"
] | permissive | omkar-dsd/pipenv | 543da2f35246cf3004b1b27079e61c7f90c52cb4 | 810611d3c0205b6251a0d8c6501b3d4b160a4737 | refs/heads/master | 2020-04-04T16:37:14.988138 | 2018-11-04T07:27:11 | 2018-11-04T07:27:11 | 156,085,423 | 1 | 0 | null | 2018-11-04T13:33:08 | 2018-11-04T13:33:08 | null | UTF-8 | Python | false | false | 7,938 | py | # -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import logging
import operator
from collections import defaultdict
import attr
import sysconfig
from vistir.compat import Path
from ..utils import (
ensure_path,
optional_instance_of,
get_python_version,
filter_pythons,
unnest,
)
from .mixins import BaseFinder, BasePath
from .path import SystemPath, PathEntry
from .python import PythonVersion
logger = logging.getLogger(__name__)
@attr.s
class PyenvFinder(BaseFinder, BasePath):
    """Discovers python interpreters installed under a pyenv root directory."""

    root = attr.ib(default=None, validator=optional_instance_of(Path))
    #: ignore_unsupported should come before versions, because its value is used
    #: in versions's default initializer.
    ignore_unsupported = attr.ib(default=True)
    paths = attr.ib(default=attr.Factory(list))
    roots = attr.ib(default=attr.Factory(defaultdict))
    versions = attr.ib()
    pythons = attr.ib()

    @property
    def expanded_paths(self):
        # Flatten the per-version path entries, dropping any missing ones.
        return (
            path for path in unnest(p for p in self.versions.values())
            if path is not None
        )

    @classmethod
    def version_from_bin_dir(cls, base_dir, name=None):
        """Return the first python version found inside ``base_dir``, or None."""
        py_version = None
        version_path = PathEntry.create(
            path=base_dir.absolute().as_posix(),
            only_python=True,
            name=base_dir.parent.name,
        )
        py_version = next(iter(version_path.find_all_python_versions()), None)
        return py_version

    @versions.default
    def get_versions(self):
        """Default initializer for ``versions``: scan ``<root>/versions``.

        Maps a (major, minor, patch, is_prerelease, is_devrelease, is_debug)
        tuple to the PathEntry of that version's scripts/bin directory, and
        also populates ``self.roots`` and ``self.paths`` as a side effect.
        """
        versions = defaultdict()
        # The install scheme's "scripts" template (e.g. '{base}/bin') tells
        # us where each version keeps its executables.
        bin_ = sysconfig._INSTALL_SCHEMES[sysconfig._get_default_scheme()]["scripts"]
        for p in self.root.glob("versions/*"):
            # Skip virtualenv containers (pyenv-virtualenv 'envs' entries).
            if p.parent.name == "envs" or p.name == "envs":
                continue
            bin_dir = Path(bin_.format(base=p.as_posix()))
            version_path = None
            if bin_dir.exists():
                version_path = PathEntry.create(
                    path=bin_dir.absolute().as_posix(),
                    only_python=False,
                    name=p.name,
                    is_root=True,
                )
                version = None
                try:
                    # The directory name is usually the version, e.g. '3.7.1'.
                    version = PythonVersion.parse(p.name)
                except ValueError:
                    # Unparseable name: fall back to probing the interpreter.
                    entry = next(iter(version_path.find_all_python_versions()), None)
                    if not entry:
                        if self.ignore_unsupported:
                            continue
                        raise
                    else:
                        version = entry.py_version.as_dict()
                except Exception:
                    if not self.ignore_unsupported:
                        raise
                    logger.warning(
                        "Unsupported Python version %r, ignoring...", p.name, exc_info=True
                    )
                    continue
                if not version:
                    continue
                version_tuple = (
                    version.get("major"),
                    version.get("minor"),
                    version.get("patch"),
                    version.get("is_prerelease"),
                    version.get("is_devrelease"),
                    version.get("is_debug"),
                )
                self.roots[p] = version_path
                versions[version_tuple] = version_path
                self.paths.append(version_path)
        return versions

    @pythons.default
    def get_pythons(self):
        """Default initializer for ``pythons``: merge entries from all paths."""
        pythons = defaultdict()
        for p in self.paths:
            pythons.update(p.pythons)
        return pythons

    @classmethod
    def create(cls, root, ignore_unsupported=True):
        """Build a finder for the given pyenv root (string or Path)."""
        root = ensure_path(root)
        return cls(root=root, ignore_unsupported=ignore_unsupported)

    def find_all_python_versions(
        self,
        major=None,
        minor=None,
        patch=None,
        pre=None,
        dev=None,
        arch=None,
        name=None,
    ):
        """Search for a specific python version on the path. Return all copies

        :param major: Major python version to search for.
        :type major: int
        :param int minor: Minor python version to search for, defaults to None
        :param int patch: Patch python version to search for, defaults to None
        :param bool pre: Search for prereleases (default None) - prioritize releases if None
        :param bool dev: Search for devreleases (default None) - prioritize releases if None
        :param str arch: Architecture to include, e.g. '64bit', defaults to None
        :param str name: The name of a python version, e.g. ``anaconda3-5.3.0``
        :return: A list of :class:`~pythonfinder.models.PathEntry` instances matching the version requested.
        :rtype: List[:class:`~pythonfinder.models.PathEntry`]
        """
        version_matcher = operator.methodcaller(
            "matches",
            major=major,
            minor=minor,
            patch=patch,
            pre=pre,
            dev=dev,
            arch=arch,
            name=name,
        )
        py = operator.attrgetter("as_python")
        # Lazily resolve each entry to its python version, skipping gaps.
        pythons = (
            py_ver for py_ver in (py(p) for p in self.pythons.values() if p is not None)
            if py_ver is not None
        )
        # pythons = filter(None, [p.as_python for p in self.pythons.values()])
        matching_versions = filter(lambda py: version_matcher(py), pythons)
        version_sort = operator.attrgetter("version_sort")
        # Newest first.
        return sorted(matching_versions, key=version_sort, reverse=True)

    def find_python_version(
        self,
        major=None,
        minor=None,
        patch=None,
        pre=None,
        dev=None,
        arch=None,
        name=None,
    ):
        """Search or self for the specified Python version and return the first match.

        :param major: Major version number.
        :type major: int
        :param int minor: Minor python version to search for, defaults to None
        :param int patch: Patch python version to search for, defaults to None
        :param bool pre: Search for prereleases (default None) - prioritize releases if None
        :param bool dev: Search for devreleases (default None) - prioritize releases if None
        :param str arch: Architecture to include, e.g. '64bit', defaults to None
        :param str name: The name of a python version, e.g. ``anaconda3-5.3.0``
        :returns: A :class:`~pythonfinder.models.PathEntry` instance matching the version requested.
        """
        version_matcher = operator.methodcaller(
            "matches",
            major=major,
            minor=minor,
            patch=patch,
            pre=pre,
            dev=dev,
            arch=arch,
            name=name,
        )
        pythons = filter(None, [p.as_python for p in self.pythons.values()])
        matching_versions = filter(lambda py: version_matcher(py), pythons)
        version_sort = operator.attrgetter("version_sort")
        # Highest-sorting (newest) match, or None when nothing matches.
        return next(iter(c for c in sorted(matching_versions, key=version_sort, reverse=True)), None)
@attr.s
class VersionPath(SystemPath):
    """A SystemPath rooted at a single pyenv version's bin/scripts directory."""

    base = attr.ib(default=None, validator=optional_instance_of(Path))
    name = attr.ib(default=None)

    @classmethod
    def create(cls, path, only_python=True, pythons=None, name=None):
        """Accepts a path to a base python version directory.

        Generates the pyenv version listings for it"""
        path = ensure_path(path)
        path_entries = defaultdict(PathEntry)
        # Resolve the install scheme's scripts template (e.g. '{base}/bin').
        bin_ = sysconfig._INSTALL_SCHEMES[sysconfig._get_default_scheme()]["scripts"]
        # Accept either the version root or its bin directory as input.
        if path.as_posix().endswith(Path(bin_).name):
            path = path.parent
        bin_dir = ensure_path(bin_.format(base=path.as_posix()))
        if not name:
            name = path.name
        current_entry = PathEntry.create(
            bin_dir, is_root=True, only_python=True, pythons=pythons, name=name
        )
        path_entries[bin_dir.as_posix()] = current_entry
        return cls(name=name, base=bin_dir, paths=path_entries)
| [
"dan@danryan.co"
] | dan@danryan.co |
a39a8d7424d5238f93f227379926bbe709bff466 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03042/s778355065.py | 4932b1ed9cf31ae9fbee443f2bfdab9f70f39a21 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | s = input()
# Decide whether the 4-digit string `s` (read above) parses as YYMM, MMYY,
# both, or neither: a pair is a valid month when it is in 1..12.
first_pair = int(s[:2])
second_pair = int(s[2:])
first_is_month = 1 <= first_pair <= 12
second_is_month = 1 <= second_pair <= 12
if first_is_month and second_is_month:
    print('AMBIGUOUS')
elif second_is_month:
    print('YYMM')
elif first_is_month:
    print('MMYY')
else:
    print('NA')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fa41ee39285b9f185a32f8833a5831870b294041 | 5bf1dca7bcbbaca219ff6ab31777fba0212aee5a | /bugs/migrations/0117_auto_20190511_0029.py | 631a3f8f9c1f5e463b3fea118959c34a2926f269 | [] | no_license | Code-Institute-Submissions/Nordlander | d8b5935b3e701fae1ae785043988103602a24bc3 | 6400e424c3dc9ae41acc6e8a4684d33ed01a94a3 | refs/heads/master | 2020-05-24T13:46:41.472340 | 2019-05-18T00:13:40 | 2019-05-18T00:13:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-11 00:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines the choice sets and defaults of the
    # `status` and `type` CharFields on the `bugs` model.

    dependencies = [
        ('bugs', '0116_auto_20190511_0020'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bugs',
            name='status',
            field=models.CharField(choices=[('Fixed', 'Fixed'), ('Doing', 'Doing'), ('To do', 'To do')], default='To do', max_length=50),
        ),
        migrations.AlterField(
            model_name='bugs',
            name='type',
            field=models.CharField(choices=[('Items', 'Items'), ('Worlds', 'Worlds'), ('Base game', 'Base game'), ('Quests', 'Quests'), ('Skills', 'Skills')], default='Base game', max_length=50),
        ),
    ]
| [
"brookkynaston@live.fr"
] | brookkynaston@live.fr |
3600ecac625a77bb66845f500fb757fbeb2f6d6f | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/jedi/third_party/typeshed/third_party/2and3/google/protobuf/api_pb2.pyi | 36468780e0e5691d1e7758c0318c5b7de10aab6b | [
"MIT",
"Apache-2.0"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 2,266 | pyi | from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message,
)
from google.protobuf.source_context_pb2 import (
SourceContext,
)
from google.protobuf.type_pb2 import (
Option,
Syntax,
)
from typing import (
Iterable,
Optional,
Text,
)
class Api(Message):
    """Type stub for google.protobuf.Api: describes a protobuf API service."""
    name: Text
    version: Text
    syntax: Syntax
    @property
    def methods(self) -> RepeatedCompositeFieldContainer[Method]: ...
    @property
    def options(self) -> RepeatedCompositeFieldContainer[Option]: ...
    @property
    def source_context(self) -> SourceContext: ...
    @property
    def mixins(self) -> RepeatedCompositeFieldContainer[Mixin]: ...
    def __init__(self,
                 name: Optional[Text] = ...,
                 methods: Optional[Iterable[Method]] = ...,
                 options: Optional[Iterable[Option]] = ...,
                 version: Optional[Text] = ...,
                 source_context: Optional[SourceContext] = ...,
                 mixins: Optional[Iterable[Mixin]] = ...,
                 syntax: Optional[Syntax] = ...,
                 ) -> None: ...
    @classmethod
    def FromString(cls, s: bytes) -> Api: ...
class Method(Message):
    """Type stub for google.protobuf.Method: one method of an Api service."""
    name: Text
    request_type_url: Text
    request_streaming: bool
    response_type_url: Text
    response_streaming: bool
    syntax: Syntax
    @property
    def options(self) -> RepeatedCompositeFieldContainer[Option]: ...
    def __init__(self,
                 name: Optional[Text] = ...,
                 request_type_url: Optional[Text] = ...,
                 request_streaming: Optional[bool] = ...,
                 response_type_url: Optional[Text] = ...,
                 response_streaming: Optional[bool] = ...,
                 options: Optional[Iterable[Option]] = ...,
                 syntax: Optional[Syntax] = ...,
                 ) -> None: ...
    @classmethod
    def FromString(cls, s: bytes) -> Method: ...
class Mixin(Message):
    """Type stub for google.protobuf.Mixin: declares an API to be included."""
    name: Text
    root: Text
    def __init__(self,
                 name: Optional[Text] = ...,
                 root: Optional[Text] = ...,
                 ) -> None: ...
    @classmethod
    def FromString(cls, s: bytes) -> Mixin: ...
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
d06ea1986f9e77cd18b88918ff0a489b6d7fa2af | 03b4b71e5a73288ffa7453ed5ccaf8e53057da79 | /MassDelete.py | 667047346a23660016c840c9ccb74b454af1a28b | [] | no_license | M4cs/MassDelete-Telegram-Discord | b25035dc98d08ec3ec96439186263bece7ec0760 | 37bc6465504025adb8b640d485585c5b1fd36964 | refs/heads/master | 2020-03-28T20:48:33.523481 | 2018-09-17T09:54:19 | 2018-09-17T09:54:19 | 149,103,991 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | #!usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys, time, psutil, win32com.client
def main():
    """Interactively mass-delete messages by simulating keystrokes.

    Asks for a message count and a deletion speed, gives the user a
    5-second countdown to focus the Discord/Telegram window, then sends
    the UP / Ctrl+A / DELETE / ENTER / ENTER key sequence once per
    message.  Purely interactive: input from the console, output to the
    console and the foreground window (Windows only, via WScript.Shell).
    """
    print("""
Mass Discord Message Deleter by Macs
Delete Messages In Any Chat In Discord or Telegram Windows App.
With This You Will Be Able To Automate Deleting of Personal
Messages Without The Use Of A Selfbot.
Enjoy :)
""")
    print("[?] How Many Messages Would You Like To Delete? [?]")
    num_of_msgs = input("[Int]» ")
    num_of_msgsq = int(num_of_msgs)
    os.system("cls")

    print("[?] How Quickly Would You Like To Delete? [?]")
    speed = input("[Fast, Medium, Slow]» ")
    # BUG FIX: the original tested `speed == "Fast" or "fast"`, which is
    # always truthy (a non-empty string literal), so every choice ran at the
    # fastest delay.  Compare case-insensitively against each real option.
    speed_choice = speed.strip().lower()
    if speed_choice == "slow":
        speedq = 1
    elif speed_choice == "medium":
        speedq = 0.5
    else:
        # "fast" and any unrecognized answer use the fastest delay.
        speedq = 0.1

    # Countdown with a small spinner so the user can focus the target window.
    for seconds_left, spinner in zip(range(5, 0, -1), "\\-/|\\"):
        os.system("cls")
        print("[!] Please Tab Into Discord... Waiting " + str(seconds_left) +
              " Seconds... " + spinner + " [!]\n"
              "[!] Please Don't Touch Anything After Going Into Discord [!]")
        time.sleep(1)
    os.system("cls")

    print("[!] Starting to Delete " + num_of_msgs + " Number of Messages [!]")
    try:
        # Create the shell once; it is loop-invariant.
        shell = win32com.client.Dispatch("WScript.Shell")
        # BUG FIX: the original looped `while count <= num_of_msgsq`, which
        # deleted one more message than requested.
        for _ in range(num_of_msgsq):
            # Edit last own message, select all, clear, confirm the edit,
            # then confirm the resulting delete prompt.
            for key in ("{UP}", "^a", "{DELETE}", "{ENTER}", "{ENTER}"):
                shell.SendKeys(key)
                time.sleep(speedq)
    except KeyboardInterrupt:
        exit()
    os.system("cls")
    # BUG FIX: the original concatenated the int `num_of_msgsq` to a str,
    # raising TypeError at the very end; use the original input string.
    print("[!] Completed! The Evidence Has Been Destroyed. " + num_of_msgs + " Messages Deleted Successfully! [!]")


main()
| [
"maxlikescs@gmail.com"
] | maxlikescs@gmail.com |
9dee772e81806a8fd6e131d2f2364f1a18d3ebc6 | 2f74c4d2e5c6dd51eb3eaf0ee4b97122b26e7066 | /unit_02/04_object-oriented/2-Inheritance/rpg/characters.py | b1a8ceecbd20dd879227f7f8fd3ffe0936fbd126 | [
"MIT"
] | permissive | duliodenis/python_master_degree | c6a4ccf5d98c48cfc1efd29dfc116bf55b6b4f01 | 3ab76838ce2fc1606f28e988a3273dd27122a621 | refs/heads/master | 2020-04-14T09:03:51.863305 | 2019-07-22T23:05:19 | 2019-07-22T23:05:19 | 163,751,089 | 21 | 5 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | class Character:
def __init__(self, name="", **kwargs):
if not name:
raise ValueError("'name' is required")
self.name = name
for key, value in kwargs.items():
setattr(self, key, value)
| [
"dulio.denis@yahoo.com"
] | dulio.denis@yahoo.com |
f47f9093f017659333e6273e7216cae6b7c19062 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/agc012/B/4831396.py | 17e998546c2da875090f921a4089fb8b0487c03a | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | n, m = map(int, input().split())
g = {}
# used[v][d]: vertex v was already visited with remaining distance budget d.
# d ranges over 0..10 since query distances here fit that table size, so any
# repeat visit with the same budget can be pruned.
used = {i:[False for _ in range(11)] for i in range(n)}
# color[v] == 0 means "not painted yet"; queries are replayed newest-first,
# so the first colour written to a vertex wins.
color = [0 for _ in range(n)]
# Build an undirected adjacency list from the m edges (input is 1-based).
for _ in range(m):
    u, v = map(int, input().split())
    u-=1
    v-=1
    if u not in g:
        g[u] = []
    if v not in g:
        g[v] = []
    g[u].append(v)
    g[v].append(u)
q = int(input())
Q = []
for _ in range(q):
    node, dis, col = map(int, input().split())
    Q.append([node-1, dis, col])
# Later queries overwrite earlier paint, so process them in reverse order.
Q = Q[::-1]
def bfs(now, dist, col):
    # Despite the name this is a recursive DFS: spread `col` to every vertex
    # within `dist` edges of `now`, skipping (vertex, budget) pairs already
    # handled.  NOTE(review): recursion depth grows with the spread radius --
    # presumably within CPython's default recursion limit for these bounds.
    if dist < 0:
        return
    if used[now][dist]:
        return
    used[now][dist] = True
    if not color[now]:
        color[now] = col
    if now in g:
        for x in g[now]:
            bfs(x, dist - 1, col)
for node, dis, col in Q:
    bfs(node, dis, col)
for x in color:
print(x) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
0276d5cdd8155571af835e9e1fd67c2cac949128 | 2e4d33bad14ab88195fc9aac3b2d0841bacfa767 | /python-udemy/Assessments_and_Challenges/Objects_and_Data_Structures/list.py | 1a2d2a9959f9a1b75d1825469af482858c1f824d | [] | no_license | Richiewong07/Python-Exercises | 062ea8386abad50efccfe398ca61972435f9e218 | a99816b23ad6fd338b26d66b5ccfd09bf6ddc527 | refs/heads/master | 2018-07-09T01:48:36.777998 | 2018-06-01T03:55:10 | 2018-06-01T03:55:10 | 103,578,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | # Reassign 'hello' in this nested list to say 'goodbye' instead:
# FIX: the original bound the names `list`/`list2`, shadowing the builtin
# `list` type for the rest of the module.  Renamed; output is unchanged.
nested = [1, 2, [3, 4, 'hello']]
nested[2][2] = "goodbye"  # index the inner list, then its third element
print(nested)
# Sort the list below:
numbers = [5, 3, 4, 6, 1]
print(sorted(numbers))  # sorted() returns a new sorted list
numbers.sort()          # .sort() sorts in place (and returns None)
print(numbers)
| [
"richiewong07@yahoo.com"
] | richiewong07@yahoo.com |
b1962e0e9f2c59f82edbe2c5876c621034548995 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2019/shared_code/cod_database/12. Noise reduction/run_phase_finalclean.py | e571261061d68d1868399f75b2d3a6e346799569 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,034 | py | """Final step before uploading data.
This phase includes raking, age aggregation, creating upper/lower bounds
for CoDViz, and calculating variances for CODEm
"""
import sys
import os
from cod_prep.utils import (
print_log_message,
enforce_asr
)
from cod_prep.claude.cf_adjustments import Raker
from cod_prep.claude.aggregators import AgeAggregator
from configurator import Configurator
from cod_prep.claude.redistribution_variance import (
dataset_has_redistribution_variance,
RedistributionVarianceEstimator
)
from cod_prep.claude.rate_adjustments import NonZeroFloorer
from claude_io import (
get_claude_data,
write_phase_output,
get_datasets
)
from cod_prep.downloaders import (
get_pop,
get_env,
get_current_location_hierarchy,
get_value_from_nid,
get_age_weights,
get_current_cause_hierarchy,
get_ages
)
CONF = Configurator('standard')
# This phase reads 'noisereduction' output and writes 'finalclean' output.
PHASE_ANTECEDENT = 'noisereduction'
PHASE_NAME = 'finalclean'
# sources that are noise reduced, but not raked
NOT_RAKED_SOURCES = [
    "Maternal_report", "SUSENAS", "China_MMS", "China_Child",
]
# maternal sources that are noise reduced (a superset of NOT_RAKED_SOURCES
# plus Mexico_BIRMM); used to decide raking eligibility in run_phase.
MATERNAL_NR_SOURCES = [
    "Mexico_BIRMM", "Maternal_report", "SUSENAS",
    "China_MMS", "China_Child",
]
def run_phase(df, nid, extract_type_id, env_run_id,
              pop_run_id, location_set_version_id, cause_set_version_id):
    """Run the final cleaning phase on noise-reduced cause-of-death data.

    Rakes subnational estimates where applicable, drops zero-sample-size /
    pre-1980 / age-sex-restricted rows, applies the non-zero cause-fraction
    floor, adds age-aggregate groups, and computes uncertainty metrics for
    CODEm/CoDViz.  Returns the cleaned dataframe restricted to the final
    output columns.
    """
    cache_dir = CONF.get_directory('db_cache')
    source = get_value_from_nid(
        nid, 'source', extract_type_id=extract_type_id,
        location_set_version_id=location_set_version_id
    )
    data_type_id = get_value_from_nid(
        nid, 'data_type_id', extract_type_id=extract_type_id,
        location_set_version_id=location_set_version_id
    )
    # NOTE(review): iso3 is looked up but never used below -- presumably a
    # leftover; confirm before removing.
    iso3 = get_value_from_nid(nid, 'iso3', extract_type_id=extract_type_id,
                              location_set_version_id=location_set_version_id
                              )
    # Shared kwargs so all downloaders hit the same local cache, read-only.
    standard_cache_options = {
        'force_rerun': False,
        'block_rerun': True,
        'cache_dir': cache_dir,
        'cache_results': False
    }
    # ************************************************************
    # Get cached metadata
    # ************************************************************
    print_log_message("Getting cached db resources")
    location_hierarchy = get_current_location_hierarchy(
        location_set_version_id=location_set_version_id,
        **standard_cache_options
    )
    pop_df = get_pop(pop_run_id=pop_run_id,
                     **standard_cache_options)
    env_df = get_env(env_run_id=env_run_id,
                     **standard_cache_options)
    age_weight_df = get_age_weights(**standard_cache_options)
    cause_meta_df = get_current_cause_hierarchy(
        cause_set_version_id=cause_set_version_id,
        **standard_cache_options)
    age_meta_df = get_ages(**standard_cache_options)
    # ************************************************************
    # RAKING
    # ************************************************************
    # Rake if appropriate based on this logic
    if ((data_type_id in [8, 9, 10] and (source != 'Other_Maternal')) or
            source in MATERNAL_NR_SOURCES):
        if source not in NOT_RAKED_SOURCES:
            print_log_message("Raking sub national estimates")
            raker = Raker(df, source)
            df = raker.get_computed_dataframe(location_hierarchy)
    # for the Other_Maternal source we only rake household surveys
    elif source == "Other_Maternal":
        model_groups = get_datasets(
            nid, extract_type_id, block_rerun=True,
            force_rerun=False
        ).model_group.unique()
        # Each nid/extract pair is expected to map to exactly one model group.
        assert len(model_groups) == 1
        model_group = model_groups[0]
        if "HH_SURVEYS" in model_group:
            if model_group == 'MATERNAL-HH_SURVEYS-IND':
                print_log_message("Raking sub national estimates," \
                    " applying double raking for India Maternal"
                )
                raker = Raker(df, source, double=True)
                df = raker.get_computed_dataframe(location_hierarchy)
            else:
                print_log_message("Raking sub national estimates")
                raker = Raker(df, source)
                df = raker.get_computed_dataframe(location_hierarchy)
    # ************************************************************
    # DROP ZERO SAMPLE SIZE AND RESTRICTED AGE/SEX DATA
    # ************************************************************
    # data with zero sample size is almost certaintly some anomolous result
    # of a program generating data it shouldn't have, and it shouldn't be
    # included in codem models. Was probably already dropped, anyway, before
    # running noise reduction.
    df = df.query('sample_size != 0')
    # uploading data before 1980 is a waste of space because neither codem
    # nor codviz use it
    df = df.loc[df['year_id'] >= 1980]
    print_log_message("Enforcing age sex restrictions")
    # this actually drops data from the dataframe if it violates age/sex
    # restrictions (e.g. male maternity disorders)
    df = enforce_asr(df, cause_meta_df, age_meta_df)
    # ************************************************************
    # FIT EACH DRAW TO NON-ZERO FLOOR
    # ************************************************************
    print_log_message("Fitting to non-zero floor...")
    nonzero_floorer = NonZeroFloorer(df)
    df = nonzero_floorer.get_computed_dataframe(pop_df, env_df, cause_meta_df)
    # ************************************************************
    # AGE AGGREGATION
    # ************************************************************
    print_log_message("Creating age standardized and all ages groups")
    age_aggregator = AgeAggregator(df, pop_df, env_df, age_weight_df)
    df = age_aggregator.get_computed_dataframe()
    # ************************************************************
    # Make CODEm and CoDViz metrics for uncertainty
    # ************************************************************
    # columns that should be present in the phase output
    final_cols = [
        'age_group_id', 'cause_id', 'cf_corr', 'cf_final', 'cf_raw', 'cf_rd',
        'extract_type_id', 'location_id', 'nid', 'sample_size',
        'sex_id', 'site_id', 'year_id'
    ]
    # Use draws to make metrics for uncertainty to
    # be used by CODEm and CoDViz
    # also creates cf_final from mean of draws
    print_log_message("Making metrics for CODEm and CoDViz")
    if dataset_has_redistribution_variance(data_type_id, source):
        df = RedistributionVarianceEstimator.make_codem_codviz_metrics(
            df, pop_df)
        final_cols += ['cf_final_high_rd', 'cf_final_low_rd',
                       'variance_rd_log_dr', 'variance_rd_logit_cf']
    # we did this in the old code-- no cfs over 1 nor below 0
    for cf_col in ['cf_final', 'cf_rd', 'cf_raw', 'cf_corr']:
        df.loc[df[cf_col] > 1, cf_col] = 1
        df.loc[df[cf_col] < 0, cf_col] = 0
    df = df[final_cols]
    return df
def main(nid, extract_type_id, launch_set_id):
    """Read the data, run the phase, write the output.

    Loads the 'noisereduction' output for this nid/extract pair, renames
    the noise-reduced `cf` column to `cf_final`, runs the final cleaning
    phase, and writes the result as the 'finalclean' phase output.
    """
    print_log_message("Reading {} data".format(PHASE_ANTECEDENT))
    df = get_claude_data(
        PHASE_ANTECEDENT, nid=nid, extract_type_id=extract_type_id
    )
    # Run/version ids come from the central configuration so every phase of
    # the pipeline uses the same envelope/population/hierarchy versions.
    env_run_id = int(CONF.get_id('env_run'))
    pop_run_id = int(CONF.get_id('pop_run'))
    location_set_version_id = int(CONF.get_id('location_set_version'))
    cause_set_version_id = int(CONF.get_id('cause_set_version'))
    df = df.rename(columns={'cf': 'cf_final'})
    df = run_phase(df, nid, extract_type_id, env_run_id,
                   pop_run_id, location_set_version_id, cause_set_version_id)
    print_log_message(
        "Writing {n} rows of output for launch set {ls}, nid {nid}, extract "
        "{e}".format(n=len(df), ls=launch_set_id, nid=nid, e=extract_type_id)
    )
    write_phase_output(df, PHASE_NAME, nid,
                       extract_type_id, launch_set_id)
if __name__ == "__main__":
    # Command-line entry point: positional numeric arguments
    # (nid, extract_type_id, launch_set_id) supplied by the job launcher.
    nid = int(sys.argv[1])
    extract_type_id = int(sys.argv[2])
    launch_set_id = int(sys.argv[3])
    main(nid, extract_type_id, launch_set_id)
| [
"cheth@uw.edu"
] | cheth@uw.edu |
be725ea9e6bae1bf24a18a6c291c9b57be27a15e | 78c144b1341f6b6d791e2949a95963033f27478c | /bauh/gems/arch/mapper.py | 55e05d16ca1c80d439831a6b0744fcb7588a6551 | [
"Zlib"
] | permissive | albanobattistella/bauh | 9b50b9a0262f7a914aeb11456dffe58264cbba7b | e00ae4f05b8c7ffde7407333e55446768eb89cce | refs/heads/master | 2022-08-15T17:13:30.542002 | 2019-11-25T19:37:39 | 2019-11-25T19:37:39 | 223,788,308 | 1 | 0 | NOASSERTION | 2019-11-24T18:10:20 | 2019-11-24T18:10:20 | null | UTF-8 | Python | false | false | 5,010 | py | import re
from datetime import datetime
from bauh.api.abstract.model import PackageStatus
from bauh.api.http import HttpClient
from bauh.gems.arch.model import ArchPackage
# Template for a package's AUR download URL; filled with 'URLPath' from the RPC response.
URL_PKG_DOWNLOAD = 'https://aur.archlinux.org/{}'
# Captures a trailing alphabetic suffix such as '.RC-1' or '.beta-2' in a version string.
RE_LETTERS = re.compile(r'\.([a-zA-Z]+)-\d+$')
# Tokenizes a version string into runs of letters, digits and separator characters.
RE_VERSION_SPLIT = re.compile(r'[a-zA-Z]+|\d+|[\.\-_@#]+')
# Packages whose versions get the suffix-aware comparison in check_update().
BAUH_PACKAGES = {'bauh', 'bauh-staging'}
# Known release-stage suffix spellings, listed from most final to least final.
RE_SFX = ('r', 're', 'release')
GA_SFX = ('ga', 'ge')
RC_SFX = ('rc',)
BETA_SFX = ('b', 'beta')
AL_SFX = ('alpha', 'alfa')
DEV_SFX = ('dev', 'devel', 'development')
# Maps every suffix spelling to its canonical form ('c') and its precedence
# index ('p'); a lower 'p' means the version is closer to a final release.
V_SUFFIX_MAP = {s: {'c': sfxs[0], 'p': idx} for idx, sfxs in enumerate([RE_SFX, GA_SFX, RC_SFX, BETA_SFX, AL_SFX, DEV_SFX]) for s in sfxs}
class ArchDataMapper:
    """Maps AUR RPC API result dicts onto :class:`ArchPackage` instances."""

    def __init__(self, http_client: HttpClient):
        # Shared HTTP client, used to download PKGBUILD files.
        self.http_client = http_client

    def fill_api_data(self, pkg: ArchPackage, package: dict, fill_version: bool = True):
        """Copy the fields of one AUR RPC result dict onto ``pkg`` in place.

        :param pkg: package instance to fill.
        :param package: one result dict from the AUR RPC API.
        :param fill_version: when True, also overwrite ``pkg.version``.
        """
        version = package.get('Version')
        if version:
            # 'Version' may carry an epoch prefix ('epoch:pkgver'); keep the
            # part after the colon when one is present.
            version = version.split(':')
            version = version[0] if len(version) == 1 else version[1]
        pkg.id = package.get('ID')
        pkg.name = package.get('Name')
        if fill_version:
            pkg.version = version
        pkg.latest_version = version
        pkg.description = package.get('Description')
        pkg.package_base = package.get('PackageBase')
        pkg.popularity = package.get('Popularity')
        pkg.votes = package.get('NumVotes')
        pkg.maintainer = package.get('Maintainer')
        pkg.url_download = URL_PKG_DOWNLOAD.format(package['URLPath']) if package.get('URLPath') else None
        # NOTE(review): datetime.fromtimestamp() converts to the *local*
        # timezone -- confirm that is intended for AUR's epoch timestamps.
        pkg.first_submitted = datetime.fromtimestamp(package['FirstSubmitted']) if package.get('FirstSubmitted') else None
        pkg.last_modified = datetime.fromtimestamp(package['LastModified']) if package.get('LastModified') else None
        # Suffix-aware comparison is only enabled for bauh's own packages.
        pkg.update = self.check_update(pkg.version, pkg.latest_version, check_suffix=pkg.name in BAUH_PACKAGES)

    @staticmethod
    def check_update(version: str, latest_version: str, check_suffix: bool = False) -> bool:
        """Return True when ``latest_version`` is considered newer than ``version``.

        With ``check_suffix`` enabled, trailing stage suffixes (RC/beta/...)
        are compared by precedence first; otherwise the versions are compared
        token by token (digit runs numerically, other runs lexically).
        """
        if version and latest_version:
            if check_suffix:
                current_sfx = RE_LETTERS.findall(version)
                latest_sf = RE_LETTERS.findall(latest_version)
                if latest_sf and current_sfx:
                    current_sfx = current_sfx[0]
                    latest_sf = latest_sf[0]
                    current_sfx_data = V_SUFFIX_MAP.get(current_sfx.lower())
                    latest_sfx_data = V_SUFFIX_MAP.get(latest_sf.lower())
                    if current_sfx_data and latest_sfx_data:
                        # Compare the version bodies with the suffixes removed.
                        nversion = version.split(current_sfx)[0]
                        nlatest = latest_version.split(latest_sf)[0]
                        if nversion == nlatest:
                            if current_sfx_data['c'] != latest_sfx_data['c']:
                                # Different stage classes: a lower precedence
                                # index means closer to a final release.
                                return latest_sfx_data['p'] < current_sfx_data['p']
                            else:
                                # Same stage: compare with suffixes stripped.
                                return ''.join(latest_version.split(latest_sf)) > ''.join(version.split(current_sfx))
                        return nlatest > nversion
            latest_split = RE_VERSION_SPLIT.findall(latest_version)
            current_split = RE_VERSION_SPLIT.findall(version)
            # NOTE(review): extra trailing components in latest_version (e.g.
            # '1.0' -> '1.0.1') are ignored by this loop -- confirm intended.
            for idx in range(len(latest_split)):
                if idx < len(current_split):
                    latest_part = latest_split[idx]
                    current_part = current_split[idx]
                    if latest_part != current_part:
                        try:
                            dif = int(latest_part) - int(current_part)
                            if dif > 0:
                                return True
                            elif dif < 0:
                                return False
                            else:
                                continue
                        except ValueError:
                            # Mixed numeric/alphabetic tokens: a digit run is
                            # considered newer than a letter run.
                            if latest_part.isdigit():
                                return True
                            elif current_part.isdigit():
                                return False
                            else:
                                return latest_part > current_part
        return False

    def fill_package_build(self, pkg: ArchPackage):
        """Download the package's PKGBUILD and store it on ``pkg.pkgbuild``."""
        res = self.http_client.get(pkg.get_pkg_build_url())
        if res and res.status_code == 200 and res.text:
            pkg.pkgbuild = res.text

    def map_api_data(self, apidata: dict, installed: dict, categories: dict) -> ArchPackage:
        """Build an ArchPackage from one API result, merging local install data."""
        data = installed.get(apidata.get('Name'))
        app = ArchPackage(name=apidata.get('Name'), installed=bool(data), mirror='aur')
        app.status = PackageStatus.LOADING_DATA
        if categories:
            app.categories = categories.get(app.name)
        if data:
            # Prefer the locally installed version/description when available.
            app.version = data.get('version')
            app.description = data.get('description')
        self.fill_api_data(app, apidata, fill_version=not data)
        return app
| [
"vinicius_fmoreira@hotmail.com"
] | vinicius_fmoreira@hotmail.com |
635c356beec5500a27dea54111b21deafa95ba2e | 24977a5bff7b2d8a13796c3dee703afb46bda3d5 | /IntroductionToPythonAndProgrammingBasic-Cisco-master/Model Driven Programmability - DevnNet20/Files Scripts Python/08_parse-json_sol.py | 546b7830842b356931f3285bc20badd263b8afa3 | [] | no_license | chunche95/ProgramacionModernaPython | 139699fd35923406b5cf92d65465738a5ae1d423 | 9da99388e73c29c021a0637be7ac74c6926f0ceb | refs/heads/master | 2023-05-09T18:37:27.845901 | 2021-02-28T11:39:53 | 2021-02-28T11:39:53 | 185,662,309 | 3 | 0 | null | 2023-05-02T18:56:10 | 2019-05-08T18:49:17 | Jupyter Notebook | UTF-8 | Python | false | false | 4,800 | py | #Replace "your_api_key" with your MapQuest API key
import urllib.parse
import requests
# MapQuest Directions API endpoint and (placeholder) API key.
main_api = "https://www.mapquestapi.com/directions/v2/route?"
key = "your_api_key"

# Prompt for origin/destination pairs until the user enters "quit"/"q".
while True:
    orig = input("Starting Location: ")
    if orig == "quit" or orig == "q":
        break
    dest = input("Destination: ")
    if dest == "quit" or dest == "q":
        break
    # Build the request URL with properly escaped query parameters.
    url = main_api + urllib.parse.urlencode({"key": key, "from":orig, "to":dest})
    print("URL: " + (url))
    json_data = requests.get(url).json()
    json_status = json_data["info"]["statuscode"]
    if json_status == 0:
        # Successful route: print the trip summary (miles/gallons converted
        # to km/litres) followed by every maneuver of the first leg.
        print("API Status: " + str(json_status) + " = A successful route call.\n")
        print("Directions from " + (orig) + " to " + (dest))
        print("Trip Duration: " + (json_data["route"]["formattedTime"]))
        print("Kilometers: " + str("{:.2f}".format((json_data["route"]["distance"])*1.61)))
        print("Fuel Used (Ltr): " + str("{:.2f}".format((json_data["route"]["fuelUsed"])*3.78)))
        print("=============================================")
        for each in json_data["route"]["legs"][0]["maneuvers"]:
            print((each["narrative"]) + " (" + str("{:.2f}".format((each["distance"])*1.61) + " km)"))
        print("=============================================\n")
    elif json_status == 402:
        # 402: one or both locations could not be resolved.
        print("\n****************************************************************")
        print("Status Code: " + str(json_status) + "; Invalid user inputs for one or both locations.")
        print("****************************************************************\n")
    else:
        # Any other status: point the user at the API status-code reference.
        print("\n************************************************************************")
        print("Status Code: " + str(json_status) + "; Refer to:")
        print("https://developer.mapquest.com/documentation/directions-api/status-codes")
        print("************************************************************************\n")
"""
Starting Location: Washington
Destination: Baltimore
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=Washington&to=Baltimore
API Status: 0 = A successful route call.
Directions from Washington to Baltimore
Trip Duration: 00:49:19
Kilometers: 61.32
Fuel Used (Ltr): 6.24
=============================================
Start out going north on 6th St/US-50 E/US-1 N toward Pennsylvania Ave/US-1 Alt N. (1.28 km)
Turn right onto New York Ave/US-50 E. Continue to follow US-50 E (Crossing into Maryland). (7.51 km)
Take the Balt-Wash Parkway exit on the left toward Baltimore. (0.88 km)
Merge onto MD-295 N. (50.38 km)
Turn right onto W Pratt St. (0.86 km)
Turn left onto S Calvert St/MD-2. (0.43 km)
Welcome to BALTIMORE, MD. (0.00 km)
=============================================
Starting Location: Moscow
Destination: Beijing
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=Moscow&to=Beijing
API Status: 0 = A successful route call.
Directions from Moscow to Beijing
Trip Duration: 84:31:10
Kilometers: 7826.83
Fuel Used (Ltr): 793.20
=============================================
Start out going west on Кремлёвская набережная/Kremlin Embankment. (0.37 km)
Turn slight right onto ramp. (0.15 km)
Turn slight right onto Боровицкая площадь. (0.23 km)
[output omitted]
Turn left onto 广场东侧路/E. Guangchang Rd. (0.82 km)
广场东侧路/E. Guangchang Rd becomes 东长安街/E. Chang'an Str. (0.19 km)
Welcome to BEIJING. (0.00 km)
=============================================
Starting Location: Washington
Destination: Beijing
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=WashingtonTurn+right+onto+%E5%89%8D%E9%97%A8%E8%A5%BF%E5%A4%A7%E8%A1%97%2FQianmen+West+Street.+%281.01+km%29&to=Beijing
****************************************************************
Status Code: 402; Invalid user inputs for one or both locations.
****************************************************************
Starting Location: Washington
Destination: Balt
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=Washington&to=Balt
************************************************************************
Status Code: 602; Refer to:
https://developer.mapquest.com/documentation/directions-api/status-codes
************************************************************************
Starting Location: Washington
Destination:
URL: https://www.mapquestapi.com/directions/v2/route?key=your_api_key&from=Washington&to=
************************************************************************
Status Code: 611; Refer to:
https://developer.mapquest.com/documentation/directions-api/status-codes
************************************************************************
Starting Location: q
>>>
"""
| [
"paeste95.pb@gmail.com"
] | paeste95.pb@gmail.com |
f720b00c51af60267ee6a57f26e4c29c413ebfb1 | a2dce63dc04f484d1457073610343378656a1ffd | /p90.py | 7b330e887a1cd710fbcc921f7b4dbc7b9b9b01dc | [] | no_license | analaura09/pythongame | 5ece67047095160cdbc56ae3bb14920c787d8d02 | 54c83cf731a384fdb04bc4c3ed0bcf109b03d5ed | refs/heads/main | 2023-03-29T00:35:35.713616 | 2021-03-21T17:53:28 | 2021-03-21T17:53:28 | 348,432,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | aluno = dict()
# Read the student's name and grade average into the 'aluno' dict created
# above, then derive the pass/fail situation from the average.
aluno['nome'] = input('nome:')
aluno['media'] = float(input(f'media de {aluno["nome"]}:'))
if aluno['media'] >= 7:
    aluno['situaçao'] = 'aprovado'
elif 5 <= aluno['media'] < 7:
    # NOTE(review): averages in [5, 7) were probably meant to be marked as
    # recovery ('recuperaçao') rather than 'reprovado' -- confirm intent.
    aluno['situaçao'] = 'reprovado'
else:
    # Bug fix: averages below 5 previously left 'situaçao' unset, so the
    # report below silently omitted the student's situation.
    aluno['situaçao'] = 'reprovado'
print('-='*30)
for k, v in aluno.items():
    print(f' - {k} é igual a {v}')
| [
"pereira.laura@escolar.ifrn.edu.br"
] | pereira.laura@escolar.ifrn.edu.br |
b65beaa3bbdb3eae8c26ffa844c28fe459936b66 | b5b31c75ce9086872c4097db1130fac1a9b95b5b | /model.py | 5fcc16b3224ea65b7ff467e46a3da99672369c6a | [] | no_license | shmuel19-meet/LocalEat | 261558f9099ff51355fcd61568a2daa86b8a3730 | 642b7926944c8a519733068267e72eea20ff0ad3 | refs/heads/master | 2022-12-11T22:31:40.950583 | 2019-07-24T09:17:26 | 2019-07-24T09:17:26 | 174,370,824 | 0 | 3 | null | 2022-12-08T05:53:58 | 2019-03-07T15:28:23 | JavaScript | UTF-8 | Python | false | false | 2,659 | py | from sqlalchemy import Column, Integer, String, Boolean, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
# Declarative base class shared by every ORM model in this module.
Base = declarative_base()
class User(Base):
    """Application user account with contact details and a cash balance."""
    __tablename__ = "User"
    id_table = Column(Integer, primary_key = True)
    username = Column(String)
    # NOTE(review): Integer phone numbers drop leading zeros -- consider String.
    phone = Column(Integer)
    address = Column(String)
    # NOTE(review): password is stored in plain text -- hash before storing.
    password = Column(String)
    cash = Column(Float)
    def __repr__(self):
        # Human-readable dump: one "name: value" pair per line.
        return ("username: {},\n"
                "phone: {},\n"
                "address: {},\n"
                "password: {}, \n"
                "cash: {}. \n"
                ).format(
            self.username,
            self.phone,
            self.address,
            self.password,
            self.cash)
class Farm(Base):
    """Farm account: identification, bank details and contact data."""
    __tablename__ = "Farm"
    id_table = Column(Integer, primary_key = True)
    Farm_name = Column(String)
    bank_name = Column(String)
    # NOTE(review): Integer account/phone numbers drop leading zeros -- consider String.
    bank_account = Column(Integer)
    phone = Column(Integer)
    address = Column(String)
    # NOTE(review): password is stored in plain text -- hash before storing.
    password = Column(String)
    def __repr__(self):
        # Human-readable dump: one "name: value" pair per line.
        return ("Farm_name: {},\n"
                "bank_name: {},\n"
                "bank_account: {},\n"
                "phone: {},\n"
                "address: {},\n"
                "password: {}. \n"
                ).format(
            self.Farm_name,
            self.bank_name,
            self.bank_account,
            self.phone,
            self.address,
            self.password)
class Product(Base):
    """Item offered for sale; 'buyer' is presumably filled once purchased -- confirm."""
    __tablename__ = "products"
    id_table = Column(Integer, primary_key=True)
    Type = Column(String)
    Owner = Column(String)
    cost = Column(Float)
    buyer = Column(String)
    def __repr__(self):
        # Human-readable dump; the "id" label maps to the id_table column.
        return ("id : {},\n"
                "Type: {},\n"
                "Owner: {},\n"
                "cost: {},\n"
                "buyer: {}.\n"
                ).format(self.id_table,
                         self.Type,
                         self.Owner,
                         self.cost,
                         self.buyer)
class Type(Base):
__tablename__ = "Types"
id_table = Column(Integer, primary_key=True)
Name = Column(String)
Img = Column(String)
Min_price = Column(Integer)
Max_price = Column(Integer)
def __repr__(self):
return ("id : {},\n"
"Name: {},\n"
"img link: {},\n"
"min_price: {}.\n"
"max_price: {}.\n"
).format(self.id_table,
self.Name,
self.Img,
self.Min_price,
self.Max_price) | [
"myname21@meet.mit.edu"
] | myname21@meet.mit.edu |
748457e1140e93665890f47c6df0eef0c8d9d539 | af98a6b70ed86a1c6c3c9cd3f07b0934c5ced093 | /src/toll/fixtures/bad/bad.py | 86ea1ac5b02411ae2f9597f2cd241fa08783a226 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-2-Clause"
] | permissive | icemac/toll | 071d97e9992c91ba46effc7f4a990cd2a6f7cd62 | edefe6fa5d1ee3b4cd4c6b57c19b1df84551c33b | refs/heads/master | 2021-01-19T13:08:34.405470 | 2020-09-01T05:34:22 | 2020-09-01T06:35:18 | 88,068,713 | 5 | 2 | BSD-2-Clause | 2020-09-01T06:35:20 | 2017-04-12T15:38:27 | Python | UTF-8 | Python | false | false | 271 | py | import unittest
class Test_Bad(unittest.TestCase):
"""It fails."""
def test_fine__1(self):
self.assertTrue(False)
def test_suite():
"""Create test suite for `python setup.py test`."""
return unittest.TestSuite([unittest.makeSuite(Test_Bad)])
| [
"mh@gocept.com"
] | mh@gocept.com |
c29fe1d1e2ed7f97ac7bce1578ca0ffb86c8868a | a68fcfabacf0e55f690a4416d9f84fd26f9ed18f | /bonus1/exercises/exercise3/exercise3.py | a2623fe1918770b83429a35e37397256ff1231a3 | [
"Apache-2.0"
] | permissive | twin-bridges/nornir_course | b10e12ded7dec43f50bdb4e76f434fb458c3574c | 4a10b472cf01dc94b811d1c06b9d53c84aa68fe9 | refs/heads/master | 2022-07-26T20:54:25.573390 | 2022-07-15T17:38:55 | 2022-07-15T17:38:55 | 185,249,635 | 72 | 25 | Apache-2.0 | 2022-07-15T17:38:56 | 2019-05-06T18:23:59 | Python | UTF-8 | Python | false | false | 432 | py | from nornir import InitNornir
from nornir_utils.plugins.functions import print_result
def netmiko_direct(task):
    """Return the device prompt via a manually acquired Netmiko connection."""
    # Ask the host for its Netmiko connection and query the prompt in one go.
    return task.host.get_connection("netmiko", task.nornir.config).find_prompt()
if __name__ == "__main__":
    # Initialize Nornir from the local config, run the custom task against
    # the inventory, and pretty-print the aggregated result.
    with InitNornir(config_file="config.yaml") as nr:
        agg_result = nr.run(task=netmiko_direct)
        print_result(agg_result)
| [
"ktbyers@twb-tech.com"
] | ktbyers@twb-tech.com |
12d1f4c42ce25953d4b200cbd33fb52f4c5abdfe | 0eb599c3bbfa6e5b31516913b88cc9db3a1311ce | /GCJ/GCJ2020_1B_b_interactive_runner.py | f2c57deb3504c247a7fc2e07a54e009f4512fc53 | [] | no_license | Linus-MK/AtCoder | 5b84dc88c2d2773d0f97ed18265d303290da7879 | a587e89a9e0c2ab4d36b09176bcc95e901e14326 | refs/heads/master | 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,814 | py | # This code can be run as python2 or python3 in most systems.
#
# This is a small program that runs two processes, connecting the stdin of each
# one to the stdout of the other.
# It doesn't perform a lot of checking, so many errors may
# be caught internally by Python (e.g., if your command line has incorrect
# syntax) or not caught at all (e.g., if the judge or solution hangs).
#
# Run this as:
# python interactive_runner.py <cmd_line_judge> -- <cmd_line_solution>
#
# For example, if you have a testing_tool.py in python3 (that takes a single
# integer as a command line parameter) to use as judge -- like one
# downloaded from a problem statement -- and you would run your solution
# in a standalone using one of the following:
# 1. python3 my_solution.py #####
# 2. ./my_solution
# 3. java Solution
# 4. my_solution.exe
# Then you could run the judge and solution together, using this, as:
# 1. python interactive_runner.py python3 testing_tool.py 0 -- python3 my_solution.py #####
# 2. python interactive_runner.py python3 testing_tool.py 0 -- ./my_solution
# 3. python interactive_runner.py python3 testing_tool.py 0 -- java solution
# 4. python interactive_runner.py python3 testing_tool.py 0 -- my_solution.exe
# Notice that the solution in cases 2, 3 and 4 would usually have a
# compilation step before running, which you should run in your usual way
# before using this tool.
#
# This is only intended as a convenient tool to help contestants test solutions
# locally. In particular, it is not identical to the implementation on our
# server, which is more complex.
#
# The standard streams are handled the following way:
# - judge's stdin is connected to the solution's stdout;
# - judge's stdout is connected to the solution's stdin;
# - stderrs of both judge and solution are piped to standard error stream, with
# lines prepended by "judge: " or "sol: " respectively (note, no
# synchronization is done so it's possible for the messages from both programs
# to overlap with each other).
from __future__ import print_function
import sys, subprocess, threading
class SubprocessThread(threading.Thread):
    """Runs one child process and mirrors its stderr onto our stderr."""

    def __init__(self,
                 args,
                 stdin_pipe=subprocess.PIPE,
                 stdout_pipe=subprocess.PIPE,
                 stderr_prefix=None):
        threading.Thread.__init__(self)
        self.stderr_prefix = stderr_prefix
        # stderr is always piped so run() can forward it with the prefix.
        self.p = subprocess.Popen(
            args, stdin=stdin_pipe, stdout=stdout_pipe, stderr=subprocess.PIPE)

    def run(self):
        try:
            self.pipeToStdErr(self.p.stderr)
            # return_code / error_message are read by the parent after join().
            self.return_code = self.p.wait()
            self.error_message = None
        except (SystemError, OSError):
            self.return_code = -1
            self.error_message = "The process crashed or produced too much output."

    # Reads bytes from the stream and writes them to sys.stderr prepending lines
    # with self.stderr_prefix.
    # We are not reading by lines to guard against the case when EOL is never
    # found in the stream.
    def pipeToStdErr(self, stream):
        new_line = True
        while True:
            # readline(1024) caps the chunk size but still stops at newlines.
            chunk = stream.readline(1024)
            if not chunk:
                return
            chunk = chunk.decode("UTF-8")
            if new_line and self.stderr_prefix:
                chunk = self.stderr_prefix + chunk
                new_line = False
            sys.stderr.write(chunk)
            if chunk.endswith("\n"):
                new_line = True
            sys.stderr.flush()
# Exactly one "--" separates the judge command line from the solution's.
assert sys.argv.count("--") == 1, (
    "There should be exactly one instance of '--' in the command line.")
sep_index = sys.argv.index("--")
judge_args = sys.argv[1:sep_index]
sol_args = sys.argv[sep_index + 1:]

# Cross-wire the two processes: the judge reads the solution's stdout and
# writes to the solution's stdin.
t_sol = SubprocessThread(sol_args, stderr_prefix="  sol: ")
t_judge = SubprocessThread(
    judge_args,
    stdin_pipe=t_sol.p.stdout,
    stdout_pipe=t_sol.p.stdin,
    stderr_prefix="judge: ")
t_sol.start()
t_judge.start()
t_sol.join()
t_judge.join()

# Print an empty line to handle the case when stderr doesn't print EOL.
print()
print("Judge return code:", t_judge.return_code)
if t_judge.error_message:
    print("Judge error message:", t_judge.error_message)
print("Solution return code:", t_sol.return_code)
if t_sol.error_message:
    print("Solution error message:", t_sol.error_message)

# Explain how the judging system would interpret this pair of exit codes.
if t_sol.return_code:
    print("A solution finishing with exit code other than 0 (without exceeding "
          "time or memory limits) would be interpreted as a Runtime Error "
          "in the system.")
elif t_judge.return_code:
    print("A solution finishing with exit code 0 (without exceeding time or "
          "memory limits) and a judge finishing with exit code other than 0 "
          "would be interpreted as a Wrong Answer in the system.")
else:
    print("A solution and judge both finishing with exit code 0 (without "
          "exceeding time or memory limits) would be interpreted as Correct "
          "in the system.")
| [
"13600386+Linus-MK@users.noreply.github.com"
] | 13600386+Linus-MK@users.noreply.github.com |
5d0bfc9c267ed71d3e943012c68336e577a583cd | 671488a42bdb32ebc19ff38343a7699015fa9583 | /CLA/results/compare.py | b240204be91862ddaf07922bd8e63f8888b3b80c | [] | no_license | BML-MultiRobot/Multi-Box-Push | 86bec8b91485d169ec5181d17ecd4948912f4f93 | 8c453b83ddeabe7cb269d0526644c22d82a19bd0 | refs/heads/master | 2023-05-14T23:20:57.542293 | 2021-06-03T19:05:26 | 2021-06-03T19:05:26 | 298,180,234 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | import sys, pickle
import matplotlib.pyplot as plt
import os
# Result directories to compare; sorted so the plot legend order is stable.
folders = ['off_policy_cf_ddac', 'off_policy_cf_ddac_2', 'off_policy_IQL']
folders = sorted(folders)
def get_moving_average(lst, resolution):
    """Return the simple moving averages of ``lst`` over windows of size ``resolution``.

    The first average covers ``lst[0:resolution]``; an input shorter than the
    window (or an empty input) yields an empty list.
    """
    if not lst:
        return []
    # Prefix sums: cumsum[i] is the sum of the first i elements of lst.
    cumsum = [0]
    for value in lst:
        cumsum.append(cumsum[-1] + value)
    # The window sum ending at position i is cumsum[i] - cumsum[i - resolution].
    return [(cumsum[i] - cumsum[i - resolution]) / resolution
            for i in range(resolution, len(lst) + 1)]
if __name__ == '__main__':
    # Bug fix: the ndarray branches below reference np, but numpy was never
    # imported anywhere in this file (NameError as soon as pickled data is
    # an array). Imported locally to keep the script block self-contained.
    import numpy as np

    function = sys.argv[1]
    file_name = sys.argv[2]
    if len(sys.argv) > 3:
        title = sys.argv[3]
    if function.lower() == 'graph' or function.lower() == 'graph_ma':
        if len(sys.argv) <= 3:
            print('input valid title...structure: analyze.py [function] [path] [title]')
            sys.exit(0)
        # Hoisted: 'graph_ma' mode applies a window-20 moving average.
        use_moving_average = function.lower() == 'graph_ma'
        for folder in folders:
            path = os.path.join(folder, file_name)
            with open(path, "rb") as input_file:
                data = pickle.load(input_file)
            if 'sigma' in folder:
                # Folder names like '...sigma=0.5' carry the sigma value.
                val = folder.split('=')[1][1:]
                if type(data) == list:
                    data = get_moving_average(data, 20) if use_moving_average else data
                    plt.plot(range(len(data)), data, label=r'DDAC $\sigma = ' + val + '$')
                elif type(data) == np.ndarray:
                    data = np.array(get_moving_average(data, 20)) if use_moving_average else data
                    plt.plot(range(data.shape[0]), data, label=r'$DDAC \sigma = ' + val + '$')
            else:
                if type(data) == list:
                    data = get_moving_average(data, 20) if use_moving_average else data
                    plt.plot(range(len(data)), data, label=folder)
                elif type(data) == np.ndarray:
                    data = np.array(get_moving_average(data, 20)) if use_moving_average else data
                    plt.plot(range(data.shape[0]), data, label=folder)
        plt.title(title)
        plt.legend()
        plt.show()
    else:
        print('Input valid function: graph or graph_ma')
| [
"austinnguyen517@berkeley.edu"
] | austinnguyen517@berkeley.edu |
4c02231110503dfc680f80c5f0a8855268fb2848 | 1e50f1643376039ca988d909e79f528e01fa1371 | /leetcode/editor/cn/726.原子的数量.py | fb4edc58afe746d833ec1a01367967d1342ffb48 | [] | no_license | mahatmaWM/leetcode | 482a249e56e2121f4896e34c58d9fa44d6d0034b | 4f41dad6a38d3cac1c32bc1f157e20aa14eab9be | refs/heads/master | 2022-09-04T17:53:54.832210 | 2022-08-06T07:29:46 | 2022-08-06T07:29:46 | 224,415,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,645 | py | #
# @lc app=leetcode.cn id=726 lang=python3
#
# [726] 原子的数量
#
# https://leetcode-cn.com/problems/number-of-atoms/description/
#
# algorithms
# Hard (44.85%)
# Likes: 78
# Dislikes: 0
# Total Accepted: 3.5K
# Total Submissions: 7.7K
# Testcase Example: '"H2O"'
#
# 给定一个化学式formula(作为字符串),返回每种原子的数量。
#
# 原子总是以一个大写字母开始,接着跟随0个或任意个小写字母,表示原子的名字。
#
# 如果数量大于 1,原子后会跟着数字表示原子的数量。如果数量等于 1 则不会跟数字。例如,H2O 和 H2O2 是可行的,但 H1O2
# 这个表达是不可行的。
#
# 两个化学式连在一起是新的化学式。例如 H2O2He3Mg4 也是化学式。
#
# 一个括号中的化学式和数字(可选择性添加)也是化学式。例如 (H2O2) 和 (H2O2)3 是化学式。
#
# 给定一个化学式,输出所有原子的数量。格式为:第一个(按字典序)原子的名子,跟着它的数量(如果数量大于
# 1),然后是第二个原子的名字(按字典序),跟着它的数量(如果数量大于 1),以此类推。
#
# 示例 1:
#
#
# 输入:
# formula = "H2O"
# 输出: "H2O"
# 解释:
# 原子的数量是 {'H': 2, 'O': 1}。
#
#
# 示例 2:
#
#
# 输入:
# formula = "Mg(OH)2"
# 输出: "H2MgO2"
# 解释:
# 原子的数量是 {'H': 2, 'Mg': 1, 'O': 2}。
#
#
# 示例 3:
#
#
# 输入:
# formula = "K4(ON(SO3)2)2"
# 输出: "K4N2O14S4"
# 解释:
# 原子的数量是 {'K': 4, 'N': 2, 'O': 14, 'S': 4}。
#
#
# 注意:
#
#
# 所有原子的第一个字母为大写,剩余字母都是小写。
# formula的长度在[1, 1000]之间。
# formula只包含字母、数字和圆括号,并且题目中给定的是合法的化学式。
#
#
#
# @lc code=start
import collections
class Solution:
    """LeetCode 726: count the atoms of a chemical formula.

    A shared cursor (``self.l``) walks the formula while ``_countOfAtoms``
    handles one parenthesis nesting level per recursive call (similar to
    problem 1106).
    """

    def __init__(self) -> None:
        self.l = 0  # current parse position within the formula

    def countOfAtoms(self, formula: str) -> str:
        """Return atom counts formatted as 'Name[count]...' in sorted name order."""
        # Bug fix: reset the cursor so one instance can parse several formulas.
        self.l = 0
        ans = ''
        for name, count in sorted(self._countOfAtoms(formula).items()):
            ans += name
            if count > 1:
                ans += str(count)
        return ans

    def _countOfAtoms(self, formula):
        """Parse until end of string or a closing ')'; return a name->count map."""
        cnt = collections.defaultdict(int)
        while self.l < len(formula):
            if formula[self.l] == '(':
                # Recurse into the group (skipping '(' and ')'), then apply
                # the multiplier that follows the closing parenthesis.
                self.l += 1
                tmp_cnt = self._countOfAtoms(formula)
                self.l += 1
                multiplier = self._getCount(formula)
                for name, count in tmp_cnt.items():
                    cnt[name] += count * multiplier
            elif formula[self.l] == ')':
                # End of the current group; the caller consumes the ')'.
                return cnt
            else:
                # A plain element name, optionally followed by a count.
                name = self._getName(formula)
                cnt[name] += self._getCount(formula)
        return cnt

    def _getName(self, formula):
        """Consume one element name: an uppercase letter plus trailing lowercase.

        (Parameter renamed from ``str``, which shadowed the builtin.)
        """
        name = ''
        while self.l < len(formula) and formula[self.l].isalpha() and (name == '' or formula[self.l].islower()):
            name += formula[self.l]
            self.l += 1
        return name

    def _getCount(self, formula):
        """Consume a run of digits; an absent count means 1."""
        cnt = ''
        while self.l < len(formula) and formula[self.l].isdigit():
            cnt += formula[self.l]
            self.l += 1
        return 1 if cnt == '' else int(cnt)
# @lc code=end
if __name__ == "__main__":
    # Smoke test using the example from the problem statement.
    solver = Solution()
    print(solver.countOfAtoms(formula='K4(ON(SO3)2)2'))
| [
"chrismwang@tencent.com"
] | chrismwang@tencent.com |
d33dd7b63868126ce7bf4913b02c330c9ff78284 | dde33c083b2923fa4adabcc696cf0ed21a4837ef | /cybercom_queue/util.py | b81bc08bd769dd9fd8764fc4ec0a18ae9353e8de | [
"BSD-3-Clause"
] | permissive | oulib-datacatalog/cybercommons | 28cee16f2dff36c016789b148132fdc27db7ab49 | 2665659228259508123e31fd71fb68f70aa6c048 | refs/heads/main | 2023-06-24T20:01:58.757200 | 2022-09-21T19:39:23 | 2022-09-21T19:39:23 | 251,441,512 | 0 | 10 | BSD-3-Clause | 2023-06-15T19:58:59 | 2020-03-30T22:20:30 | Python | UTF-8 | Python | false | false | 902 | py | __author__ = 'mstacy'
import sys
def trim(docstring):
    """Normalize a docstring's indentation (the PEP 257 trim algorithm)."""
    if not docstring:
        return ''
    # Expand tabs and split into individual lines.
    lines = docstring.expandtabs().splitlines()
    # Smallest indentation among the non-blank continuation lines; the
    # summary line never counts towards the margin.
    margins = [len(line) - len(line.lstrip())
               for line in lines[1:] if line.lstrip()]
    indent = min(margins) if margins else sys.maxsize
    # The first line is simply stripped; the rest lose the common margin.
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        trimmed.extend(line[indent:].rstrip() for line in lines[1:])
    # Drop blank lines from both ends.
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return a single string:
    return '\n'.join(trimmed)
| [
"mbstacy@gmail.com"
] | mbstacy@gmail.com |
1694951498da2ad6b95eeae715bc400b8770b999 | bc441bb06b8948288f110af63feda4e798f30225 | /easy_work_service_sdk/model/monitor/alert_range_pb2.pyi | 7c70da75b437636d222d16eee32fe5638966a771 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,270 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from easy_work_service_sdk.model.monitor.alert_event_pb2 import (
AlertEvent as easy_work_service_sdk___model___monitor___alert_event_pb2___AlertEvent,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Aliases for the builtins, presumably so generated message fields with the
# same names cannot shadow them inside this stub -- generated code, do not edit.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
# Python 2 only: buffer/unicode do not exist on Python 3.
if sys.version_info < (3,):
    builtin___buffer = buffer
    builtin___unicode = unicode
class AlertRange(google___protobuf___message___Message):
    # Generated protobuf stub (header says "Do not edit!"): message with scalar
    # fields org/key/alert_begin_time and an embedded first_alert AlertEvent.
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    org = ... # type: builtin___int
    key = ... # type: typing___Text
    alert_begin_time = ... # type: builtin___int

    @property
    def first_alert(self) -> easy_work_service_sdk___model___monitor___alert_event_pb2___AlertEvent: ...

    def __init__(self,
        *,
        org : typing___Optional[builtin___int] = None,
        key : typing___Optional[typing___Text] = None,
        first_alert : typing___Optional[easy_work_service_sdk___model___monitor___alert_event_pb2___AlertEvent] = None,
        alert_begin_time : typing___Optional[builtin___int] = None,
        ) -> None: ...
    # FromString's accepted input types differ between Python 2 and 3.
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> AlertRange: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> AlertRange: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    # Only the singular message field participates in HasField.
    def HasField(self, field_name: typing_extensions___Literal[u"first_alert",b"first_alert"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"alert_begin_time",b"alert_begin_time",u"first_alert",b"first_alert",u"key",b"key",u"org",b"org"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
b5350998e793f72686b2d5ea68ffbafa347ac10c | 2251e611c24426c1f787a2c58bb1bbf2a5018cf9 | /chcemvediet/apps/inforequests/forms/clarification_response.py | 848580f591bf2a1f03bbb54106019b390594e180 | [] | no_license | gitter-badger/chcemvediet | 68ff6fb1bf9d3c1b4d2c67b7118c42944e176a70 | 661082369495a97d0f8bdf8e5a775f4a06277799 | refs/heads/master | 2020-12-26T18:41:55.615024 | 2015-12-11T01:29:38 | 2015-12-11T01:29:38 | 48,106,138 | 0 | 0 | null | 2015-12-16T11:20:57 | 2015-12-16T11:20:56 | null | UTF-8 | Python | false | false | 4,663 | py | # vim: expandtab
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.sessions.models import Session
from poleno.attachments.forms import AttachmentsField
from poleno.utils.models import after_saved
from poleno.utils.urls import reverse
from poleno.utils.date import local_today
from poleno.utils.forms import CompositeTextField
from poleno.utils.template import render_to_string
from poleno.utils.misc import squeeze
from chcemvediet.apps.wizards.wizard import Step, Wizard
from chcemvediet.apps.inforequests.models import Action
class Main(Step):
    """Wizard step where the user writes the clarification response itself."""
    template = u'inforequests/clarification_response/main.html'
    text_template = u'inforequests/clarification_response/texts/main.html'
    form_template = u'main/forms/form_horizontal.html'
    # Field values the wizard keeps beyond this step.
    global_fields = [u'attachments']

    def add_fields(self):
        super(Main, self).add_fields()
        # Free-text response body, rendered through the content.txt template.
        self.fields[u'content'] = CompositeTextField(
                label=_(u'inforequests:clarification_response:Main:content:label'),
                template=u'inforequests/clarification_response/forms/content.txt',
                context=self.context(),
                fields=[
                    forms.CharField(widget=forms.Textarea(attrs={
                        u'placeholder':
                            _(u'inforequests:clarification_response:Main:content:placeholder'),
                        u'class': u'pln-autosize',
                        u'cols': u'', u'rows': u'',
                        })),
                    ],
                composite_attrs={
                    },
                )
        # Uploads are attached to the wizard draft and the current session
        # until the wizard finishes.
        self.fields[u'attachments'] = AttachmentsField(
                label=_(u'inforequests:clarification_response:Main:attachments:label'),
                required=False,
                attached_to=(
                    self.wizard.draft,
                    Session.objects.get(session_key=self.wizard.request.session.session_key),
                    ),
                upload_url_func=(
                    lambda: reverse(u'inforequests:upload_attachment')),
                download_url_func=(
                    lambda a: reverse(u'inforequests:download_attachment', args=[a.pk])),
                )

    def clean(self):
        cleaned_data = super(Main, self).clean()
        # The response may not be filed while undecided e-mails remain.
        if self.wizard.branch.inforequest.has_undecided_emails:
            msg = _(u'inforequests:clarification_response:Main:error:undecided_emails')
            self.add_error(None, msg)
        return cleaned_data

    def commit(self):
        super(Main, self).commit()

        # Attach the uploaded files only after the draft row has been saved.
        @after_saved(self.wizard.draft)
        def deferred(draft):
            for attachment in self.cleaned_data.get(u'attachments', []):
                attachment.generic_object = draft
                attachment.save()

    def post_transition(self):
        res = super(Main, self).post_transition()
        # Once valid, publish the final subject/content into the wizard globals.
        if self.is_valid():
            res.globals.update({
                    u'subject': squeeze(render_to_string(
                        u'inforequests/clarification_response/forms/subject.txt')),
                    u'content': self.fields[u'content'].finalize(self.cleaned_data[u'content']),
                    })
        return res
class ClarificationResponseWizard(Wizard):
    """One-step wizard that files a clarification response on a branch."""
    first_step_class = Main

    def __init__(self, request, index, branch):
        # Cache the branch context before the base class builds the steps.
        self.inforequest = branch.inforequest
        self.branch = branch
        self.last_action = branch.last_action
        super(ClarificationResponseWizard, self).__init__(request, index)

    def get_instance_id(self):
        # One wizard instance per last action, e.g. "ClarificationResponseWizard-42".
        return u'{}-{}'.format(self.__class__.__name__, self.last_action.pk)

    def get_step_url(self, step, anchor=u''):
        return reverse(u'inforequests:clarification_response',
                kwargs=dict(branch=self.branch, step=step)) + anchor

    def context(self, extra=None):
        # Expose the cached objects to the step templates.
        res = super(ClarificationResponseWizard, self).context(extra)
        res.update({
                u'inforequest': self.inforequest,
                u'branch': self.branch,
                u'last_action': self.last_action,
                })
        return res

    def finish(self):
        """Create, save and e-mail the CLARIFICATION_RESPONSE action; return its URL."""
        today = local_today()
        action = Action.create(
                branch=self.branch,
                type=Action.TYPES.CLARIFICATION_RESPONSE,
                subject=self.values[u'subject'],
                content=self.values[u'content'],
                sent_date=today,
                legal_date=today,
                attachments=self.values[u'attachments'],
                )
        action.save()
        action.send_by_email()
        return action.get_absolute_url()
| [
"martin@github.ksp.sk"
] | martin@github.ksp.sk |
f7cf470b5bc308f5ac1729277bacc3471d63f065 | be2a81f03e8a2dac7d356dde7a3ffdcfe3f77e00 | /providers/com/figshare/v2/migrations/0002_favicon.py | b74fa5df61b4fd11cad5cf7c9ff7a40c2fb842f4 | [
"Apache-2.0"
] | permissive | Stevenholloway/SHARE | 4193bbd3ca50765a24bf21c0cc14438175fbb678 | b9759106d12c2ff548bad22c4be8650e9f41e61e | refs/heads/develop | 2021-01-21T19:13:35.205983 | 2017-02-23T14:45:46 | 2017-02-23T14:45:46 | 63,431,390 | 0 | 0 | null | 2016-07-15T15:17:45 | 2016-07-15T15:17:44 | null | UTF-8 | Python | false | false | 459 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-01 21:23
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
    # Requires both the provider's initial schema and the core migration that
    # introduced favicon storage.
    dependencies = [
        ('com.figshare.v2', '0001_initial'),
        ('share', '0018_store_favicons'),
    ]
    # Data migration: backfill the favicon for this harvester source.
    # NOTE(review): no reverse_code is given, so this migration is irreversible.
    operations = [
        migrations.RunPython(
            code=share.robot.RobotFaviconMigration('com.figshare.v2'),
        ),
    ]
| [
"chriskseto@gmail.com"
] | chriskseto@gmail.com |
0d47b387ecdcd63cf06997e6b94745e34488e70f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /WBsxNXnqeFr4es9oh_11.py | 05d4b8426d27ac7a24fed8b4bc62b4249477a61b | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | """
In **Clockwise Cipher** , encoding is done by placing message characters in
the corner cells of a square and moving in a clockwise direction.
Create a function that takes an argument `message`, and returns the **encoded
message**.
There are some variations on the rules of encipherment. One version of the
cipher rules are outlined below:
message = "Mubashir Hassan"
clockwise_cipher(message) ➞ "Ms ussahr nHaaib"
**Step 1:** Form a square large enough to fit all the message characters.
Given message can fit in a 4 x 4 square.
**Step 2:** Starting with the top-left corner, place message characters in
the corner cells moving in a clockwise direction. After the first cycle is
complete, continue placing characters in the cells following the last one in
its respective row/column. When the outer cells are filled, continue for the
remaining inner squares:
M| s| | u
---|---|---|---
s| s| a| h
r| | n| H
a| a| i| b
**Step 3:** Return encoded message **Rows-wise** :
eMessage = "Ms ussahr nHaaib"
### Example for a 5 x 5 Square
[ 1 5 9 13 2]
[16 17 21 18 6]
[12 24 25 22 10]
[ 8 20 23 19 14]
[ 4 15 11 7 3]
### Examples
clockwise_cipher("Mubashir Hassan") ➞ "Ms ussahr nHaaib"
clockwise_cipher("Matt MacPherson") ➞ "M ParsoMc nhteat"
clockwise_cipher("Edabit is amazing") ➞ "Eisadng tm i zbia a"
### Notes
* Fill up any unused cells with a space character.
* Message can contain spaces and special characters.
"""
from math import ceil
def clockwise_cipher(message):
    """Encode *message* with the Clockwise Cipher.

    Characters are written into the smallest n x n grid that fits them,
    visiting cells corner-first in clockwise order, ring by ring (see the
    module docstring), then the grid is read back row by row.  Unfilled
    cells become spaces.

    The previous implementation built the coordinate list with a recursive
    lambda and quadratic ``sum(..., [])`` concatenation; this version
    generates the same order iteratively.
    """
    # Smallest square that can hold every character of the message.
    n = ceil(len(message) ** 0.5)

    def corner_coords(size):
        """Yield (row, col) cells corner-first, clockwise, ring by ring."""
        for ring in range((size + 1) // 2):
            m = size - 2 * ring  # side length of this ring
            if m == 1:
                # Odd-sized square: the innermost ring is a single centre cell.
                yield (ring, ring)
                break
            last = ring + m - 1  # far edge index of this ring
            # For each step i, place one character on each of the four sides,
            # mirroring the corner-interleaved order of the cipher.
            for i in range(m - 1):
                yield (ring, ring + i)      # top edge, left -> right
                yield (ring + i, last)      # right edge, top -> bottom
                yield (last, last - i)      # bottom edge, right -> left
                yield (last - i, ring)      # left edge, bottom -> top

    grid = [[" "] * n for _ in range(n)]
    # zip stops at len(message); the remaining cells keep their space filler.
    for ch, (row, col) in zip(message, corner_coords(n)):
        grid[row][col] = ch
    return ''.join(''.join(row) for row in grid)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a974511c8d4f4eea341e3475123871ddf39fcf49 | 24c5c46f1d281fc15de7f6b72a5148ae85f89fb4 | /SRC/demo/imooc/imooc_advanced/文件IO高效处理/part5.py | 95bf4363644347a2b13b050019262fd1169ef12e | [] | no_license | enterpriseih/easyTest | 22d87c7ffe40fb10a07f7c5cdd505f63dd45adc0 | 43b8d294e898f25055c78313cfece2753352c250 | refs/heads/master | 2023-08-23T22:55:14.798341 | 2020-02-11T09:13:43 | 2020-02-11T09:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | # 如何访问文件的状态
# - file type
# - file access permissions
# - last access / modification / inode status-change times
# - size of a regular file
#
# Approach 1: the os module's three system calls stat, fstat, lstat return file status
# os.stat follows a symbolic link (stats the target, not the link)
# os.lstat stats the symbolic link itself
# os.fstat requires an already-open file descriptor
# open().fileno() # obtains the file descriptor
import os, stat,time
s = os.stat('demo.txt')
print(s.st_mode)
print(bin(s.st_mode))
print(stat.S_ISDIR(s.st_mode)) # test the file-type bits of st_mode
print(stat.S_ISREG(s.st_mode))
# Read a file permission bit
res=s.st_mode & stat.S_IRUSR #test the flag bit: a non-zero result means the bit is set
print(res)
#last access time of the file
print(time.localtime(s.st_atime))
#size of the file in bytes
res=s.st_size
print(res)
# Approach 2: the helper predicates under os.path in the standard library
res=os.path.isdir('x.txt')
print(res)
res=os.path.islink('x.txt')
print(res)
| [
"yaolihui0506"
] | yaolihui0506 |
7534586b26049b6a1df685966ea721bd74d2fd71 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/86/usersdata/236/58743/submittedfiles/pico.py | abe26e0bc7e8432854b8628ab529927705d3a298 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # -*- coding: utf-8 -*-
def pico(lista):
    """Print 'S' if *lista* is a "pico" (strictly rises, then strictly falls),
    otherwise print 'N'.

    The original version defined two inner helpers that were never called and
    then referenced ``cont1``/``cont2``, which were never bound in this scope,
    so every call raised NameError.  This version performs the climb and the
    descent inline and keeps the original final check.
    """
    i = 1
    cont1 = 0
    # Walk up the strictly increasing prefix.
    while i < len(lista) and lista[i] > lista[i - 1]:
        i += 1
        cont1 += 1
    cont2 = 0
    # Walk down the strictly decreasing suffix.
    while i < len(lista) and lista[i] < lista[i - 1]:
        i += 1
        cont2 += 1
    # A pico covers the whole list: every adjacent step belongs to the rise
    # or the fall.  NOTE(review): a monotonic list also passes this check —
    # presumably intended by the original author; confirm against the spec.
    if (cont1 + cont2) == (len(lista) - 1):
        print('S')
    else:
        print('N')
# Read the list size, then one float per line, and classify the list.
n = int(input('Digite a quantidade de elementos da lista: '))
A=[]
for i in range (1,n+1,1):
    numero= float(input('numero:'))
    A.append (numero)
# Prints 'S' if A rises then falls (a "pico"), 'N' otherwise.
pico(A)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ddcdc27572a19ef988a11713433e4beec21812b3 | 3665ef60ba2cde4c13a8d192b8779a2f43dd9980 | /python/ray/tests/kubernetes_e2e/test_k8s_operator_scaling.py | 1d2511685df5490cf9f6750448cdd343a5b8495f | [
"MIT",
"Apache-2.0"
] | permissive | QPC-database/amazon-ray | 45783aa27977d7afda0bfa1b076be414f5dd1328 | 55aa4cac02a412b96252aea4e8c3f177a28324a1 | refs/heads/main | 2023-06-12T17:07:49.202429 | 2021-07-08T17:58:32 | 2021-07-08T17:58:32 | 384,644,975 | 1 | 0 | Apache-2.0 | 2021-07-10T08:05:02 | 2021-07-10T08:05:02 | null | UTF-8 | Python | false | false | 6,571 | py | """
Tests scaling behavior of Kubernetes operator.
(1) Start a cluster with minWorkers = 30 and verify scale-up
(2) Edit minWorkers to 0, verify scale-down
(3) Submit a task requiring 14 workers using Ray client
(4) Verify scale-up, task execution, and scale down.
"""
import copy
import kubernetes
import subprocess
import sys
import tempfile
import time
import unittest
import pytest
import yaml
import ray
from test_k8s_operator_basic import client_connect_to_k8s
from test_k8s_operator_basic import get_crd_path
from test_k8s_operator_basic import get_component_config_path
from test_k8s_operator_basic import retry_until_true
from test_k8s_operator_basic import wait_for_pods
from test_k8s_operator_basic import IMAGE
from test_k8s_operator_basic import PULL_POLICY
from test_k8s_operator_basic import NAMESPACE
def submit_scaling_job(num_tasks):
    """Submit *num_tasks* one-CPU Ray tasks and verify autoscaler scale-up.

    Each task sleeps long enough for the autoscaler to observe the demand;
    since every Ray pod has 1 CPU, the cluster must grow to one pod per task.
    Raises AssertionError if the gathered results are wrong.
    """
    @ray.remote(num_cpus=1)
    def f(i):
        time.sleep(60)
        return i
    print(">>>Submitting tasks with Ray client.")
    futures = [f.remote(i) for i in range(num_tasks)]
    print(">>>Verifying scale-up.")
    # Expect as many pods as tasks.
    # (each Ray pod has 1 CPU)
    wait_for_pods(num_tasks)
    print(">>>Waiting for task output.")
    task_output = ray.get(futures, timeout=360)
    # Fixed: the original message was split as "Tasks did not""complete ...",
    # which rendered without the separating space.
    assert task_output == list(range(num_tasks)), \
        "Tasks did not complete with expected output."
@retry_until_true
def wait_for_operator():
    """Poll `kubectl get pods` until a running ray-operator pod appears."""
    listing = subprocess.check_output("kubectl get pods", shell=True).decode()
    return any(
        "ray-operator" in line and "Running" in line
        for line in listing.splitlines()
    )
class KubernetesScaleTest(unittest.TestCase):
    def test_scaling(self):
        """End-to-end autoscaling check against a live Kubernetes cluster.

        Sequence: deploy the operator, register the RayCluster CRD, launch a
        30-worker cluster (plus a small same-named cluster in a second
        namespace), verify scale-up, drop minWorkers to 0 and verify
        scale-down, then drive scale-up/down again via a Ray client job.
        Requires kubectl access and namespaces NAMESPACE and NAMESPACE2.
        """
        with tempfile.NamedTemporaryFile("w+") as example_cluster_file, \
                tempfile.NamedTemporaryFile("w+") as example_cluster_file2, \
                tempfile.NamedTemporaryFile("w+") as operator_file:
            example_cluster_config_path = get_component_config_path(
                "example_cluster.yaml")
            operator_config_path = get_component_config_path(
                "operator_cluster_scoped.yaml")
            # The operator manifest is a multi-document YAML stream; the pod
            # template lives in its last document.
            operator_config = list(
                yaml.safe_load_all(open(operator_config_path).read()))
            example_cluster_config = yaml.safe_load(
                open(example_cluster_config_path).read())
            # Set image and pull policy
            podTypes = example_cluster_config["spec"]["podTypes"]
            pod_specs = [operator_config[-1]["spec"]["template"]["spec"]] + [
                podType["podConfig"]["spec"] for podType in podTypes
            ]
            for pod_spec in pod_specs:
                pod_spec["containers"][0]["image"] = IMAGE
                pod_spec["containers"][0]["imagePullPolicy"] = PULL_POLICY
            # Config set-up for this test.
            example_cluster_config["spec"]["maxWorkers"] = 100
            example_cluster_config["spec"]["idleTimeoutMinutes"] = 1
            worker_type = podTypes[1]
            # Make sure we have the right type
            assert "worker" in worker_type["name"]
            worker_type["maxWorkers"] = 100
            # Key for the first part of this test:
            worker_type["minWorkers"] = 30
            # Config for a small cluster with the same name to be launched
            # in another namespace.
            example_cluster_config2 = copy.deepcopy(example_cluster_config)
            example_cluster_config2["spec"]["podTypes"][1]["minWorkers"] = 1
            # Test overriding default client port.
            example_cluster_config["spec"]["headServicePorts"] = [{
                "name": "client",
                "port": 10002,
                "targetPort": 10001
            }]
            yaml.dump(example_cluster_config, example_cluster_file)
            yaml.dump(example_cluster_config2, example_cluster_file2)
            yaml.dump_all(operator_config, operator_file)
            files = [example_cluster_file, operator_file]
            for file in files:
                file.flush()
            print(">>>Creating operator.")
            cmd = f"kubectl apply -f {operator_file.name}"
            subprocess.check_call(cmd, shell=True)
            # Test creating operator before CRD.
            print(">>>Waiting for Ray operator to enter running state.")
            wait_for_operator()
            print(">>>Creating RayCluster CRD.")
            cmd = f"kubectl apply -f {get_crd_path()}"
            subprocess.check_call(cmd, shell=True)
            # Takes a bit of time for CRD to register.
            time.sleep(10)
            # Start a 30-pod cluster.
            print(">>>Starting a cluster.")
            cd = f"kubectl -n {NAMESPACE} apply -f {example_cluster_file.name}"
            subprocess.check_call(cd, shell=True)
            print(">>>Starting a cluster with same name in another namespace")
            # Assumes a namespace called {NAMESPACE}2 has been created.
            cd = f"kubectl -n {NAMESPACE}2 apply -f "\
                f"{example_cluster_file2.name}"
            subprocess.check_call(cd, shell=True)
            # Check that autoscaling respects minWorkers by waiting for
            # 32 pods in one namespace and 2 pods in the other.
            print(">>>Waiting for pods to join cluster.")
            wait_for_pods(31)
            wait_for_pods(2, namespace=f"{NAMESPACE}2")
            # Check scale-down.
            print(">>>Decreasing min workers to 0.")
            example_cluster_edit = copy.deepcopy(example_cluster_config)
            # Set minWorkers to 0:
            example_cluster_edit["spec"]["podTypes"][1]["minWorkers"] = 0
            # Re-apply the edited config over the same cluster name.
            yaml.dump(example_cluster_edit, example_cluster_file)
            example_cluster_file.flush()
            cm = f"kubectl -n {NAMESPACE} apply -f {example_cluster_file.name}"
            subprocess.check_call(cm, shell=True)
            print(">>>Sleeping for a minute while workers time-out.")
            time.sleep(60)
            print(">>>Verifying scale-down.")
            wait_for_pods(1)
            # Connect through the overridden client port configured above.
            with client_connect_to_k8s(port="10002"):
                # Test scale up and scale down after task submission.
                submit_scaling_job(num_tasks=15)
            print(">>>Sleeping for a minute while workers time-out.")
            time.sleep(60)
            print(">>>Verifying scale-down.")
            wait_for_pods(1)
if __name__ == "__main__":
    # Load kubeconfig credentials before any Kubernetes API calls, then run
    # this file's tests under pytest and propagate the exit status.
    kubernetes.config.load_kube_config()
    sys.exit(pytest.main(["-sv", __file__]))
| [
"noreply@github.com"
] | QPC-database.noreply@github.com |
8a7738f0d32537b2f0994883b51fa8ae1b0e8c30 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-hilens/huaweicloudsdkhilens/v3/model/delete_secret_response.py | aa4df97a980ce2ea5cf944f3cadf638e76878e29 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,211 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteSecretResponse(SdkResponse):
    """Auto-generated SDK response model for the delete-secret operation.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attributes whose values are masked as "****" in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'secret': 'SecretId'
    }
    attribute_map = {
        'secret': 'secret'
    }
    def __init__(self, secret=None):
        """DeleteSecretResponse

        The model defined in huaweicloud sdk

        :param secret: Identifier of the deleted secret.
        :type secret: :class:`huaweicloudsdkhilens.v3.SecretId`
        """
        super(DeleteSecretResponse, self).__init__()
        self._secret = None
        self.discriminator = None
        if secret is not None:
            self.secret = secret
    @property
    def secret(self):
        """Gets the secret of this DeleteSecretResponse.

        :return: The secret of this DeleteSecretResponse.
        :rtype: :class:`huaweicloudsdkhilens.v3.SecretId`
        """
        return self._secret
    @secret.setter
    def secret(self, secret):
        """Sets the secret of this DeleteSecretResponse.

        :param secret: The secret of this DeleteSecretResponse.
        :type secret: :class:`huaweicloudsdkhilens.v3.SecretId`
        """
        self._secret = secret
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk every declared attribute, recursively converting nested models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive scalar fields instead of exposing them.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before serializing.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteSecretResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
0c498980bf25b8dfbd71c2f2472b890eb46bed3f | ac33e7a30131db58f0e72c9bf1f79cd34a38d335 | /manufacturing/doctype/stability_product_specification/stability_product_specification.py | 1292b97230dcb653d9bc46cc130feb66b19ebaa7 | [] | no_license | mbhavesh95863/erpnext | 395d545292c67cc5d6d7be3029d03245c754d984 | d6c490e4a404235abe9b4d541de1bbb53ba32949 | refs/heads/master | 2020-03-26T20:03:45.620397 | 2018-08-19T12:46:43 | 2018-08-19T12:46:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import cint, cstr, flt
class StabilityProductSpecification(Document):
    def autoname(self):
        """Name the document 'SPS/<item_code>/NNN' with the next free serial,
        and set the companion 'SPSTP/<item_code>/NNN' identifier."""
        existing = frappe.db.sql_list("""select name from `tabStability Product Specification` where item_code=%s""", self.item_code)
        if existing:
            # name can be BOM/ITEM/001, BOM/ITEM/001-1, BOM-ITEM-001, BOM-ITEM-001-1
            # Strip everything up to and including the item code, ...
            tails = [entry.split(self.item_code)[-1][1:] for entry in existing]
            # ... then take the last '/'-separated piece as the numeric serial.
            serials = [cint(tail.split('/')[-1]) for tail in tails]
            idx = max(serials) + 1
        else:
            idx = 1
        suffix = '/%.3i' % idx
        self.name = 'SPS/' + self.item_code + suffix
        self.spstp = 'SPSTP/' + self.item_code + suffix
"erpnextdeveloper1@gmail.com"
] | erpnextdeveloper1@gmail.com |
5da051244616584e88c4cd12cafd5e1ee72871da | 8f88c31a3218646e61d9891b230a289621066faa | /rcbfp/apps.py | 77601995314f45e92d5605b7bca8b30a790d5f1f | [] | no_license | jorgec/rcbfp-backend-web | e72fdc203b4ab22127f82255faec63ba6a28703f | 5472a82538494fe897d5379e844dcbb0d0d8f041 | refs/heads/master | 2021-01-26T11:54:19.672898 | 2020-02-22T07:36:36 | 2020-02-22T07:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | # Application definition
# Built incrementally so each section below can be toggled independently.
INSTALLED_APPS = [
]
# Django built-in applications.
INSTALLED_APPS += [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize'
]
# Logging and debugging
INSTALLED_APPS += [
    'debug_toolbar',
]
# Developer conveniences and queryset filtering.
INSTALLED_APPS += [
    'django_extensions',
    'django_filters'
]
# REST API stack (DRF, token auth, CORS).
INSTALLED_APPS += [
    'rest_framework',
    'rest_framework.authtoken',
    'corsheaders',
]
# Utilities
INSTALLED_APPS += [
]
# Template Tags
INSTALLED_APPS += [
]
# Common Scaffold
INSTALLED_APPS += [
    'crispy_forms',
    'phonenumber_field',
]
# Core
INSTALLED_APPS += [
    'locations',
    'datesdim',
    'accounts',
    'profiles',
    'buildings',
    'business',
    'checklists',
    'incidents',
    'admin_dashboards',
]
"markjungersaniva@gmail.com"
] | markjungersaniva@gmail.com |
8b5b3e59563bc105597e183f6fea7f1ef9ddadf0 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /055_modules/001_modules/examples/Python 3 Most Nessesary/12.2.Listing 12.15. The contents of the main program.py | 47eabaf9e133a17462e0bf428724e32c448b96d1 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 366 | py | # -*- coding: utf-8 -*-
# Demonstrates how a later `from ... import *` shadows earlier names.
from module1 import *
from module2 import *
import module1, module2
print(s) # Prints "Значение из модуля module2" — the second star-import shadowed module1's s
print(module1.s) # Prints "Значение из модуля module1" — qualified access is unambiguous
print(module2.s) # Prints "Значение из модуля module2"
input()  # keep the console window open until Enter is pressed
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
7f23b5c699cad4f4a7f30661ea327d0201a9dfe6 | bd87d8947878ccb2f5b720e70a22493b00868fd3 | /bee/01basics/basics.py | 5c8af0d21a92dd0a4306103a5277afc532240e41 | [] | no_license | damiansp/completePython | 4cbf12ef682a1d4a5498f77e407dc02e44a7d7ac | 3f5e2f14d79c93df5147b82d901190c054535158 | refs/heads/master | 2023-09-01T20:50:03.444440 | 2023-08-28T00:27:57 | 2023-08-28T00:27:57 | 99,197,610 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #!/usr/bin/env python3
import cmath # complex math
# Getting Input from User
meaning = input('The meaning of life: ')
print('%s, is it?' %meaning)
x = int(input('x: ')) # 3 only?
y = int(input('y: '))
print('xy = %d' %(x * y))
# cmath and Complex Numbers
print(cmath.sqrt(-1))  # sqrt of a negative number needs cmath, not math
print((1 + 3j) * (9 + 4j))
#name = raw_input('What is your name? > ') # 2 only
#print('Why, hello, ' + name, + '!')
# String Representations
print("Hello, world!")
print(repr("Hello, world!"))  # repr keeps the quotes: '"Hello, world!"' style output
#print(str(10000L)) # error in 3
#print(repr(10000L)) # error in 3
temp = 42
print('The temperature is ' + str(temp))  # str/repr are equivalent for plain ints
# print('The temperature is ' + `temp`) # 2 only
print('The temperature is ' + repr(temp))
| [
"damiansp@gmail.com"
] | damiansp@gmail.com |
ba87068732a9123c6844d2c7a4f6cf2818a6a686 | 06ae8168b7067c8f77f06a48a22d158af1657651 | /teafound/migrations/0004_auto_20210411_1314.py | 76b6eb5f48409d932a3ab3028003f36582b7e115 | [] | no_license | Jiangjao/teaProject | 61e3cab41fab4b1aa8d2b1cfd6c6337c01196497 | 9f14d59d974bf82158a43d19c42b977b393857d2 | refs/heads/master | 2023-08-12T11:38:56.561815 | 2021-10-11T06:30:17 | 2021-10-11T06:30:17 | 347,795,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | # Generated by Django 3.0 on 2021-04-11 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Builds on the previous teafound schema state.
    dependencies = [
        ('teafound', '0003_auto_20210330_1509'),
    ]
    operations = [
        # New optional upload field on Chemistry for the follow-up picture.
        migrations.AddField(
            model_name='chemistry',
            name='entryname',
            field=models.FileField(blank=True, null=True, upload_to='images/', verbose_name='pictureUsedNext'),
        ),
        # Relax CodeImages.cid to allow missing values.
        migrations.AlterField(
            model_name='codeimages',
            name='cid',
            field=models.IntegerField(blank=True, null=True),
        ),
        # Widen Resource.name to 200 characters.
        migrations.AlterField(
            model_name='resource',
            name='name',
            field=models.CharField(max_length=200),
        ),
    ]
| [
"918718278@qq.com"
] | 918718278@qq.com |
698a82d67584b20bbeec34810819d8c8af499121 | 3e405caed737ae72c504bbbaa542d03fd886d8cc | /aplot/colors.py | dffd075fe20290f47e0fdc4b85da6576c207fdab | [] | no_license | alvarolopez/aplot | 89611309698ccd78edba4a31b9e0a29389c67571 | e881ab70f74e0ab3999e177ea1f350da614f88f6 | refs/heads/master | 2016-08-12T05:51:06.255050 | 2015-12-17T09:45:21 | 2016-01-17T11:42:46 | 48,102,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | from matplotlib import cm
from matplotlib import pyplot
import numpy
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
gray = "#777777"
base = tableau20[0]
colormap_name = 'YlOrRd'
colormap = cm.YlOrRd
colormap_array = cm.YlOrRd
pyplot.rcParams['image.cmap'] = 'YlOrRd'
| [
"aloga@ifca.unican.es"
] | aloga@ifca.unican.es |
638aca7dfce5519e601786e1b869cfa33cd48f09 | 650d8fb0348989e919359bbbc7f4b40888f38b87 | /case_management/cpbns/doctype/participant_identified/test_participant_identified.py | c02d04e916af98a2083f73e05ec349bbdadac616 | [
"MIT"
] | permissive | worldkingpradeep/case_management | 4f0daa467f3a1c83be0cc431307298e31282a5ee | d39af5f49614672fc30ac6019eb0772c28de3df3 | refs/heads/master | 2023-03-18T05:09:13.797475 | 2021-01-23T15:27:22 | 2021-01-23T15:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestParticipantIdentified(unittest.TestCase):
    """Placeholder suite for the Participant Identified doctype; no tests yet."""
| [
"you@example.com"
] | you@example.com |
4b13b7e42c2daec731d0176f7ae8c407716eeb47 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/servicenetworking/azure-mgmt-servicenetworking/azure/mgmt/servicenetworking/operations/_operations.py | aec0cfc6052ac274273da1b0d2196dae4a48717c | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 6,505 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
# Optional per-call response hook: receives the pipeline response, the
# deserialized body, and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared serializer for query/header formatting; client-side validation is
# disabled because the service validates inputs.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing provider operations.

    Recognized kwargs (all optional): ``headers``, ``params``,
    ``api_version`` and ``template_url``; anything left over is forwarded
    to :class:`HttpRequest`.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Explicit api_version kwarg wins over a pre-set "api-version" query param.
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-05-01-preview"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.ServiceNetworking/operations")
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.servicenetworking.ServiceNetworkingMgmtClient`'s
        :attr:`operations` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        # The generated client passes (client, config, serializer,
        # deserializer) positionally or by keyword, in this exact order.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
        """List the operations for the provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.servicenetworking.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
        # Map HTTP status codes to the exceptions raised for them; callers may
        # extend/override via the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # Build either the initial request or a follow-up request for a
        # continuation link returned by the service.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserialize one page into (next continuation link, item iterator).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, raising a mapped error for non-200 responses.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/providers/Microsoft.ServiceNetworking/operations"}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
087e778dd0a985adefd0d54e25b7977edd159a2a | 737728a38690e2e31c4b4c1a998fae923502cf54 | /Python/20058_마법사상어와파이어스톰.py | ea777152f74b546b0f3419128405a6e1d4dc89d3 | [] | no_license | chaeonee/baekjoon | 528c300f15f7f88a4c608a46e7b82aa6cf325a76 | 90da231f7134ab10a3649d4038da3ad6d631de45 | refs/heads/master | 2023-06-27T03:17:54.908553 | 2021-07-26T07:06:53 | 2021-07-26T07:06:53 | 220,909,690 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | import sys
from collections import deque
sys.setrecursionlimit(100000)
def checkIce(N, ice):
    """One melt pass: every positive cell with fewer than three frozen
    (positive) orthogonal neighbours loses one unit of ice.

    Neighbour counts are taken from the *input* grid, so all cells melt
    simultaneously.  Returns a new N x N grid; *ice* is not modified.
    """
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))
    melted = [row[:] for row in ice]
    for r in range(N):
        for c in range(N):
            if melted[r][c] <= 0:
                continue
            frozen_neighbors = sum(
                1
                for dr, dc in offsets
                if 0 <= r + dr < N and 0 <= c + dc < N and ice[r + dr][c + dc] > 0
            )
            if frozen_neighbors < 3:
                melted[r][c] -= 1
    return melted
def rotate(N, m, ice):
    """Rotate every m x m sub-grid of the N x N grid 90 degrees clockwise
    (in place), then apply one melt pass and return the melted grid."""
    snapshot = [row[:] for row in ice]
    for top in range(0, N, m):
        for left in range(0, N, m):
            for r in range(m):
                for c in range(m):
                    # Cell (r, c) of the sub-grid moves to (c, m-1-r).
                    ice[top + c][left + (m - 1 - r)] = snapshot[top + r][left + c]
    return checkIce(N, ice)
# BOJ 20058 (Wizard Shark and Fire Storm): the grid side is 2**N.
N, Q = list(map(int,sys.stdin.readline().split()))
N = 2**N
ice = [list(map(int,sys.stdin.readline().split())) for _ in range(N)]
magic = list(map(int,sys.stdin.readline().split()))
# Each spell rotates all 2**m sub-grids and triggers one melt pass.
for m in magic:
    m = 2**m
    ice = rotate(N,m,ice)
# BFS flood-fill: total remaining ice and the largest connected ice chunk.
ice_size, total_ice = 0, 0
c_dir = [[-1,0],[1,0],[0,-1],[0,1]]
for i in range(N):
    for j in range(N):
        if not ice[i][j]:
            continue
        s = 1
        total_ice += ice[i][j]
        ice[i][j] = 0
        q = deque([[i,j]])
        while q:
            x, y = q.popleft()
            for dx, dy in c_dir:
                dx += x
                dy += y
                if 0 <= dx < N and 0 <= dy < N and ice[dx][dy]:
                    s += 1
                    total_ice += ice[dx][dy]
                    q.append([dx,dy])
                    # Zero the cell when enqueued so it is never revisited.
                    ice[dx][dy] = 0
        ice_size = max(s, ice_size)
print(total_ice)
print(ice_size)
| [
"noreply@github.com"
] | chaeonee.noreply@github.com |
f0726feaad28d68f162f4eb3e242c62833307ecb | cdb186ad49bba1406c81f634b936e73f8cb04009 | /ABC/120/d2.py | 59601ef036cf61ce1bab54e68b795dbf3a972c45 | [] | no_license | ToshikiShimizu/AtCoder | 9e46f5581f2c1f5149ce1394d61d652cda6256a3 | 41fe6408c20c59bbf1b5d7ee9db2e132f48ad1ac | refs/heads/master | 2023-07-26T22:45:51.965088 | 2023-07-10T14:11:35 | 2023-07-10T14:11:35 | 148,154,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | class UnionFind:
def __init__(self, n):
self.nodes = n
self.parents = [i for i in range(n)]
self.sizes = [1] * n
self.rank = [0] * n
def find(self, i): # どの集合に属しているか(根ノードの番号)
if self.parents[i] == i:
return i
else:
self.parents[i] = self.find(self.parents[i]) # 経路圧縮
return self.parents[i]
def unite(self, i, j): # 二つの集合を併合
pi = self.find(i)
pj = self.find(j)
if pi != pj:
if self.rank[pi] < self.rank[pj]:
self.sizes[pj] += self.sizes[pi]
self.parents[pi] = pj
else:
self.sizes[pi] += self.sizes[pj]
self.parents[pj] = pi
if self.rank[pi] == self.rank[pj]:
self.rank[pi] += 1
def same(self, i, j): # 同じ集合に属するかを判定
return self.find(i)==self.find(j)
def get_parents(self): # 根ノードの一覧を取得
for n in range(self.nodes): # findで経路圧縮する
self.find(n)
return self.parents
def size(self, i):
p = self.find(i)
return self.sizes[p]
# Offline "inconvenience" computation: process the M edge removals in reverse
# order as unions, so each answer is the number of disconnected vertex pairs
# just BEFORE that edge disappears.
N, M = map(int, input().split())
AB = []
for m in range(M):
    a, b = map(int, input().split())
    AB.append((a-1,b-1))  # store zero-based endpoints
ans = []
# With no edges at all, every one of the N*(N-1)/2 pairs is disconnected.
score = N * (N-1) // 2
uf = UnionFind(N)
for a, b in AB[::-1]:
    ans.append(score)
    if not uf.same(a,b):
        # Joining two components connects size(a)*size(b) new pairs.
        score -= uf.size(a) * uf.size(b)
    uf.unite(a,b)
# Answers were collected in reverse removal order; emit in original order.
for score in ans[::-1]:
    print(score)
| [
"tamreff3290@gmail.com"
] | tamreff3290@gmail.com |
d26d7340ae7caa87d85733eb2be12086b2f83ad4 | 8aca00af527752d527303d0f5260e818a5f49d27 | /VAE/Vanilla_VAE/train.py | 0b4eae02aed7f9355a5ead20b6769a92f30142cb | [] | no_license | tobby2002/generative_models | 2b01b7110b9f7cc258742694eb912b4b087d8571 | aee760bf2d281512e2b52c273e12daa3314f77f9 | refs/heads/master | 2021-01-21T04:26:53.828509 | 2017-06-29T08:56:02 | 2017-06-29T08:56:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import argparse
import os
from solver import Solver
# from torch.backends import cudnn
from data_loader import mnist_data_loader
from configs import get_config
def main(config):
    """Build the MNIST training pipeline described by `config` and train."""
    # Training split of MNIST, batched per the configuration.
    loader = mnist_data_loader(image_path=config.image_path,
                               batch_size=config.batch_size,
                               train=True)
    trainer = Solver(config, data_loader=loader, is_train=True)
    # Log the run setup before any heavy work starts.
    print(config)
    print(f'\nTotal data size: {trainer.total_data_size}\n')
    trainer.build_model()
    trainer.train()
if __name__ == '__main__':
    # Get Configuration
    parser = argparse.ArgumentParser()
    # TODO: add arguments
    kwargs = parser.parse_args()
    # Namespace => dictionary
    kwargs = vars(kwargs)
    config = get_config(**kwargs)
    # Ensure every output directory exists before training starts.
    # NOTE(review): os.mkdir is not recursive -- assumes parents exist; confirm.
    for path in [config.image_path, config.image_log_path, config.save_dir]:
        if not os.path.isdir(path):
            os.mkdir(path)
    main(config)
| [
"heythisischo@gmail.com"
] | heythisischo@gmail.com |
a7fa75c779672492052164329d3bed0f821ea051 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03141/s588799336.py | ebd887e107d7cc9e8aef470dc771fa270d4b87c5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | import sys
def solve():
    """Greedy two-player alternating pick over N items read from stdin.

    Each item i is worth `a` to the first player and `b` to the second.
    Both players rank items primarily by a + b (the joint value), and on
    their turn take the best item their opponent has not already taken.
    Prints first player's total minus second player's total.
    """
    input = sys.stdin.readline  # fast reader; deliberately shadows builtin
    N = int(input())
    # T: items ranked for player 1 (key: a+b, then a, then index).
    # A: items ranked for player 2 (key: a+b, then b, then index).
    T, A = [None] * N, [None] * N
    used = [False] * N  # items already taken by either player
    for i in range(N):
        a, b = map(int, input().split())
        T[i] = (a + b, a, i)
        A[i] = (a + b, b, i)
    T.sort(reverse = True)
    A.sort(reverse = True)
    hapiness = 0  # running score difference (player1 - player2)
    ti, ai = 0, 0  # cursors skipping already-taken items in T and A
    for i in range(N):
        if i % 2 == 0:
            # Player 1's turn: best remaining item by his ranking.
            while used[T[ti][2]]: ti += 1
            hapiness += T[ti][1]
            used[T[ti][2]] = True
            ti += 1
        else:
            # Player 2's turn: best remaining item by his ranking.
            while used[A[ai][2]]: ai += 1
            hapiness -= A[ai][1]
            used[A[ai][2]] = True
            ai += 1
    print(hapiness)
    return 0
if __name__ == "__main__":
solve() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
041e74a93a32e8ecf62389e69580bde92a4029be | bd28f8a8dbcf7f2b4be3bcc0c0e656009191d379 | /predict_nn/ranlp/rsr/mi/lstm.py | 7b46f1ff48c11e224884dc9848aef46c174b4aa0 | [
"MIT"
] | permissive | nicolay-r/attitudes-extraction-ds | e2e5f9218408514ca1f3eff5edf88771e2f368ee | 49a82843e6adbca35321aaaa08d05532e953a0fc | refs/heads/master | 2022-08-30T04:51:14.133899 | 2020-05-28T11:06:01 | 2020-05-28T11:06:01 | 197,908,649 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | #!/usr/bin/python
import sys
sys.path.append('../../../../')
from predict_nn.ranlp.rsr.config import TEST_ON_EPOCHS, MI_CONTEXTS_PER_OPINION
from networks.ranlp.io_rsr import RaNLPConfTaskRuSentRelIO
from networks.mimlre.base import MIMLRE
from networks.ranlp.model_mimlre import RaNLPConfTaskMIMLREModel
from networks.mimlre.configuration.base import MIMLRESettings
from networks.context.architectures.rnn import RNN
from networks.context.configurations.rnn import RNNConfig, CellTypes
from predict_nn.ranlp.mi_names import ModelNames
import predict_nn.ranlp.utils as utils
def modify_settings(settings):
    """Tune the MIMLRE settings for this run: LSTM context cells, hidden 128."""
    assert(isinstance(settings, MIMLRESettings))
    assert(isinstance(settings.ContextSettings, RNNConfig))
    # How many contexts the multi-instance model aggregates per opinion.
    settings.modify_contexts_per_opinion(MI_CONTEXTS_PER_OPINION)
    settings.ContextSettings.set_cell_type(CellTypes.LSTM)
    settings.ContextSettings.modify_hidden_size(128)
if __name__ == "__main__":
    # Cross-validation run of the multi-instance (MIMLRE) model with an LSTM
    # context encoder on the RuSentRel collection; factories are passed so the
    # harness can rebuild network/config/IO/model per fold.
    utils.run_cv_testing(model_name=ModelNames.MI_RNN,
                         create_network=lambda: MIMLRE(context_network=RNN()),
                         create_config=lambda: MIMLRESettings(context_settings=RNNConfig()),
                         create_io=RaNLPConfTaskRuSentRelIO,
                         create_model=RaNLPConfTaskMIMLREModel,
                         modify_settings_callback=modify_settings,
                         test_on_epochs=TEST_ON_EPOCHS,
                         cancel_training_by_cost=False)
| [
"kolyarus@yandex.ru"
] | kolyarus@yandex.ru |
fd9a87cff6770478db8de311c0a2d82fd99f9919 | 2fb1ab65258d724ea869f8684db444cff0464d7e | /gym_copter/rendering/twod.py | acda7172d4bdd17a17f2e325d07b814f584b8bf7 | [
"MIT"
] | permissive | coletta1/gym-copter | 3cf80dd710da8222b91b8714e6fc78540faa684a | 841a8ea2c5343e289bc06efd6ad49a75ba14cccd | refs/heads/master | 2023-02-13T19:45:01.352539 | 2021-01-07T17:06:58 | 2021-01-07T17:06:58 | 327,072,254 | 0 | 0 | MIT | 2021-01-07T14:07:19 | 2021-01-05T17:34:03 | null | UTF-8 | Python | false | false | 6,088 | py | '''
2D quadcopter rendering
Copyright (C) 2020 Simon D. Levy
MIT License
'''
from gym.envs.classic_control import rendering
import Box2D
from Box2D.b2 import fixtureDef, polygonShape
class TwoDRenderer:
    """Box2D-backed 2D quadcopter renderer (hull, legs, motors, props).

    Props are drawn intermittently while "spinning" to fake rotation.
    """

    # Viewport size in pixels; SCALE converts world units to pixels.
    VIEWPORT_W, VIEWPORT_H = 600, 400
    SCALE = 30.0
    GROUND_Z = 3.33      # height of the ground line in scaled units
    GEAR_HEIGHT = 0.85   # vertical offset so the gear rests on the ground
    # Vertex lists below are in pre-SCALE units; divided by SCALE when built.
    # NOTE(review): LANDER_POLY is not referenced in this class -- presumably
    # kept for subclasses or history; confirm before removing.
    LANDER_POLY = [(-14, +17),
                   (-17, 0),
                   (-17, -10),
                   (+17, -10),
                   (+17, 0),
                   (+14, +17)]
    HULL_POLY = [(-30, 0),
                 (-4, +4),
                 (+4, +4),
                 (+30, 0),
                 (+4, -14),
                 (-4, -14)]
    # Leg / motor / prop-blade geometry (x offset, y offset, width, height).
    LEG_X, LEG_Y, LEG_W, LEG_H = 12, -7, 3, 20
    MOTOR_X, MOTOR_Y, MOTOR_W, MOTOR_H = 25, 7, 4, 5
    BLADE_X, BLADE_Y, BLADE_W, BLADE_H = 25, 8, 20, 2
    # RGB colours in [0, 1].
    SKY_COLOR = 0.5, 0.8, 1.0
    GROUND_COLOR = 0.5, 0.7, 0.3
    VEHICLE_COLOR = 1.0, 1.0, 1.0
    MOTOR_COLOR = 0.5, 0.5, 0.5
    PROP_COLOR = 0.0, 0.0, 0.0
    OUTLINE_COLOR = 0.0, 0.0, 0.0

    def __init__(self):
        """Create the gym viewer and the Box2D body holding all fixtures."""
        self.viewer = rendering.Viewer(self.VIEWPORT_W, self.VIEWPORT_H)
        self.viewer.set_bounds(0,
                               self.VIEWPORT_W/self.SCALE,
                               0,
                               self.VIEWPORT_H/self.SCALE)
        self.world = Box2D.b2World()
        # One dynamic body with nine fixtures, indexed in this order:
        # 0 hull, 1-2 legs, 3-4 motors, 5-8 prop blades (see _show_fixture).
        self.lander = self.world.CreateDynamicBody(
            fixtures=[
                fixtureDef(shape=polygonShape(vertices=[(x/self.SCALE,
                                                         y/self.SCALE)
                                                        for x, y in poly]),
                           density=0.0)
                for poly in [self.HULL_POLY,
                             self._leg_poly(-1),
                             self._leg_poly(+1),
                             self._motor_poly(+1),
                             self._motor_poly(-1),
                             self._blade_poly(+1, -1),
                             self._blade_poly(+1, +1),
                             self._blade_poly(-1, -1),
                             self._blade_poly(-1, +1)]
            ]
        )
        # By showing props periodically, we can emulate prop rotation
        self.props_visible = 0

    def close(self):
        """Release the viewer and destroy the Box2D body."""
        self.viewer.close()
        self.world.DestroyBody(self.lander)
        self.lander = None

    def render(self, pose, spinning):
        """Draw one frame: ground, sky, then the copter at `pose` (x, y, angle).

        `spinning` toggles the blinking-prop effect. Does not flush the frame;
        callers finish via complete().
        """
        # Draw ground as background
        self.viewer.draw_polygon(
            [(0, 0),
             (self.VIEWPORT_W, 0),
             (self.VIEWPORT_W, self.VIEWPORT_H),
             (0, self.VIEWPORT_H)],
            color=self.GROUND_COLOR)
        # Draw sky
        self.viewer.draw_polygon(
            [(0, self.GROUND_Z),
             (self.VIEWPORT_W, self.GROUND_Z),
             (self.VIEWPORT_W, self.VIEWPORT_H),
             (0, self.VIEWPORT_H)],
            color=self.SKY_COLOR)
        # Set copter pose to values from Lander2D.step(), negating for
        # coordinate conversion
        self.lander.position = (pose[0] + self.VIEWPORT_W/self.SCALE/2,
                                -pose[1] + self.GROUND_Z + self.GEAR_HEIGHT)
        self.lander.angle = -pose[2]
        # Draw copter
        self._show_fixture(1, self.VEHICLE_COLOR)
        self._show_fixture(2, self.VEHICLE_COLOR)
        self._show_fixture(0, self.VEHICLE_COLOR)
        self._show_fixture(3, self.MOTOR_COLOR)
        self._show_fixture(4, self.MOTOR_COLOR)
        # Simulate spinning props by alternating show/hide
        if not spinning or self.props_visible:
            for k in range(5, 9):
                self._show_fixture(k, self.PROP_COLOR)
        # When spinning, props_visible cycles 0,1,2 so the props blink;
        # when not spinning it stays truthy and props are always drawn.
        self.props_visible = (not spinning or ((self.props_visible + 1) % 3))

    def complete(self, mode):
        """Flush the frame; returns an RGB array when mode == 'rgb_array'."""
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')

    def _show_fixture(self, index, color):
        """Draw one body fixture (filled polygon plus black outline)."""
        fixture = self.lander.fixtures[index]
        trans = fixture.body.transform
        path = [trans*v for v in fixture.shape.vertices]
        self.viewer.draw_polygon(path, color=color)
        path.append(path[0])  # close the loop for the outline
        self.viewer.draw_polyline(path, color=self.OUTLINE_COLOR, linewidth=1)

    def _blade_poly(self, x, w):
        """Diamond-shaped prop blade; x = side (+/-1), w = blade direction."""
        return [
            (x*self.BLADE_X, self.BLADE_Y),
            (x*self.BLADE_X+w*self.BLADE_W/2, self.BLADE_Y+self.BLADE_H),
            (x*self.BLADE_X+w*self.BLADE_W, self.BLADE_Y),
            (x*self.BLADE_X+w*self.BLADE_W/2, self.BLADE_Y-self.BLADE_H),
        ]

    def _motor_poly(self, x):
        """Rectangular motor housing on side x (+/-1)."""
        return [
            (x*self.MOTOR_X, self.MOTOR_Y),
            (x*self.MOTOR_X+self.MOTOR_W, self.MOTOR_Y),
            (x*self.MOTOR_X+self.MOTOR_W, self.MOTOR_Y-self.MOTOR_H),
            (x*self.MOTOR_X, self.MOTOR_Y-self.MOTOR_H)
        ]

    def _leg_poly(self, x):
        """Rectangular landing leg on side x (+/-1)."""
        return [
            (x*self.LEG_X, self.LEG_Y),
            (x*self.LEG_X+self.LEG_W, self.LEG_Y),
            (x*self.LEG_X+self.LEG_W, self.LEG_Y-self.LEG_H),
            (x*self.LEG_X, self.LEG_Y-self.LEG_H)
        ]
class TwoDLanderRenderer(TwoDRenderer):
    """2D copter renderer that also marks the landing zone with two flags."""

    FLAG_COLOR = 0.8, 0.0, 0.0  # red pennants at +/- landing_radius

    def __init__(self, landing_radius):
        """landing_radius: horizontal half-width of the landing zone."""
        TwoDRenderer.__init__(self)
        self.landing_radius = landing_radius

    def render(self, mode, pose, spinning):
        """Draw the base scene, overlay the landing flags, finish the frame."""
        TwoDRenderer.render(self, pose, spinning)
        half_width = self.VIEWPORT_W/self.SCALE/2
        for side in (-1, +1):
            pole_bottom = self.GROUND_Z
            pole_top = pole_bottom + 50/self.SCALE
            x = side*self.landing_radius + half_width
            # White flag pole
            self.viewer.draw_polyline([(x, pole_bottom), (x, pole_top)],
                                      color=(1, 1, 1))
            # Triangular pennant attached at the top of the pole
            self.viewer.draw_polygon([(x, pole_top),
                                      (x, pole_top-10/self.SCALE),
                                      (x + 25/self.SCALE,
                                       pole_top-5/self.SCALE)],
                                     color=self.FLAG_COLOR)
        return TwoDRenderer.complete(self, mode)
| [
"simon.d.levy@gmail.com"
] | simon.d.levy@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.