blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73aaac7076a35b1339df56a6ec97847718e5b576 | a9063fd669162d4ce0e1d6cd2e35974274851547 | /swagger_client/models/inline_response20037.py | 95fc2cf006de2311defe2c4535c4bd38e08daa3b | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,916 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse20037(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its Swagger-declared type; consumed by the
    # generated (de)serialization machinery.
    swagger_types = {
        'next_page_token': 'str',
        'page_count': 'int',
        'page_size': 'int',
        'participants': 'list[InlineResponse20037Participants]',
        'total_records': 'int'
    }

    # Maps each model attribute to its JSON key in the API payload.
    attribute_map = {
        'next_page_token': 'next_page_token',
        'page_count': 'page_count',
        'page_size': 'page_size',
        'participants': 'participants',
        'total_records': 'total_records'
    }

    def __init__(self, next_page_token=None, page_count=None, page_size=None, participants=None, total_records=None):  # noqa: E501
        """InlineResponse20037 - a model defined in Swagger"""  # noqa: E501
        # Private backing fields for the properties defined below; None means
        # the attribute has not been set.
        self._next_page_token = None
        self._page_count = None
        self._page_size = None
        self._participants = None
        self._total_records = None
        self.discriminator = None
        # Assign only the attributes that were explicitly provided, so unset
        # values stay None for serialization purposes.
        if next_page_token is not None:
            self.next_page_token = next_page_token
        if page_count is not None:
            self.page_count = page_count
        if page_size is not None:
            self.page_size = page_size
        if participants is not None:
            self.participants = participants
        if total_records is not None:
            self.total_records = total_records

    @property
    def next_page_token(self):
        """Gets the next_page_token of this InlineResponse20037.  # noqa: E501

        The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.  # noqa: E501

        :return: The next_page_token of this InlineResponse20037.  # noqa: E501
        :rtype: str
        """
        return self._next_page_token

    @next_page_token.setter
    def next_page_token(self, next_page_token):
        """Sets the next_page_token of this InlineResponse20037.

        The next page token is used to paginate through large result sets. A next page token will be returned whenever the set of available results exceeds the current page size. The expiration period for this token is 15 minutes.  # noqa: E501

        :param next_page_token: The next_page_token of this InlineResponse20037.  # noqa: E501
        :type: str
        """
        self._next_page_token = next_page_token

    @property
    def page_count(self):
        """Gets the page_count of this InlineResponse20037.  # noqa: E501

        The number of pages returned for the request made.  # noqa: E501

        :return: The page_count of this InlineResponse20037.  # noqa: E501
        :rtype: int
        """
        return self._page_count

    @page_count.setter
    def page_count(self, page_count):
        """Sets the page_count of this InlineResponse20037.

        The number of pages returned for the request made.  # noqa: E501

        :param page_count: The page_count of this InlineResponse20037.  # noqa: E501
        :type: int
        """
        self._page_count = page_count

    @property
    def page_size(self):
        """Gets the page_size of this InlineResponse20037.  # noqa: E501

        The number of records returned within a single API call.  # noqa: E501

        :return: The page_size of this InlineResponse20037.  # noqa: E501
        :rtype: int
        """
        return self._page_size

    @page_size.setter
    def page_size(self, page_size):
        """Sets the page_size of this InlineResponse20037.

        The number of records returned within a single API call.  # noqa: E501

        :param page_size: The page_size of this InlineResponse20037.  # noqa: E501
        :type: int
        """
        self._page_size = page_size

    @property
    def participants(self):
        """Gets the participants of this InlineResponse20037.  # noqa: E501

        Array of meeting participant objects.  # noqa: E501

        :return: The participants of this InlineResponse20037.  # noqa: E501
        :rtype: list[InlineResponse20037Participants]
        """
        return self._participants

    @participants.setter
    def participants(self, participants):
        """Sets the participants of this InlineResponse20037.

        Array of meeting participant objects.  # noqa: E501

        :param participants: The participants of this InlineResponse20037.  # noqa: E501
        :type: list[InlineResponse20037Participants]
        """
        self._participants = participants

    @property
    def total_records(self):
        """Gets the total_records of this InlineResponse20037.  # noqa: E501

        The number of all records available across pages.  # noqa: E501

        :return: The total_records of this InlineResponse20037.  # noqa: E501
        :rtype: int
        """
        return self._total_records

    @total_records.setter
    def total_records(self, total_records):
        """Sets the total_records of this InlineResponse20037.

        The number of all records available across pages.  # noqa: E501

        :param total_records: The total_records of this InlineResponse20037.  # noqa: E501
        :type: int
        """
        self._total_records = total_records

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything exposing to_dict) so the
        # result contains only plain Python containers.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated-code convention: if the model subclasses dict, include the
        # raw mapping entries as well (never true for this class as written).
        if issubclass(InlineResponse20037, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InlineResponse20037):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"github@rootalley.com"
] | github@rootalley.com |
473643508173d56ecf3fe8c14b5fb62600047a35 | 4f4ab46092c73b1cfb0a7730bb3ed59b829552a9 | /src/transitive_closure.py | 724181ea557f3b713347230a46afc1f6c3137049 | [
"Unlicense"
] | permissive | kemingy/daily-coding-problem | ab3dcab662593fa9ce15ef49a9105cf962514189 | 0839311ec0848f8f0b4a9edba817ecceb8f944a0 | refs/heads/master | 2020-03-27T06:05:52.028030 | 2019-08-07T03:32:33 | 2019-08-07T03:32:33 | 146,078,162 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | # The transitive closure of a graph is a measure of which vertices are reachable
# from other vertices. It can be represented as a matrix M, where M[i][j] == 1
# if there is a path between vertices i and j, and otherwise 0.
# For example, suppose we are given the following graph in adjacency list form:
# graph = [
# [0, 1, 3],
# [1, 2],
# [2],
# [3]
# ]
# The transitive closure of this graph would be:
# [1, 1, 1, 1]
# [0, 1, 1, 0]
# [0, 0, 1, 0]
# [0, 0, 0, 1]
# Given a graph, find its transitive closure.
def closure(graph):
    """Compute the transitive closure of a directed graph.

    :param graph: adjacency list; ``graph[i]`` is an iterable of nodes
        reachable from node ``i`` by a single edge (the sample input also
        lists ``i`` itself, which marks each node as self-reachable).
    :return: an n x n 0/1 matrix ``M`` with ``M[i][j] == 1`` iff ``j`` is
        reachable from ``i``.
    """
    n = len(graph)
    matrix = [[0] * n for _ in range(n)]
    # Seed the matrix with the direct edges.
    for i in range(n):
        for j in graph[i]:
            matrix[i][j] = 1
    # Warshall's algorithm: allow each node k in turn to act as an
    # intermediate hop.  The original single backward sweep only propagated
    # reachability along increasing node indices, so a path such as
    # 2 -> 0 -> 1 never produced matrix[2][1]; this fixes that while keeping
    # the same interface and output format.
    for k in range(n):
        row_k = matrix[k]
        for i in range(n):
            if matrix[i][k]:
                row_i = matrix[i]
                for j in range(n):
                    if row_k[j]:
                        row_i[j] = 1
    return matrix
if __name__ == '__main__':
    # Smoke test using the adjacency list from the problem statement;
    # expected output: [[1, 1, 1, 1], [0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
    graph = [
        [0, 1, 3],
        [1, 2],
        [2,],
        [3,],
    ]
    print(closure(graph))
"kemingy94@gmail.com"
] | kemingy94@gmail.com |
b92d0a536b3759e0b69960e134bad0a246f04b01 | 641f76328bfeb7e54f0793a18c5b7c00595b98fd | /packages/qcache/store/goods/migrations/0002_goods_version_timestamp.py | 0b0f87ec3a36bf092a87088fbcb5790ff2d77058 | [
"Apache-2.0"
] | permissive | lianxiaopang/camel-store-api | 1d16060af92eb01607757c0423377a8c94c3a726 | b8021250bf3d8cf7adc566deebdba55225148316 | refs/heads/master | 2020-12-29T13:23:18.118617 | 2020-02-09T08:38:53 | 2020-02-09T08:38:53 | 238,621,246 | 0 | 0 | Apache-2.0 | 2020-02-07T14:28:35 | 2020-02-06T06:17:47 | Python | UTF-8 | Python | false | false | 414 | py | # Generated by Django 2.2.7 on 2019-12-02 09:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the auto-updating
    ``version_timestamp`` field to the ``goods.Goods`` model."""

    # Must be applied after the app's initial migration.
    dependencies = [
        ('goods', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='goods',
            name='version_timestamp',
            # auto_now=True: Django refreshes this value on every save().
            field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),
        ),
    ]
| [
"lyh@gzqichang.com"
] | lyh@gzqichang.com |
8f91fa57271dc972b722d8279dc9b614fbfcc268 | baf3996414315ffb60470c40c7ad797bf4e6897f | /02_ai/1_ml/8_ensemble_learning/code/chapter_23/10_tune_learning_rate.py | 8d53a5f6b6e2f205074bced889b430ee25fd1526 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 1,580 | py | # explore xgboost learning rate effect on performance
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from xgboost import XGBClassifier
from matplotlib import pyplot
# get the dataset
def get_dataset():
    """Create the synthetic binary-classification dataset used by this script.

    Returns the (X, y) pair produced by sklearn's make_classification with a
    fixed random_state so every run sees the same data.
    """
    # make_classification already returns an (X, y) tuple, so it can be
    # passed straight through to the caller.
    return make_classification(
        n_samples=1000,
        n_features=20,
        n_informative=15,
        n_redundant=5,
        random_state=7,
    )
# get a list of models to evaluate
def get_models():
    """Build the models to evaluate: one XGBClassifier per learning rate.

    Returns a dict mapping a zero-padded rate label (e.g. '0.0100') to the
    corresponding classifier.
    """
    # Learning rates (eta) to explore, spanning four orders of magnitude.
    rates = [0.0001, 0.001, 0.01, 0.1, 1.0]
    return {'%.4f' % rate: XGBClassifier(eta=rate) for rate in rates}
# evaluate a given model using cross-validation
def evaluate_model(model, X, y):
    """Score *model* on (X, y) with repeated stratified 10-fold CV.

    Returns the array of per-fold accuracy scores (30 values: 10 folds x 3
    repeats), computed in parallel across all cores.
    """
    folds = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    return cross_val_score(model, X, y, scoring='accuracy', cv=folds, n_jobs=-1)
# define dataset
X, y = get_dataset()
# get the models to evaluate (one per candidate learning rate)
models = get_models()
# evaluate the models and store results
results, names = list(), list()
for name, model in models.items():
    # evaluate the model
    scores = evaluate_model(model, X, y)
    # store the results
    results.append(scores)
    names.append(name)
    # summarize the performance along the way (mean +/- std accuracy)
    print('>%s %.3f (%.3f)' % (name, mean(scores), std(scores)))
# plot model performance for comparison: one box per learning rate
pyplot.boxplot(results, labels=names, showmeans=True)
pyplot.show()
"thiago.allue@yahoo.com"
] | thiago.allue@yahoo.com |
5ca887c236dfdd6747f88e3232427442936668e0 | 3d6bb3df9ca1d0de6f749b927531de0790aa2e1d | /calibrate_CN_histograms.py | f60c6b0dedfab4fd51811d34d1ef3f915ca51ac7 | [] | no_license | standardgalactic/kuhner-python | da1d66a6d638a9a379ba6bae2affdf151f8c27c5 | 30b73554cc8bc9d532c8108b34dd1a056596fec7 | refs/heads/master | 2023-07-07T04:18:30.634268 | 2020-04-06T04:37:48 | 2020-04-06T04:37:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,893 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 7 10:33:59 2016
@author: lpsmith
"""
from __future__ import division
import lucianSNPLibrary as lsl
from os import walk
nsamples_min = 21 #Arbitrary value: minimum number of samples we require
nsamples_max = 100000000
binwidth = 0.001
#indir = "CN_calc_log2rs/"
#outdir = "CN_smoothed_histograms/"
indir = "CN_calc_log2rs_rejoined/"
outdir = "CN_rejoined_histograms/"
srange = ""
if (nsamples_max > 0):
srange = "_" + str(nsamples_min) + "-" + str(nsamples_max)
# read the filtered data that compares Xiaohong's segmentation data with raw SNP data
#filenames = ["954_20016_avglog2rs.txt", "1049_20782_avglog2rs.txt"]
filenames = []
for (_, _, f) in walk(indir):
filenames += f
break
doubled = [[141, 21060], [141, 21062], [141, 21064], [163, 19208], [163, 19214], [194, 19868], [194, 19880], [450, 18974], [450, 18982], [512, 18744], [512, 18746], [512, 18748], [512, 18750], [512, 18762], [660, 19260], [660, 19262], [660, 19264], [660, 19272], [664, 19954], [772, 18944], [772, 18946], [848, 18794], [884, 20354], [884, 20358], [954, 20014], [954, 20016], [954, 20018], [991, 20600], [997, 20656], [997, 20666], [997, 20668], [997, 20672], [997, 20674], [1006, 21104], [1044, 20856], [1044, 20864], [997, 20658], [997, 20660], [997, 20662], [660, 19266], [660, 19270], [997, 20664], [740, 20000]]
#new_doubled = [[141, 21062], [163, 19208], [163, 19214], [194, 19868], [509, 19000], [512, 18748], [512, 18762], [660, 19260], [660, 19262], [660, 19264], [660, 19272], [664, 19954], [772, 18944]]
#doubled += new_doubled
#rejected_doubles = []
#doubled += rejected_doubles
# Accumulators for avg log2 ratios, bucketed by Xiaohong's copy-number call
# (and, for Loss/Double_d, by whether the genome is known to be doubled).
all_data = []
double_loss_data = []
double_loss_from_doubled_data = []
loss_data = []
loss_from_doubled_data = []
wt_data = []
gain_data = []
balanced_gain_data = []
for filename in filenames:
    # Only process per-sample avglog2r text files named <patient>_<sample>_...
    if (filename.find(".txt") == -1):
        continue
    split= filename.split("_")
    if (len(split) < 3):
        continue
    patient = split[0]
    sample = split[1]
    if (patient[0] < '0' or patient[0] > '9'):
        continue
    patient = int(patient)
    sample = int(sample)
    file = open(indir + filename, "r")
    total_n = 0
    sample_data = []
    for line in file:
        (chr, start, end, x_log2r, call, n_log2r, avg_log2r, stdev) = line.rstrip().split()
        # Skip the header row and sex chromosomes (chr >= 23).
        if (chr == "chr"):
            continue
        chr = int(chr)
        if (chr >= 23):
            continue
        # Filter segments by the number of SNPs supporting the average.
        n_log2r = int(n_log2r)
        if (n_log2r < nsamples_min):
            continue
        if (nsamples_max > 0 and n_log2r > nsamples_max):
            continue
        total_n += n_log2r
        avg_log2r = float(avg_log2r)
        all_data.append(avg_log2r)
        sample_data.append(avg_log2r)
        # Route the value into the per-call accumulator; Loss/Double_d are
        # split further by known genome-doubling status.
        if (call == "Double_d"):
            if ([patient, sample] in doubled):
                double_loss_from_doubled_data.append(avg_log2r)
            else:
                double_loss_data.append(avg_log2r)
        elif (call == "Loss"):
            if ([patient, sample] in doubled):
                loss_from_doubled_data.append(avg_log2r)
            else:
                loss_data.append(avg_log2r)
            #loss_data.append(avg_log2r)
        elif (call == "wt"):
            wt_data.append(avg_log2r)
        elif (call == "Gain"):
            gain_data.append(avg_log2r)
        elif (call == "Balanced_gain"):
            balanced_gain_data.append(avg_log2r)
        else:
            print "Unknown call ", call
    # Per-sample smoothed histogram (written but not displayed).
    lsl.createPrintAndSaveHistogram(double_loss_from_doubled_data, outdir + str(patient) + "_" + str(sample) + "_smoothhist.txt", binwidth, show=False)
# Aggregate histograms, one per call category, written to outdir.
print "Double-loss from doubled genomes histogram:"
lsl.createPrintAndSaveHistogram(double_loss_from_doubled_data, outdir + "double_loss_from_doubled_hist" + srange + ".txt", binwidth, axis=(-3.5, 1.5, 0))
print "Loss from doubled genomes histogram:"
lsl.createPrintAndSaveHistogram(loss_from_doubled_data, outdir + "loss_from_doubled_hist" + srange + ".txt", binwidth, axis=(-3.5, 1.5, 0))
print "Double-loss histogram:"
lsl.createPrintAndSaveHistogram(double_loss_data, outdir + "double_loss" + srange + ".txt", binwidth, axis=(-3.5, 1.5, 0))
print "Loss histogram:"
lsl.createPrintAndSaveHistogram(loss_data, outdir + "loss" + srange + ".txt", binwidth, axis=(-3.5, 1.5, 0))
print "WT histogram:"
lsl.createPrintAndSaveHistogram(wt_data, outdir + "wt_hist" + srange + ".txt", binwidth, axis=(-3.5, 1.5, 0))
print "Balanced gain histogram:"
lsl.createPrintAndSaveHistogram(balanced_gain_data,outdir + "balanced_gain_hist" + srange + ".txt", binwidth, axis=(-3.5, 1.5, 0))
print "Gain histogram:"
lsl.createPrintAndSaveHistogram(gain_data,outdir + "gain_hist" + srange + ".txt", binwidth, axis=(-3.5, 1.5, 0))
print "All data histogram:"
lsl.createPrintAndSaveHistogram(all_data,outdir + "all_hist" + srange + ".txt", binwidth, axis=(-3.5, 1.5, 0))
"lpsmith@uw.edu"
] | lpsmith@uw.edu |
f3831fe26da6c678dc2e49829f78175589d64d94 | 37d8802ecca37cc003053c2175f945a501822c82 | /10-贪心算法/0455.py | abdf70825a17b0ff77b8e07df26ed88d679067fd | [
"Apache-2.0"
] | permissive | Sytx74/LeetCode-Solution-Python | cc0f51e31a58d605fe65b88583eedfcfd7461658 | b484ae4c4e9f9186232e31f2de11720aebb42968 | refs/heads/master | 2020-07-04T18:17:24.781640 | 2019-07-30T03:34:19 | 2019-07-30T03:34:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | class Solution:
def findContentChildren(self, g: List[int], s: List[int]) -> int:
g.sort() # [2,3]
s.sort() # [1,2,3]
# 如果小的饼干满足不了贪心指数最小的小朋友,就放弃这个饼干
gi = 0
si = 0
res = 0
while gi < len(g) and si < len(s):
if s[si] >= g[gi]:
si += 1
gi += 1
res += 1
else:
si += 1
return res
| [
"121088825@qq.com"
] | 121088825@qq.com |
8e9b01a78a6b4215a07315e90ff3abf14419c6af | fed019ab105b513e935fc9f3f3119db5c212c565 | /algorithm/community_detection.py | 77676a894fcbe29355151a013e612afea50e7a2e | [] | no_license | superf2t/ringnet | d7194d31c22f7d5d1dd1aa46d62fca5c34a6f566 | 322942f47bdf27728bdf17e2d71122d8da13361a | refs/heads/master | 2021-01-24T02:53:44.552344 | 2012-12-25T15:22:48 | 2012-12-25T15:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,605 | py | '''
Created on Dec 19, 2012
@author: Yutao
Implementation of Girvan-Newman Community Detection Algorithm For Weighted Graphs
'''
import networkx as nx
import pkg_resources
pkg_resources.require("matplotlib")
import matplotlib.pylab as plt
import math
import csv
import random as rand
from metadata import settings
#this method just reads the graph structure from the file
def buildG(G, file_, delimiter_):
    """Populate graph G with weighted edges read from an edge-list file.

    Each row of the file is ``node1<delim>node2<delim>weight``; rows whose
    weight equals 0.0 are skipped.

    :param G: a networkx-style graph exposing ``add_edge(u, v, weight=...)``
    :param file_: path to the edge-list file
    :param delimiter_: field delimiter passed to csv.reader
    """
    # construct the weighted version of the contact graph from the edge file
    # (e.g. cgraph.dat).  Use a context manager so the file handle is closed
    # deterministically — the original left it open until garbage collection.
    with open(file_) as handle:
        for line in csv.reader(handle, delimiter=delimiter_):
            if float(line[2]) != 0.0:
                G.add_edge(int(line[0]), int(line[1]), weight=float(line[2]))
#Keep removing edges from Graph until one of the connected components of Graph splits into two.
#compute the edge betweenness
def CmtyGirvanNewmanStep(G):
    """Perform one Girvan-Newman step: repeatedly remove the edge(s) with the
    highest weighted betweenness until G gains at least one new connected
    component.  Mutates G in place.

    Betweenness is divided by edge weight, so strong (heavy) edges are less
    likely to be removed.  (Python 2 code: uses dict.iteritems().)
    """
    #print "call CmtyGirvanNewmanStep"
    init_ncomp = nx.number_connected_components(G)    #no of components
    ncomp = init_ncomp
    while ncomp <= init_ncomp:
        bw = nx.edge_betweenness_centrality(G)    #edge betweenness for G
        #find the edge with max centrality
        max_ = 0.0
        #find the edge with the highest centrality and remove all of them if there is more than one!
        for k, v in bw.iteritems():
            _BW = float(v)/float(G[k[0]][k[1]]['weight'])    #weighted version of betweenness
            if _BW >= max_:
                max_ = _BW
        for k, v in bw.iteritems():
            # Remove every edge tied for the maximum weighted betweenness.
            if float(v)/float(G[k[0]][k[1]]['weight']) == max_:
                G.remove_edge(k[0],k[1])    #remove the central edge
        ncomp = nx.number_connected_components(G)    #recalculate the no of components
#compute the modularity of current split
def _GirvanNewmanGetModularity(G, deg_):
    """Return Newman modularity Q for G's current partition into connected
    components.

    :param G: the (partially edge-removed) working graph
    :param deg_: weighted degrees of the ORIGINAL graph, used for the
        expected ("random") edge term
    NOTE(review): relies on the module-level globals ``n`` (node count,
    via UpdateDeg) and ``m_`` (total edge weight) defined in the script
    body below — confirm before reusing outside this script.
    """
    New_A = nx.adj_matrix(G)
    New_deg = {}
    UpdateDeg(New_deg, New_A)
    #Let's compute the Q
    comps = nx.connected_components(G)    #list of components
    print 'no of comp: %d' % len(comps)
    Mod = 0    #Modularity of a given partitionning
    for c in comps:
        EWC = 0    #no of edges within a community (sum of within-component weighted degrees = 2x internal edge weight)
        RE = 0    #no of random edges
        for u in c:
            EWC += New_deg[u]
            RE += deg_[u]    #count the probability of a random edge
        Mod += ( float(EWC) - float(RE*RE)/float(2*m_) )
    Mod = Mod/float(2*m_)
    #print "Modularity: %f" % Mod
    return Mod
def UpdateDeg(deg_, A_):
    """Fill deg_ (a dict keyed by node index) with each node's weighted
    degree, computed by summing the corresponding row of adjacency matrix A_.

    NOTE(review): iterates over the module-level global ``n`` and assumes
    A_ is n x n and indexable as A_[i, j] — confirm before reuse.
    """
    for i in range(0,n):
        deg = 0.0
        for j in range(0,n):
            deg += A_[i,j]
        deg_[i] = deg
#let's create a graph and insert the edges
G = nx.Graph()
buildG(G, settings.GRAPH_PATH+'\\1999', ' ')
#G = nx.read_edgelist('E:\\My Projects\\Eclipse Workspace\\ringnet\\data\\graph\\1999')
n = G.number_of_nodes()    #|V|
#adjacenct matrix
A = nx.adj_matrix(G)

# m_ is the weighted version of the number of edges: half the sum of all
# adjacency-matrix entries (each edge counted twice in a symmetric matrix).
m_ = 0.0    #the weighted version for number of edges
for i in range(0,n):
    for j in range(0,n):
        m_ += A[i,j]
m_ = m_/2.0
print "m: %f" % m_

#calculate the weighted degree for each node (of the ORIGINAL graph)
Orig_deg = {}
UpdateDeg(Orig_deg, A)

#let's find the best split of the graph:
# keep removing edges (Girvan-Newman) and remember the partition with the
# highest modularity seen along the way.
BestQ = 0.0
Q = 0.0
while True:
    CmtyGirvanNewmanStep(G)
    Q = _GirvanNewmanGetModularity(G, Orig_deg);
    print "current modularity: %f" % Q
    if Q > BestQ:
        BestQ = Q
        Bestcomps = nx.connected_components(G)    #Best Split
        print "comps:"
        print Bestcomps
    if G.number_of_edges() == 0:
        break
if BestQ > 0.0:
    print "Best Q: %f" % BestQ
    print Bestcomps
else:
    print "Best Q: %f" % BestQ
6753857b3ae30e5fc86eb478872920d47de427dc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_nervier.py | 50e82697190213ebc90d28e9ab11efd7627c8f7f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py |
from xai.brain.wordbase.adjectives._nervy import _NERVY
# class header
class _NERVIER(_NERVY, ):
    """Word entry for the comparative adjective "nervier"; inherits the
    behavior of its base form's class, _NERVY."""

    def __init__(self,):
        _NERVY.__init__(self)
        self.name = "NERVIER"          # canonical (upper-case) entry name
        self.specie = 'adjectives'     # part-of-speech bucket
        self.basic = "nervy"           # base (positive) form of the adjective
        self.jsondata = {}             # placeholder for per-word JSON payload
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7165431f6198b23aa6e71561e48cfa8437fcba30 | 55173732ce1f2537a4fd8a6137b2a813f594b250 | /azure-mgmt-logic/azure/mgmt/logic/models/workflow.py | e29da68ee3c7ab95fd4d64e671e2c476bfbbee04 | [
"Apache-2.0"
] | permissive | dipple/azure-sdk-for-python | ea6e93b84bfa8f2c3e642aecdeab9329658bd27d | 9d746cb673c39bee8bd3010738c37f26ba6603a4 | refs/heads/master | 2020-02-26T15:32:39.178116 | 2016-03-01T19:25:05 | 2016-03-01T19:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,982 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Workflow(Resource):
    """Workflow

    :param str id: Gets or sets the resource id.
    :param str name: Gets the resource name.
    :param str type: Gets the resource type.
    :param str location: Gets or sets the resource location.
    :param dict tags: Gets or sets the resource tags.
    :param str provisioning_state: Gets the provisioning state. Possible
     values include: 'NotSpecified', 'Moving', 'Succeeded'
    :param datetime created_time: Gets the created time.
    :param datetime changed_time: Gets the changed time.
    :param str state: Gets or sets the state. Possible values include:
     'NotSpecified', 'Enabled', 'Disabled', 'Deleted', 'Suspended'
    :param str version: Gets the version.
    :param str access_endpoint: Gets the access endpoint.
    :param Sku sku: Gets or sets the sku.
    :param ContentLink definition_link: Gets or sets the link to definition.
    :param object definition: Gets or sets the definition.
    :param ContentLink parameters_link: Gets or sets the link to parameters.
    :param dict parameters: Gets or sets the parameters.
    """

    # No properties are mandatory for this generated model.
    _required = []

    # Serialization map: attribute -> JSON key, wire type, and whether the
    # value lives under the flattened 'properties' envelope.
    _attribute_map = {
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'WorkflowProvisioningState', 'flatten': True},
        'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601', 'flatten': True},
        'changed_time': {'key': 'properties.changedTime', 'type': 'iso-8601', 'flatten': True},
        'state': {'key': 'properties.state', 'type': 'WorkflowState', 'flatten': True},
        'version': {'key': 'properties.version', 'type': 'str', 'flatten': True},
        'access_endpoint': {'key': 'properties.accessEndpoint', 'type': 'str', 'flatten': True},
        'sku': {'key': 'properties.sku', 'type': 'Sku', 'flatten': True},
        'definition_link': {'key': 'properties.definitionLink', 'type': 'ContentLink', 'flatten': True},
        'definition': {'key': 'properties.definition', 'type': 'object', 'flatten': True},
        'parameters_link': {'key': 'properties.parametersLink', 'type': 'ContentLink', 'flatten': True},
        'parameters': {'key': 'properties.parameters', 'type': '{WorkflowParameter}', 'flatten': True},
    }

    def __init__(self, id=None, name=None, type=None, location=None, tags=None, provisioning_state=None, created_time=None, changed_time=None, state=None, version=None, access_endpoint=None, sku=None, definition_link=None, definition=None, parameters_link=None, parameters=None):
        # Resource-level fields (id/name/type/location/tags) are handled by
        # the base class.
        super(Workflow, self).__init__(id=id, name=name, type=type, location=location, tags=tags)
        self.provisioning_state = provisioning_state
        self.created_time = created_time
        self.changed_time = changed_time
        self.state = state
        self.version = version
        self.access_endpoint = access_endpoint
        self.sku = sku
        self.definition_link = definition_link
        self.definition = definition
        self.parameters_link = parameters_link
        self.parameters = parameters
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
ebfd9a9dfa0d0b83abbaeea867ebb361b67b0170 | 7cdd4ebfb492b6f50f8dba8c4adde9058d4d0105 | /thank_you_stars/_extractor.py | d96c781d19982fdc791cd2497c707bed696d2ea9 | [
"MIT"
] | permissive | jayvdb/thank-you-stars | 74b1321c258680d466aeba778cd719ec3a91ccf8 | 9dbeec873839a0472735dfdc38eeea8bcb0eb199 | refs/heads/master | 2021-05-21T07:44:44.142527 | 2020-04-02T13:43:41 | 2020-04-02T13:43:41 | 252,606,147 | 0 | 0 | MIT | 2020-04-03T01:40:35 | 2020-04-03T01:40:34 | null | UTF-8 | Python | false | false | 16,314 | py | import re
from collections import namedtuple
from difflib import SequenceMatcher
import msgfy
import retryrequests
import simplejson as json
from github.GithubException import RateLimitExceededException, UnknownObjectException
from mbstrdecoder import MultiByteStrDecoder
from pathvalidate import sanitize_filename
from tqdm import tqdm
from ._cache import CacheType, touch
from ._common import get_github_repo_id
from ._const import StarStatus
from ._logger import logger
from ._pip_show import PipShow
# Lightweight record pairing a GitHub login with the contributor's display name.
Contributor = namedtuple("Contributor", "login_name full_name")
class _GitHubRepoInfo(
    namedtuple("_GitHubRepoInfo", "owner_name repo_name repo_id url match_endpos")
):
    """Immutable record describing a GitHub repository reference.

    Fields: owner_name, repo_name, repo_id ("owner/name"), url, and
    match_endpos (position bookkeeping from the regex that found the URL,
    or None when not applicable).
    """

    def equals_repo_name(self, name):
        """Case-insensitively compare *name* against this repository's name."""
        lhs = self.repo_name.lower()
        rhs = name.lower()
        return lhs == rhs
class GitHubStarredInfo(
    namedtuple("GitHubStarredInfo", "pypi_pkg_name github_repo_id star_status is_owned url")
):
    """Star status of the GitHub repository backing one PyPI package."""

    def asdict(self):
        """Return the record as a mapping of field name to value."""
        return self._asdict()

    def validate(self):
        """Raise ValueError unless star_status is one of the known states."""
        known_statuses = (
            StarStatus.STARRED,
            StarStatus.NOT_STARRED,
            StarStatus.NOT_FOUND,
            StarStatus.NOT_AVAILABLE,
        )
        if self.star_status in known_statuses:
            return

        raise ValueError("invalid value: {}".format(self.star_status))
class GithubStarredInfoExtractor:
    """Resolves PyPI packages to GitHub repositories and their star status."""

    # Similarity threshold (0..1) — presumably compared against a
    # SequenceMatcher ratio in fuzzy repo-name matching further down this
    # class; confirm against the full file.
    _MATCH_THRESHOLD = 0.6
    @property
    def repo_depth_map(self):
        """Mapping of PyPI package name -> shallowest dependency depth seen."""
        return self.__repo_depth_map
    def __init__(self, github_client, max_depth, cache_mgr_map, starred_repo_id_list):
        """
        :param github_client: authenticated PyGithub client.
        :param max_depth: how many dependency levels to traverse (>= 0).
        :param cache_mgr_map: cache managers keyed by CacheType
            (GITHUB/PYPI/PIP).
        :param starred_repo_id_list: repo ids the user has already starred.
        :raises ValueError: if max_depth is negative.
        """
        self.__github_client = github_client
        self.__github_user = github_client.get_user()
        self.__max_depth = max_depth
        self.__starred_repo_id_list = starred_repo_id_list
        # package name -> shallowest dependency depth at which it was seen
        self.__repo_depth_map = {}

        self.__github_cache_mgr = cache_mgr_map[CacheType.GITHUB]
        self.__pypi_cache_mgr = cache_mgr_map[CacheType.PYPI]
        # PipShow caches via a class attribute shared by all instances.
        PipShow.cache_mgr = cache_mgr_map[CacheType.PIP]

        if self.__max_depth < 0:
            raise ValueError("max_depth must be greater or equal to zero")

        # Matches GitHub repository URLs and captures owner and repo name.
        self.__github_repo_url_regexp = re.compile(
            "http[s]?://github.com/(?P<user_name>[a-zA-Z0-9][a-zA-Z0-9-]*?)/(?P<repo_name>[a-zA-Z0-9-_.]+)",
            re.MULTILINE,
        )
    def list_pypi_packages(self, pypi_pkg_name_queue):
        """Breadth-first traversal of the dependency tree.

        Pops (package_name, depth) pairs from the queue, records the
        shallowest depth for each package in __repo_depth_map, and enqueues
        each package's requirements one level deeper (up to __max_depth).
        A tqdm bar advances once per completed depth level.

        :param pypi_pkg_name_queue: mutable list of (name, depth) tuples;
            consumed (emptied) by this method.
        """
        prev_depth = None
        total = self.__max_depth + 1
        i = 0

        with tqdm(desc="Collect package info", total=total) as pbar:
            while pypi_pkg_name_queue:
                pypi_pkg_name, depth = pypi_pkg_name_queue.pop(0)
                # Advance the progress bar whenever the BFS moves to a new
                # depth level (queue is processed in depth order).
                if prev_depth is None:
                    prev_depth = depth
                elif prev_depth != depth:
                    i += 1
                    pbar.update(1)
                    prev_depth = depth

                if pypi_pkg_name in self.__repo_depth_map:
                    logger.debug("skip: already checked: {}".format(pypi_pkg_name))
                    # Keep the shallowest depth at which the package appears.
                    self.__repo_depth_map[pypi_pkg_name] = min(
                        depth, self.__repo_depth_map[pypi_pkg_name]
                    )
                    continue

                self.__repo_depth_map[pypi_pkg_name] = depth
                pip_show = PipShow.execute(pypi_pkg_name)

                if depth < self.__max_depth:
                    for require_package in pip_show.extract_requires():
                        # recursively search repositories
                        pypi_pkg_name_queue.append((require_package.lower(), depth + 1))

            # Flush any remaining progress so the bar always completes.
            while i < total:
                i += 1
                pbar.update(1)
    def extract_starred_info(self, pypi_pkg_name):
        """Resolve the star status of the GitHub repo behind a PyPI package.

        Resolution order:
          1. per-package JSON cache (validated before use; falls through on
             TypeError/ValueError);
          2. a GitHub URL found in ``pip show`` output;
          3. a deeper traversal via __traverse_github_repo.
        Always returns a GitHubStarredInfo; NOT_FOUND when no repository
        could be located, NOT_AVAILABLE when the GitHub API rate limit is
        exhausted.
        """
        cache_filepath = self.__pypi_cache_mgr.get_pkg_cache_filepath(pypi_pkg_name, "starred_info")
        if self.__github_cache_mgr.is_cache_available(cache_filepath):
            cache_data = self.__github_cache_mgr.load_json(cache_filepath)
            if cache_data:
                try:
                    info = GitHubStarredInfo(**cache_data)
                    info.validate()
                    return info
                except (TypeError, ValueError) as e:
                    # Stale/invalid cache entry: ignore it and re-resolve.
                    logger.debug("failed to load cache: {}".format(msgfy.to_debug_message(e)))

        pip_show = PipShow.execute(pypi_pkg_name)
        github_repo_info = self.__find_github_repo_info_from_text(pip_show.content)
        if github_repo_info:
            return self.__register_starred_status(pypi_pkg_name, github_repo_info, depth=0)

        try:
            starred_info = self.__traverse_github_repo(pip_show, pypi_pkg_name, depth=0)
            if starred_info:
                return starred_info

            return GitHubStarredInfo(
                pypi_pkg_name=pypi_pkg_name,
                github_repo_id="[Repository not found]",
                star_status=StarStatus.NOT_FOUND,
                is_owned=None,
                url=None,
            )
        except RateLimitExceededException as e:
            logger.error(msgfy.to_error_message(e))

            return GitHubStarredInfo(
                pypi_pkg_name=pypi_pkg_name,
                github_repo_id="Exceed API rate limit",
                star_status=StarStatus.NOT_AVAILABLE,
                is_owned=None,
                url=None,
            )
def __extract_github_repo_info(self, repo):
owner_name = repo.owner.login
repo_name = repo.name
repo_id = "{}/{}".format(owner_name, repo_name)
return _GitHubRepoInfo(
owner_name=owner_name,
repo_name=repo_name,
repo_id=repo_id,
url="https://github.com/{}".format(repo_id),
match_endpos=None,
)
@staticmethod
def __normalize_pkg_name(name):
return re.sub("python", "", name, flags=re.IGNORECASE).lower()
def __fetch_pypi_info(self, pypi_pkg_name):
cache_filepath = self.__pypi_cache_mgr.get_pkg_cache_filepath(pypi_pkg_name, "pypi_desc")
if self.__pypi_cache_mgr.is_cache_available(cache_filepath):
logger.debug("load PyPI info cache: {}".format(cache_filepath))
cache_data = self.__pypi_cache_mgr.load_json(cache_filepath)
if cache_data:
return cache_data
r = retryrequests.get("https://pypi.org/pypi/{}/json".format(pypi_pkg_name))
if r.status_code != 200:
return None
pypi_info = r.json().get("info")
with cache_filepath.open(mode="w") as f:
logger.debug("write PyPI info cache: {}".format(cache_filepath))
f.write(json.dumps(pypi_info))
return pypi_info
def __find_github_repo_info_from_text(self, text, pos=0):
match = self.__github_repo_url_regexp.search(text, pos)
if not match:
return None
owner_name = match.group("user_name")
repo_name = match.group("repo_name")
repo_id = "{}/{}".format(owner_name, repo_name)
negative_cache_filepath = self.__github_cache_mgr.get_misc_cache_filepath(
repo_id, "negative"
)
if self.__github_cache_mgr.is_cache_available(negative_cache_filepath):
return None
try:
repo_obj = self.__github_client.get_repo(repo_id) # noqa: W0612
except UnknownObjectException as e:
if e.status == 404:
logger.debug(
"create negative cache for a GitHub repo: {}".format(negative_cache_filepath)
)
touch(negative_cache_filepath)
return None
raise
return _GitHubRepoInfo(
owner_name=owner_name,
repo_name=repo_name,
repo_id=repo_id,
url="https://github.com/{}".format(repo_id),
match_endpos=match.endpos,
)
def __traverse_github_repo(self, pip_show, pypi_pkg_name, depth):
pypi_info = self.__fetch_pypi_info(pypi_pkg_name)
negative_cache_filepath = self.__pypi_cache_mgr.get_pkg_cache_filepath(
pypi_pkg_name, "negative"
)
if self.__github_cache_mgr.is_cache_available(negative_cache_filepath):
logger.debug(
" negative cache for a PyPI package found: {}".format(negative_cache_filepath)
)
return None
if pypi_info:
pos = 0
while True:
github_repo_info = self.__find_github_repo_info_from_text(
pypi_info.get("description"), pos
)
if not github_repo_info:
break
pos = github_repo_info.match_endpos
if github_repo_info.equals_repo_name(pypi_pkg_name):
return self.__register_starred_status(pypi_pkg_name, github_repo_info, depth)
logger.debug("search at github: {}".format(pypi_pkg_name))
results = self.__github_client.search_repositories(
query="{} language:python".format(pypi_pkg_name), sort="stars", order="desc"
)
author_name = pip_show.extract_author()
author_email = pypi_info.get("author_email")
for i, repo in enumerate(results.get_page(0)):
if self.__calc_match_ratio(pypi_pkg_name, repo.name) < self._MATCH_THRESHOLD:
continue
if i > 4:
break
github_repo_info = self.__extract_github_repo_info(repo)
if self.__search_github_repo(
github_repo_info.repo_id, author_name, "author_name"
) and self.__search_github_repo(
github_repo_info.repo_id, author_email, "author_email"
):
return self.__register_starred_status(pypi_pkg_name, github_repo_info, depth)
try:
if author_email.rsplit(".", 1)[0] == repo.organization.email.rsplit(".", 1)[0]:
return self.__register_starred_status(
pypi_pkg_name, github_repo_info, depth
)
except AttributeError:
pass
if self.__search_contributor_github(repo, pypi_pkg_name, author_name):
return self.__register_starred_status(pypi_pkg_name, github_repo_info, depth)
logger.debug(
"create negative cache for a PyPI package: {}".format(negative_cache_filepath)
)
touch(negative_cache_filepath)
return None
def __calc_match_ratio(self, a, b):
if not a or not b:
return 0
return SequenceMatcher(
a=self.__normalize_pkg_name(a), b=self.__normalize_pkg_name(b)
).ratio()
def __match_contributor(self, repo_id, pip_author_name, github_contributor_name):
for author_name in pip_author_name.split(", "):
match_ratio = self.__calc_match_ratio(author_name, github_contributor_name)
if match_ratio >= self._MATCH_THRESHOLD:
logger.debug(
"found contributor: repo={}, github_user={}, pip_author={}, match_ratio={}".format(
repo_id, github_contributor_name, author_name, match_ratio
)
)
return True
return False
def __search_github_repo(self, repo_id, search_value, category_name):
cache_filepath = self.__github_cache_mgr.get_misc_cache_filepath(
"/".join([repo_id, category_name]), sanitize_filename(search_value)
)
msg_template = "source {result} include {category}: repo={repo} path={path}"
if self.__github_cache_mgr.is_cache_available(cache_filepath):
with cache_filepath.open() as f:
try:
if int(f.read()):
logger.debug(
msg_template.format(
result="found",
category=category_name,
repo=repo_id,
path=cache_filepath,
)
)
return True
else:
logger.debug(
msg_template.format(
result="not found",
category=category_name,
repo=repo_id,
path=cache_filepath,
)
)
return False
except ValueError as e:
logger.warn(msgfy.to_error_message(e))
query = "{} in:file language:python repo:{}".format(search_value, repo_id)
logger.debug("search {}: {}".format(category_name, query))
results = self.__github_client.search_code(query)
search_regexp = re.compile(search_value, re.MULTILINE)
with cache_filepath.open(mode="w") as f:
for content_file in results.get_page(0):
decoded_content = MultiByteStrDecoder(content_file.decoded_content).unicode_str
if not search_regexp.search(decoded_content):
continue
logger.debug(
msg_template.format(
result="found", category=category_name, repo=repo_id, path=content_file.path
)
)
f.write("1")
return True
f.write("0")
return False
def __search_contributor_github(self, repo, pypi_pkg_name, author_name):
repo_id = get_github_repo_id(repo)
cache_filepath = self.__github_cache_mgr.get_misc_cache_filepath(repo_id, "contributors")
if self.__github_cache_mgr.is_cache_available(cache_filepath):
logger.debug("load contributors cache: {}".format(cache_filepath))
with cache_filepath.open() as f:
for line in f:
contributor = Contributor(**json.loads(line))
if self.__match_contributor(repo_id, author_name, contributor.full_name):
return True
if self.__match_contributor(repo_id, author_name, contributor.login_name):
return True
logger.debug(
"contributor not found in the contributors cache: pkg={}, author={}".format(
pypi_pkg_name, author_name
)
)
return False
logger.debug("find contributors: {}".format(repo_id))
with cache_filepath.open(mode="w") as f:
for contributor in repo.get_contributors():
contributor_map = {"login_name": contributor.login, "full_name": contributor.name}
f.write("{}\n".format(json.dumps(contributor_map)))
for contributor_name in (contributor.name, contributor.login):
if self.__match_contributor(repo_id, author_name, contributor_name):
logger.debug(
"found contributor: auth={}, contributor={}".format(
author_name, contributor_name
)
)
return True
logger.debug("author not found in the github repository: {}".format(repo_id))
return False
def __register_starred_status(self, pypi_pkg_name, repo_info, depth):
repo_id = repo_info.repo_id
logger.debug("found a GitHub repository: {}".format(repo_id))
starred_info = GitHubStarredInfo(
pypi_pkg_name=pypi_pkg_name,
github_repo_id=repo_id,
star_status=StarStatus.STARRED
if repo_id in self.__starred_repo_id_list
else StarStatus.NOT_STARRED,
is_owned=self.__github_user.login == repo_info.owner_name,
url=repo_info.url,
)
cache_filepath = self.__pypi_cache_mgr.get_pkg_cache_filepath(pypi_pkg_name, "starred_info")
logger.debug("write starred_info cache: {}".format(cache_filepath))
with cache_filepath.open(mode="w") as f:
json.dump(starred_info.asdict(), f, indent=4)
return starred_info
| [
"tsuyoshi.hombashi@gmail.com"
] | tsuyoshi.hombashi@gmail.com |
abf5cb941fac1874cfb1518b1df89f92c70bdc1b | ae7ba9c83692cfcb39e95483d84610715930fe9e | /jw2013/Leetcode-Py/Word Ladder II.py | f25c932467a340d39233741d56bf9fa9a057abd0 | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | class Solution:
def backtrack(self, result, trace, path, word):
if len(trace[word]) == 0:
result.append([word] + path)
else:
for prev in trace[word]:
self.backtrack(result, trace, [word] + path, prev)
def findLadders(self, start, end, dict):
result, trace, current = [], {word: [] for word in dict}, set([start])
while current and end not in current:
for word in current:
dict.remove(word)
next = set([])
for word in current:
for i in range(len(word)):
for j in 'abcdefghijklmnopqrstuvwxyz':
candidate = word[:i] + j + word[i + 1:]
if candidate in dict:
trace[candidate].append(word)
next.add(candidate)
current = next
if current:
self.backtrack(result, trace, [], end)
return result | [
"xenron@outlook.com"
] | xenron@outlook.com |
11f8805a3402a80099be4d5d5858bfcfe7aa76e9 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /updated_debug_branch_loop_mcs/interreplay_26_r.3/replay_config.py | 16dc3f2fb2d3bad8277505363a2b8eb9c6866f0c | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose sts.syncproto.pox_syncer --blocking=False openflow.discovery forwarding.l2_multi sts.util.socket_mux.pox_monkeypatcher openflow.of_01 --address=__address__ --port=__port__', address='127.0.0.1', port=6633, cwd='pox', sync='tcp:localhost:18899')],
topology_class=MeshTopology,
topology_params="num_switches=2",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=True)
control_flow = Replayer(simulation_config, "experiments/updated_debug_branch_loop_mcs/interreplay_26_r.3/events.trace",
wait_on_deterministic_values=False)
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
2c0f1cf4b9b357f1424d5f1ef20680b82fdd0856 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /know_problem_over_group/hand_or_young_way/able_case/fact.py | 5e6e5cdadb856652ee5e1124bc52d79def4c99ec | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
#! /usr/bin/env python
def time(str_arg):
get_case_with_case(str_arg)
print('hand')
def get_case_with_case(str_arg):
print(str_arg)
if __name__ == '__main__':
time('week')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
b0b16093569febbff40973724100b76c6c799077 | 79910ec27631b7e1f7f36eef0ab453ab66ebdc1b | /smartcontract/forms.py | 50cf4d81e6f670820991a3be7380302b09af2e5f | [] | no_license | SaurabhAgarwala/VeriBuy | bdbe2d93cff7e5335e09f3946dd1566c82ef599b | f971a21752dfd1c9ca9c143beb2ac30bdfa22b41 | refs/heads/main | 2023-01-13T00:57:10.869994 | 2020-11-04T16:47:58 | 2020-11-04T16:47:58 | 309,657,968 | 0 | 0 | null | 2020-11-03T14:01:19 | 2020-11-03T11:00:02 | Python | UTF-8 | Python | false | false | 290 | py | from django import forms
from . import models
class ProductForm(forms.ModelForm):
class Meta:
model = models.Product
fields = ['name', 'desc', 'retailer']
class EditOwnerForm(forms.ModelForm):
class Meta:
model = models.Product
fields = ['owner']
| [
"saur.agarwala@gmail.com"
] | saur.agarwala@gmail.com |
3cf80b733c974ec0f5aaaa1bc68f553ec0617fcd | 3b239e588f2ca6e49a28a63d906dd8dd26173f88 | /code/dlgo/gtp/play_local.py | cc9bdc1acbc90d702fa98051d9d530a2ce4c61b1 | [] | no_license | Angi16/deep_learning_and_the_game_of_go | 3bbf4f075f41359b87cb06fe01b4c7af85837c18 | ba63d5e3f60ec42fa1088921ecf93bdec641fd04 | refs/heads/master | 2020-03-23T16:02:47.431241 | 2018-07-21T02:57:16 | 2018-07-21T02:57:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,604 | py | from __future__ import print_function
# tag::play_local_imports[]
import subprocess
import re
import h5py
from dlgo.agent.predict import load_prediction_agent
from dlgo.agent.termination import PassWhenOpponentPasses, TerminationAgent
from dlgo.goboard_fast import GameState, Move
from dlgo.gotypes import Player
from dlgo.gtp.board import gtp_position_to_coords, coords_to_gtp_position
from dlgo.gtp.utils import SGFWriter
from dlgo.utils import print_board
from dlgo.scoring import compute_game_result
# end::play_local_imports[]
# TODO: py3 problems with strings
# tag::play_local_init[]
class LocalGtpBot():
def __init__(self, agent, termination=None, handicap=0,
opponent='gnugo', output_sgf="out.sgf",
our_color='b'):
self.bot = TerminationAgent(agent, termination) # <1>
self.handicap = handicap
self._stopped = False # <2>
self.game_state = GameState.new_game(19)
self.sgf = SGFWriter(output_sgf) # <3>
self.our_color = Player.black if 'b' else Player.white
self.their_color = self.our_color.other
cmd = self.opponent_cmd(opponent) # <4>
pipe = subprocess.PIPE
self.gtp_stream = subprocess.Popen(
cmd, stdin=pipe, stdout=pipe # <5>
)
def opponent_cmd(self, opponent):
if opponent == 'gnugo':
return ["gnugo", "--mode", "gtp"]
elif opponent == 'pachi':
return ["pachi"]
else:
raise ValueError("Unknown bot name {}".format(opponent))
# <1> We initialize a bot from an agent and a termination strategy.
# <2> We play until the game is stopped by one of the players.
# <3> At the end we write the the game to the provided file in SGF format
# <4> Our opponent will either be GNU Go or Pachi.
# <5> We read and write GTP commands from the command line.
# end::play_local_init[]
# tag::play_local_commands[]
def send_command(self, cmd):
self.gtp_stream.stdin.write(cmd.encode('utf-8'))
def get_response(self):
succeeded = False
result = ''
while succeeded == False:
line = self.gtp_stream.stdout.readline()
if line[0] == '=':
succeeded = True
line = line.strip()
result = re.sub('^= ?', '', line)
return result
def command_and_response(self, cmd):
self.send_command(cmd)
return self.get_response()
# end::play_local_commands[]
# tag::play_local_run[]
def run(self):
self.command_and_response("boardsize 19\n")
self.set_handicap()
self.play()
self.sgf.write_sgf()
def set_handicap(self):
if(self.handicap == 0):
self.command_and_response("komi 7.5\n")
self.sgf.append("KM[7.5]\n")
else:
stones = self.command_and_response("fixed_handicap {}\n".format(self.handicap))
sgf_handicap = "HA[{}]AB".format(self.handicap)
for pos in stones.split(" "):
move = gtp_position_to_coords(pos)
self.game_state = self.game_state.apply_move(move)
sgf_handicap = sgf_handicap + "[" + self.sgf.coordinates(move) + "]"
self.sgf.append(sgf_handicap + "\n")
# end::play_local_run[]
# tag::play_local_play[]
def play(self):
while not self._stopped:
if(self.game_state.next_player == self.our_color):
self.play_our_move()
else:
self.play_their_move()
print(chr(27) + "[2J")
print_board(self.game_state.board)
print("Estimated result: ")
print(compute_game_result(self.game_state))
# end::play_local_play[]
# tag::play_local_our[]
def play_our_move(self):
move = self.bot.select_move(self.game_state)
self.game_state = self.game_state.apply_move(move)
our_name = self.our_color.name
our_letter = our_name[0].upper()
sgf_move = ""
if move.is_pass:
self.command_and_response("play {} pass\n".format(our_name))
elif move.is_resign:
self.command_and_response("play {} resign\n".format(our_name))
else:
pos = coords_to_gtp_position(move)
self.command_and_response("play {} {}\n".format(our_name, pos))
sgf_move = self.sgf.coordinates(move)
self.sgf.append(";{}[{}]\n".format(our_letter, sgf_move))
# end::play_local_our[]
# tag::play_local_their[]
def play_their_move(self):
their_name = self.their_color.name
their_letter = their_name[0].upper()
pos = self.command_and_response("genmove {}\n".format(their_name))
if(pos.lower() == 'resign'):
self.game_state = self.game_state.apply_move(Move.resign())
self._stopped = True
elif(pos.lower() == 'pass'):
self.game_state = self.game_state.apply_move(Move.pass_turn())
self.sgf.append(";{}[]\n".format(their_letter))
if self.game_state.last_move.is_pass:
self._stopped = True
else:
move = gtp_position_to_coords(pos)
self.game_state = self.game_state.apply_move(move)
self.sgf.append(";{}[{}]\n".format(their_letter, self.sgf.coordinates(move)))
# end::play_local_their[]
if __name__ == "__main__":
agent = load_prediction_agent(h5py.File("../../agents/betago.hdf5", "r"))
gnu_go = LocalGtpBot(agent=agent, termination=PassWhenOpponentPasses(),
handicap=0, opponent='pachi', )
gnu_go.run()
| [
"max.pumperla@googlemail.com"
] | max.pumperla@googlemail.com |
16a959f9551c63b9e032874c0a95782b2913e825 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1762390/homework02/program01.py | 484371a7177473da3ea507bbb64ce466f2882439 | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,633 | py | '''
I post di un forum sono raccolti in alcuni file che hanno il seguente formato.
Un file contiene uno o piu' post, l'inizio di un post e' marcato da una linea che contiene
in sequenza le due sottostringhe "<POST>" ed "N" (senza virgolette) eventualmente
inframmezzate, precedute e/o seguite da 0,1 o piu' spazi.
"N" e' l'ID del post (un numero positivo).
Il contenuto del post e' nelle linee successive fino alla linea che marca il prossimo post
o la fine del file (si veda ad esempio il file "file01.txt").
E' assicurato che la stringa "<POST>" non e' contenuta in nessun post.
Nel seguito per parola intendiamo come al solito una qualsiasi sequenza di caratteri
alfabetici di lunghezza massimale. I caratteri alfabetici sono quelli per cui
ritorna True il metodo isalpha().
Scrivere una funzione post(fposts,insieme) che prende in input:
- il percorso di un file (fposts)
- ed un insieme di parole (insieme)
e che restituisce un insieme (risultato).
L'insieme restituito (risultato) dovra' contenere gli identificativi (ID) dei post
che contengono almeno una parola dell'inseme in input.
Due parole sono considerate uguali anche se alcuni caratteri alfabetici compaiono in una
in maiuscolo e nell'altra in minuscolo.
Per gli esempi vedere il file grade.txt
AVVERTENZE:
non usare caratteri non ASCII, come le lettere accentate;
non usare moduli che non sono nella libreria standard.
NOTA: l'encoding del file e' 'utf-8'
ATTENZIONE: Se un test del grader non termina entro 10 secondi il punteggio di quel test e' zero.
'''
def post(fposts,insieme):
messaggio = ""
listaFinale = []
with open(fposts, 'r', encoding = 'utf8') as file:
listaPost = []
for f in file:
if '<POST>' in f:
stringa = f.replace('\n', "").replace(" ", "").replace('<POST>', "")
listaPost.append(stringa)
else:
messaggio += f.replace("\n", "").replace("!", "").replace("£", "").replace("$","").replace("%","").replace("/","").replace("(","").replace(")","").replace("^","").replace(",","").replace("{","").replace("}","").replace(";","").replace(".","").replace(":","").replace("_","").replace("-","").replace("?","")
messaggio = messaggio.lower()
msg = messaggio.split(" ")
for m in msg:
for i in insieme:
if m == i.lower() :
listaFinale.append(str(listaPost[len(listaPost) - 1]))
messaggio = ""
return set(listaFinale)
| [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
a2123dd872e54d5bfb69f05e3c75c3e39798d898 | e41651d8f9b5d260b800136672c70cb85c3b80ff | /Notification_System/temboo/Library/Google/Contacts/DeleteContact.py | 78bb4c79c9ac5d784d8bf6d9ad842b30e04edc0a | [] | no_license | shriswissfed/GPS-tracking-system | 43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c | 1c5e90a483386bd2e5c5f48f7c5b306cd5f17965 | refs/heads/master | 2020-05-23T03:06:46.484473 | 2018-10-03T08:50:00 | 2018-10-03T08:50:00 | 55,578,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,655 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# DeleteContact
# Deletes a specified contact.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteContact(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteContact Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteContact, self).__init__(temboo_session, '/Library/Google/Contacts/DeleteContact')
def new_input_set(self):
return DeleteContactInputSet()
def _make_result_set(self, result, path):
return DeleteContactResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteContactChoreographyExecution(session, exec_id, path)
class DeleteContactInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteContact
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) The access token retrieved in the last step of the OAuth process. Access tokens that are expired will be refreshed and returned in the Choreo output.)
"""
super(DeleteContactInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((required, string) The OAuth client ID provided by Google when you register your application.)
"""
super(DeleteContactInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((required, string) The OAuth client secret provided by Google when you registered your application.)
"""
super(DeleteContactInputSet, self)._set_input('ClientSecret', value)
def set_ContactID(self, value):
"""
Set the value of the ContactID input for this Choreo. ((required, string) The unique ID string for the contact you want to delete.)
"""
super(DeleteContactInputSet, self)._set_input('ContactID', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((required, string) The refresh token retrieved in the last step of the OAuth process. This is used when an access token is expired or not provided.)
"""
super(DeleteContactInputSet, self)._set_input('RefreshToken', value)
class DeleteContactResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteContact Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_AccessToken(self):
"""
Retrieve the value for the "AccessToken" output from this Choreo execution. ((optional, string) The access token retrieved in the last step of the OAuth process. Access tokens that are expired will be refreshed and returned in the Choreo output.)
"""
return self._output.get('AccessToken', None)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Google. No content is returned for a successful delete request.)
"""
return self._output.get('Response', None)
class DeleteContactChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteContactResultSet(response, path)
| [
"shriswissfed@gmail.com"
] | shriswissfed@gmail.com |
825c346e273d17a72f1d35f21788c2d5ab12a219 | a8cdbe8b8fc01d5b1ec46fadbcd6914c2ddee55f | /test.py | a4beae7ea36c5e8bde211b068eae002ddc8bc276 | [] | no_license | kiwonNam/bl-df-crawl-amz | 8b8b8e88a343183d473d24a42a52011a22ce822b | 9e70c0e1ce80a9e8f33568621222d3cf2a42ad33 | refs/heads/master | 2021-04-15T15:36:13.438663 | 2018-03-27T02:31:02 | 2018-03-27T02:31:02 | 126,917,413 | 0 | 1 | null | 2018-03-27T02:32:06 | 2018-03-27T02:32:06 | null | UTF-8 | Python | false | false | 95 | py | from stylelens_crawl_amazon.stylelens_crawl import StylensCrawler
crawler = StylensCrawler()
| [
"master@bluehack.net"
] | master@bluehack.net |
21232647b1233b1d7d6dffa03ced25db2595b0a7 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_1_1_neat/16_1_1_swmuron_a.py | 8645bb1c638b89d9b6e0812c33738dfd700156c8 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 768 | py |
import sys
def solveIt(s):
maxletter = ''
word = ''
index = 0
slen = len(s)
while (index < slen):
print 'm: ' + maxletter + ' s[index]:' + s[index] + ' word: '+word
if (s[index] >= maxletter):
maxletter = s[index]
word = maxletter + word
else:
word = word + s[index]
index += 1
print 'word is '+word
return word
# actually process the solution file
with open(sys.argv[1]) as file:
with open("ans_"+sys.argv[1],'w') as outFile:
lines = file.read().splitlines()
count = -1
for line in lines:
# skip the test count line
count += 1
if (count == 0):
continue
result = solveIt(line)
outFile.write('Case #'+str(count)+': '+str(result))
outFile.write('\n') | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
9dc93f7d0d72e35617b1dbf9bce7acc7bee7d8d1 | c33690a1cf47cd18e755f30260291d51912c690f | /App_Base/migrations/0011_reunion_url.py | 35a13408cfa6f5046d11c39f2cee2758916a8862 | [] | no_license | otonelunico/LegalAssistant | 6cb5075b096684546a6ad862aa5c486c1efc59ad | 5c2c4308c10e0a353fa58e9d8bd4d699e74f3f38 | refs/heads/master | 2021-05-14T12:25:24.181305 | 2018-01-19T06:14:36 | 2018-01-19T06:14:36 | 116,408,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 2.0.1 on 2018-01-18 03:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App_Base', '0010_auto_20180118_0007'),
]
operations = [
migrations.AddField(
model_name='reunion',
name='url',
field=models.CharField(default='2', max_length=200),
preserve_default=False,
),
]
| [
"ocubillosj@gmail.com"
] | ocubillosj@gmail.com |
12e6bd1a6377a46682d6e5a77fc44c3c2f105f1c | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nineveh.py | aac83ac0a32d4a323fd3bad7f4af0cfb0cd40214 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 138 | py | ii = [('GodwWSL2.py', 1), ('CarlTFR.py', 1), ('WilkJMC.py', 1), ('DequTKM.py', 1), ('JacoWHI.py', 1), ('ClarGE3.py', 1), ('TaylIF.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
69b79dfd44ec5c39409aab69e8881d86af9b6259 | cb062c48280311134fe22573a41f9c4d6631b795 | /tests/misc/helper.py | 81168afcfd985e0ac0e4ce347719d64a6c5ce40e | [
"MIT"
] | permissive | xm-blockchain/xm-core | da1e6bb4ceb8ab642e5d507796e2cc630ed23e0f | 2282b435a02f061424d656155756d8f50238bcfd | refs/heads/main | 2023-01-15T19:08:31.399219 | 2020-11-19T03:54:19 | 2020-11-19T03:54:19 | 314,127,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,848 | py | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import contextlib
import os
import shutil
import tempfile
import time
from copy import deepcopy
import simplejson as json
from mock import mock
from pyxmlib.pyxmlib import XmssFast
from pyxmlib.pyxmlib import bin2hstr, hstr2bin
from pyqryptonight.pyqryptonight import StringToUInt256
from xm.core import config
from xm.core.Block import Block
from xm.core.BlockMetadata import BlockMetadata
from xm.core.ChainManager import ChainManager
from xm.core.OptimizedAddressState import OptimizedAddressState
from xm.core.GenesisBlock import GenesisBlock
from xm.core.txs.Transaction import Transaction
from xm.core.txs.SlaveTransaction import SlaveTransaction
from xm.core.txs.TokenTransaction import TokenTransaction
from xm.crypto.xmss import XMSS
from xm.generated import xm_pb2
def replacement_getTime():
    """Return the current wall-clock time as a whole number of seconds.

    Used in tests as a monkeypatch target replacing a project time provider
    so helpers share one integer-second clock.
    """
    now = time.time()
    return int(now)
@contextlib.contextmanager
def set_default_balance_size(new_value=100 * int(config.dev.shor_per_quanta)):
    """Temporarily override ``config.dev.default_account_balance``.

    The previous value is restored on exit, even if the body raises.
    The default override is 100 quanta expressed in shor.
    """
    saved = config.dev.default_account_balance
    config.dev.default_account_balance = new_value
    try:
        yield
    finally:
        # Always restore the global so other tests see the original value.
        config.dev.default_account_balance = saved
@contextlib.contextmanager
def set_hard_fork_block_number(hard_fork_index=0, new_value=1):
    """Temporarily set one entry of ``config.dev.hard_fork_heights``.

    :param hard_fork_index: index into the hard-fork heights list to patch
    :param new_value: block height to install for the duration of the block
    """
    saved = config.dev.hard_fork_heights[hard_fork_index]
    config.dev.hard_fork_heights[hard_fork_index] = new_value
    try:
        yield
    finally:
        # Restore the patched slot so the global list is unchanged afterwards.
        config.dev.hard_fork_heights[hard_fork_index] = saved
@contextlib.contextmanager
def set_wallet_dir(wallet_name):
    """Point ``config.user.wallet_dir`` at a throwaway copy of a test wallet.

    Copies ``tests/data/<wallet_name>`` into a fresh temp directory, yields
    that directory, and on exit deletes it and restores the original setting.
    """
    scratch_dir = tempfile.mkdtemp()
    original_wallet_dir = config.user.wallet_dir
    try:
        here = os.path.dirname(os.path.abspath(__file__))
        wallet_src = os.path.join(here, "..", "data", wallet_name)
        # copytree requires the destination not to exist, so drop the
        # freshly-created (empty) temp dir first.
        shutil.rmtree(scratch_dir)
        shutil.copytree(wallet_src, scratch_dir)
        config.user.wallet_dir = scratch_dir
        yield scratch_dir
    finally:
        shutil.rmtree(scratch_dir)
        config.user.wallet_dir = original_wallet_dir
@contextlib.contextmanager
def set_xm_dir(data_name):
    """Point ``config.user.xm_dir`` at a throwaway copy of a test data set.

    Copies ``tests/data/<data_name>`` into a fresh temp directory, adds the
    shared core ``genesis.yml``/``config.yml`` files, yields the directory,
    and on exit deletes it and restores the original setting.
    """
    scratch_dir = tempfile.mkdtemp()
    original_xm_dir = config.user.xm_dir
    try:
        here = os.path.dirname(os.path.abspath(__file__))
        data_root = os.path.join(here, "..", "data")
        dataset_src = os.path.join(data_root, data_name)
        # copytree requires the destination not to exist, so drop the
        # freshly-created (empty) temp dir first.
        shutil.rmtree(scratch_dir)
        shutil.copytree(dataset_src, scratch_dir)
        shutil.copy(os.path.join(data_root, 'core', 'genesis.yml'), scratch_dir)
        shutil.copy(os.path.join(data_root, 'core', 'config.yml'), scratch_dir)
        config.user.xm_dir = scratch_dir
        yield scratch_dir
    finally:
        shutil.rmtree(scratch_dir)
        config.user.xm_dir = original_xm_dir
def get_genesis_with_only_coin_base_txn(coin_base_reward_addr, dev_config):
    """Build a genesis block stripped down to a single coinbase transaction.

    The coinbase's mining address is redirected to *coin_base_reward_addr*,
    every other genesis transaction is discarded, and the header hash is
    regenerated under *dev_config*.
    """
    genesis = GenesisBlock()
    coinbase = Transaction.from_pbdata(genesis.transactions[0])
    coinbase.update_mining_address(coin_base_reward_addr)
    # Drop every transaction, then re-attach only the patched coinbase.
    del genesis.transactions[:]
    genesis.pbdata.transactions.extend([coinbase.pbdata])
    genesis.blockheader.generate_headerhash(dev_config)
    return genesis
def read_data_file(filename):
    """Return the contents of ``tests/data/<filename>`` as a string."""
    here = os.path.dirname(os.path.abspath(__file__))
    data_path = os.path.join(here, "..", "data", filename)
    with open(data_path, 'r') as fh:
        return fh.read()
@contextlib.contextmanager
def mocked_genesis():
    """Yield a mutable deep copy installed as the GenesisBlock singleton.

    While the block is active, ``GenesisBlock.instance`` points at the copy,
    so tests can tweak genesis data without touching the real singleton;
    ``mock.patch`` restores the original attribute on exit.
    """
    genesis_copy = deepcopy(GenesisBlock())
    with mock.patch('xm.core.GenesisBlock.GenesisBlock.instance'):
        GenesisBlock.instance = genesis_copy
        yield genesis_copy
@contextlib.contextmanager
def clean_genesis():
    """Re-initialise the GenesisBlock singleton from the empty fixture.

    Copies ``tests/data/no_data`` into a temporary directory, instantiates
    a fresh GenesisBlock from it, then resets ``config.user`` and clears
    the singleton again on exit.
    """
    data_name = "no_data"
    dst_dir = tempfile.mkdtemp()
    prev_val = config.user.xm_dir
    try:
        GenesisBlock.instance = None
        test_path = os.path.dirname(os.path.abspath(__file__))
        src_dir = os.path.join(test_path, "..", "data", data_name)
        # copytree needs a fresh destination; drop the dir mkdtemp made.
        shutil.rmtree(dst_dir)
        shutil.copytree(src_dir, dst_dir)
        config.user.xm_dir = dst_dir
        _ = GenesisBlock() # noqa
        config.user.xm_dir = prev_val
        # NOTE(review): this replaces config.user wholesale, so the
        # xm_dir assignment in ``finally`` acts on the *new* object —
        # confirm that is the intended restoration semantics.
        config.user = config.UserConfig(True)
        yield
    finally:
        shutil.rmtree(dst_dir)
        GenesisBlock.instance = None
        config.user.xm_dir = prev_val
def get_some_address(idx=0) -> bytes:
    """Deterministic test address whose XMSS seed's first byte is *idx*."""
    seed = bytearray(range(48))
    seed[0] = idx
    return XMSS(XmssFast(seed, 4)).address
def get_alice_xmss(xmss_height=6) -> XMSS:
    """Alice's fixed test XMSS tree (seed bytes 0..47)."""
    return XMSS(XmssFast(bytes(range(48)), xmss_height))
def get_bob_xmss(xmss_height=6) -> XMSS:
    """Bob's fixed test XMSS tree (seed bytes 5..52)."""
    return XMSS(XmssFast(bytes(i + 5 for i in range(48)), xmss_height))
def get_slave_xmss() -> XMSS:
    """Fixed slave XMSS tree of height 6 (seed bytes 10..57)."""
    return XMSS(XmssFast(bytes(i + 10 for i in range(48)), 6))
def get_random_xmss(xmss_height=6) -> XMSS:
    # Fresh random tree — unlike the fixtures above, not deterministic.
    return XMSS.from_height(xmss_height)
def get_token_transaction(xmss1, xmss2, amount1=400000000, amount2=200000000, fee=1) -> TokenTransaction:
    """Build a test xm token-creation transaction owned by *xmss1*.

    *xmss1* and *xmss2* receive *amount1* and *amount2* of the token's
    initial balance respectively.
    """
    initial_balances = [
        xm_pb2.AddressAmount(address=xmss1.address, amount=amount1),
        xm_pb2.AddressAmount(address=xmss2.address, amount=amount2),
    ]
    return TokenTransaction.create(symbol=b'xm',
                                   name=b'Quantum Resistant Ledger',
                                   owner=xmss1.address,
                                   decimals=4,
                                   initial_balances=initial_balances,
                                   fee=fee,
                                   xmss_pk=xmss1.pk)
def destroy_state():
    """Delete the on-disk state database, ignoring a missing one."""
    with contextlib.suppress(FileNotFoundError):
        shutil.rmtree(os.path.join(config.user.data_dir, config.dev.db_name))
def get_slaves(alice_ots_index, txn_nonce):
    """Build a slave-registration triple for Alice's test wallet.

    Returns ``[master_address: bytes, slave_seeds: list, slave_txn: json]``
    where the slave transaction is signed by Alice at OTS index
    ``alice_ots_index`` with nonce ``txn_nonce``.
    """
    # [master_address: bytes, slave_seeds: list, slave_txn: json]
    slave_xmss = get_slave_xmss()
    alice_xmss = get_alice_xmss()
    alice_xmss.set_ots_index(alice_ots_index)
    slave_txn = SlaveTransaction.create([slave_xmss.pk],
                                        [1],
                                        0,
                                        alice_xmss.pk)
    slave_txn._data.nonce = txn_nonce
    slave_txn.sign(alice_xmss)
    # JSON round-trip normalises the payload the same way the wallet
    # persists slave metadata; the address is then converted back to bytes.
    slave_data = json.loads(json.dumps([bin2hstr(alice_xmss.address), [slave_xmss.extended_seed], slave_txn.to_json()]))
    slave_data[0] = bytes(hstr2bin(slave_data[0]))
    return slave_data
def get_random_master():
    """Random master entry shaped ``[address_bytes, [extended_seed], None]``."""
    master = get_random_xmss(config.dev.xmss_tree_height)
    entry = json.loads(json.dumps(
        [bin2hstr(master.address), [master.extended_seed], None]))
    entry[0] = bytes(hstr2bin(entry[0]))
    return entry
def gen_blocks(block_count, state, miner_address):
    """Generate and persist a chain of *block_count* blocks into *state*.

    Block 0 is the genesis block (its balances seed ``addresses_state``);
    each subsequent block is empty apart from its coinbase, which credits
    *miner_address*. NTP time is mocked to a fixed timestamp so block
    creation is deterministic.

    Returns:
        list: the generated blocks, or ``False`` if updating the state
        container for some transaction fails.
    """
    blocks = []
    block = None
    with mock.patch('xm.core.misc.ntp.getTime') as time_mock:
        time_mock.return_value = 1615270948
        addresses_state = dict()
        for i in range(0, block_count):
            if i == 0:
                block = GenesisBlock()
                # Seed address states from the genesis balances.
                for genesis_balance in GenesisBlock().genesis_balance:
                    bytes_addr = genesis_balance.address
                    addresses_state[bytes_addr] = OptimizedAddressState.get_default(bytes_addr)
                    addresses_state[bytes_addr]._data.balance = genesis_balance.balance
            else:
                block = Block.create(dev_config=config.dev,
                                     block_number=i,
                                     prev_headerhash=block.headerhash,
                                     prev_timestamp=block.timestamp,
                                     transactions=[],
                                     miner_address=miner_address,
                                     seed_height=None,
                                     seed_hash=None)
                # Apply the coinbase (and any other txns) to the state
                # container before sealing the block with its nonces.
                addresses_set = ChainManager.set_affected_address(block)
                coin_base_tx = Transaction.from_pbdata(block.transactions[0])
                coin_base_tx.set_affected_address(addresses_set)
                chain_manager = ChainManager(state)
                state_container = chain_manager.new_state_container(addresses_set,
                                                                    block.block_number,
                                                                    False,
                                                                    None)
                coin_base_tx.apply(state, state_container)
                for tx_idx in range(1, len(block.transactions)):
                    tx = Transaction.from_pbdata(block.transactions[tx_idx])
                    if not chain_manager.update_state_container(tx, state_container):
                        return False
                    tx.apply(state, state_container)
                block.set_nonces(dev_config=config.dev, mining_nonce=10, extra_nonce=0)
            blocks.append(block)
            # Persist block, metadata, number mapping and chain height.
            metadata = BlockMetadata()
            metadata.set_block_difficulty(StringToUInt256('256'))
            BlockMetadata.put_block_metadata(state, block.headerhash, metadata, None)
            Block.put_block(state, block, None)
            bm = xm_pb2.BlockNumberMapping(headerhash=block.headerhash,
                                           prev_headerhash=block.prev_headerhash)
            Block.put_block_number_mapping(state, block.block_number, bm, None)
            state.update_mainchain_height(block.block_number, None)
            OptimizedAddressState.put_optimized_addresses_state(state, addresses_state)
    return blocks
| [
"74695206+xm-blockchain@users.noreply.github.com"
] | 74695206+xm-blockchain@users.noreply.github.com |
012177334ec8f809c2bbf8045d8214bce7b3681b | aa73e301f658b45a9674df4b619b288945dd0669 | /branches/sal_refactor_generic_classes/dot/pygraph/readwrite/dot.py | 4b78b3c840094be85ee1db739bc835d7801abd8b | [
"MIT"
] | permissive | svn2github/python-graph2 | e1c37f77cc0a27ac9099208876c63693bffbc929 | f19039d7f3fc1f04977c3f1d1d6128e8545ebef1 | refs/heads/master | 2020-04-30T04:11:38.475209 | 2013-04-19T01:06:14 | 2013-04-19T01:06:14 | 9,714,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,956 | py | # Copyright (c) 2007-2009 Pedro Matiello <pmatiello@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Functions for reading and writing graphs in Dot language.
@sort: read, read_hypergraph, write, write_hypergraph
"""
# Imports
from pygraph.classes.digraph import digraph
from pygraph.classes.exceptions import InvalidGraphType
from pygraph.classes.graph import graph
from pygraph.classes.hypergraph import hypergraph
import pydot
import pygraph
# Values
colors = ['aquamarine4', 'blue4', 'brown4', 'cornflowerblue', 'cyan4',
'darkgreen', 'darkorange3', 'darkorchid4', 'darkseagreen4', 'darkslategray',
'deeppink4', 'deepskyblue4', 'firebrick3', 'hotpink3', 'indianred3',
'indigo', 'lightblue4', 'lightseagreen', 'lightskyblue4', 'magenta4',
'maroon', 'palevioletred3', 'steelblue', 'violetred3']
def read(string):
    """
    Read a graph from a string in Dot language and return it. Nodes and edges specified in the
    input will be added to the current graph.

    @type  string: string
    @param string: Input string in Dot format specifying a graph.

    @rtype:  graph
    @return: Graph
    """
    dotG = pydot.graph_from_dot_data(string)

    graph_type = dotG.get_type()
    if graph_type == "graph":
        G = graph()
    elif graph_type == "digraph":
        G = digraph()
    else:
        raise InvalidGraphType

    # Nodes explicitly declared in the dot data are copied first, with
    # their attributes; implicit nodes are added while reading edges.
    for dot_node in dotG.get_nodes():
        node_name = dot_node.get_name()
        G.add_node(node_name)
        for attr_key, attr_val in dot_node.get_attributes().items():
            G.add_node_attribute(node_name, (attr_key, attr_val))

    for dot_edge in dotG.get_edges():
        src = dot_edge.get_source()
        dst = dot_edge.get_destination()
        # Add endpoints that were not declared as standalone nodes.
        if not dotG.get_node(src):
            G.add_node(src)
        if not dotG.get_node(dst):
            G.add_node(dst)
        attrs = dot_edge.get_attributes()
        # Weight defaults to 1 and label to '' when absent.
        _wt = attrs.get('weight', 1)
        _label = attrs.get('label', '')
        G.add_edge(src, dst, wt=_wt, label=_label)
        for attr_key, attr_val in attrs.items():
            if attr_key not in ('weight', 'label'):
                G.add_edge_attribute(src, dst, (attr_key, attr_val))
    return G
def write(G, weighted=False):
    """
    Return a string specifying the given graph in Dot language.

    @type  G: graph
    @param G: Graph.

    @type  weighted: boolean
    @param weighted: Whether edges should be labelled with their weight.

    @rtype:  string
    @return: String specifying the graph in Dot Language.
    """
    dotG = pydot.Dot()

    if not 'name' in dir(G):
        dotG.set_name('graphname')
    else:
        # Bug fix: this previously read ``graph.name`` (the *class*),
        # which raised AttributeError; the instance's name is wanted.
        dotG.set_name(G.name)

    if (type(G) == graph):
        dotG.set_type('graph')
        directed = False
    elif (type(G) == digraph):
        dotG.set_type('digraph')
        directed = True
    else:
        raise InvalidGraphType("Expected graph or digraph, got %s" % repr(G) )

    for node in G.nodes():
        attr_list = {}
        for attr in G.node_attributes(node):
            attr_list[str(attr[0])] = str(attr[1])
        newNode = pydot.Node(str(node), **attr_list)
        dotG.add_node(newNode)

    # Pydot doesn't work properly with the get_edge, so we use
    # our own set to keep track of what's been added or not.
    # For undirected graphs both orientations count as "seen".
    seen_edges = set([])
    for edge_from, edge_to in G.edges():
        if (str(edge_from) + "-" + str(edge_to)) in seen_edges:
            continue
        if (not directed) and (str(edge_to) + "-" + str(edge_from)) in seen_edges:
            continue
        attr_list = {}
        for attr in G.edge_attributes(edge_from, edge_to):
            attr_list[str(attr[0])] = str(attr[1])
        # Label precedence: explicit edge label, then weight (if requested).
        if str(G.edge_label(edge_from, edge_to)):
            attr_list['label'] = str(G.edge_label(edge_from, edge_to))
        elif weighted:
            attr_list['label'] = str(G.edge_weight(edge_from, edge_to))
        if weighted:
            attr_list['weight'] = str(G.edge_weight(edge_from, edge_to))
        newEdge = pydot.Edge(str(edge_from), str(edge_to), **attr_list)
        dotG.add_edge(newEdge)
        seen_edges.add(str(edge_from) + "-" + str(edge_to))
    return dotG.to_string()
def read_hypergraph(string):
    """
    Read a hypergraph from a string in dot format. Nodes and edges specified in the input will be added to the current hypergraph.

    @type  string: string
    @param string: Input string in dot format specifying a graph.

    @rtype:  hypergraph
    @return: Hypergraph
    """
    hgr = hypergraph()
    dotG = pydot.graph_from_dot_data(string)

    # Read the hypernode nodes...
    # Note 1: We need to assume that all of the nodes are listed since we need to know if they
    #         are a hyperedge or a normal node
    # Note 2: We should read in all of the nodes before putting in the links
    for each_node in dotG.get_nodes():
        if 'node' == each_node.get('hyper_node_type'):
            hgr.add_node(each_node.get_name())
        elif 'hyperedge' == each_node.get('hyper_node_type'):
            hgr.add_hyperedge(each_node.get_name())
        else:
            print ("Error: improper hyper_node_type - %s" % str(each_node.get('hyper_node_type')))

    # Now read in the links to connect the hyperedges. Exactly one
    # endpoint of each dot edge should be a plain node; the other is
    # taken to be the hyperedge.
    for each_link in dotG.get_edges():
        if hgr.has_node(each_link.get_source()):
            link_hypernode = each_link.get_source()
            link_hyperedge = each_link.get_destination()
        elif hgr.has_node(each_link.get_destination()):
            link_hypernode = each_link.get_destination()
            link_hyperedge = each_link.get_source()
        else:
            # Bug fix: previously this fell through with unbound (or
            # stale) variables and crashed/mislinked; skip malformed
            # links instead, mirroring the node-reading error style.
            print ("Error: link between unknown nodes - %s, %s"
                   % (each_link.get_source(), each_link.get_destination()))
            continue
        hgr.link(link_hypernode, link_hyperedge)

    return hgr
def write_hypergraph(hgr, colored = False):
    """
    Return a string specifying the given hypergraph in DOT Language.

    @type  hgr: hypergraph
    @param hgr: Hypergraph.

    @type  colored: boolean
    @param colored: Whether hyperedges should be colored.

    @rtype:  string
    @return: String specifying the hypergraph in DOT Language.
    """
    dotG = pydot.Dot()

    if 'name' in dir(hgr):
        dotG.set_name(hgr.name)
    else:
        dotG.set_name('hypergraph')

    # Plain nodes go in first, then hyperedges (optionally colour-coded),
    # each followed by its links.
    for node in hgr.nodes():
        dotG.add_node(pydot.Node(str(node), hyper_node_type = 'node'))

    for idx, hyperedge in enumerate(hgr.hyperedges()):
        if (colored):
            # Cycle through the palette so every hyperedge gets a colour.
            edge_color = colors[idx % len(colors)]
            he_node = pydot.Node(str(hyperedge), hyper_node_type = 'hyperedge', \
                                 color = str(edge_color), \
                                 shape = 'point')
        else:
            he_node = pydot.Node(str(hyperedge), hyper_node_type = 'hyperedge')
        dotG.add_node(he_node)

        for link in hgr.links(hyperedge):
            dotG.add_edge(pydot.Edge(str(hyperedge), str(link)))

    return dotG.to_string()
"salimfadhley@70df0079-b534-0410-988b-a5721c0f2d16"
] | salimfadhley@70df0079-b534-0410-988b-a5721c0f2d16 |
bb1f4318b509f59fd872544e381aac00d5246fa6 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startQiskit_QC726.py | 332c0f75c85ee32a7ea60c476b48ef53c4b5a3db | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,699 | py | # qubit number=3
# total number=15
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the benchmark variational circuit on *n* qubits.

    NOTE(review): this reads the module-level globals ``E``, ``V``,
    ``gamma`` and ``beta``, which are only assigned inside the
    ``__main__`` block — confirm they exist before calling elsewhere.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.cx(input_qubit[0],input_qubit[2]) # number=12
    prog.x(input_qubit[2]) # number=13
    prog.cx(input_qubit[0],input_qubit[2]) # number=14
    prog.y(input_qubit[3]) # number=5

    # One controlled-phase per graph edge plus single-qubit phases,
    # followed by an X-rotation mixer on all vertices.
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)

    prog.rx(2 * beta, range(len(V)))
    prog.swap(input_qubit[1],input_qubit[0]) # number=7
    prog.h(input_qubit[1]) # number=11
    prog.swap(input_qubit[1],input_qubit[0]) # number=8
    prog.y(input_qubit[0]) # number=9
    prog.y(input_qubit[0]) # number=10
    # circuit end
    return prog
if __name__ == '__main__':
    # Benchmark graph: 4 vertices, 5 unit-weight edges.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]

    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)

    # Grid-search F1 over (gamma, beta) on [0, pi) and keep the
    # maximising angle pair for the circuit parameters.
    step_size = 0.1

    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)

    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
            1 + np.cos(4 * a_gamma) ** 2)

    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]

    gamma = a[0] * step_size
    beta = a[1] * step_size

    prog = make_circuit(4)
    sample_shot =3962
    writefile = open("../data/startQiskit_QC726.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Execute on the real 5-qubit Yorktown device; the circuit is
    # transpiled against its fake (offline) counterpart first.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_5_yorktown")

    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1

    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()

    # Dump counts, circuit depth and the circuit itself to the CSV file.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
c9010c7ba0e51b3a1b1197ee877afebf8cfe4af1 | 9ccf9e6ad40081e40d604976cf7d28fd99dcb128 | /alr/training/ephemeral_trainer.py | bae429ef9cfe1efe3aa35ba342f2063cc0192f20 | [
"MIT"
] | permissive | darth-donut/alr | 30e6c16e54aca182fa24f0510d7208e3b9629620 | ee561c545bd98ec17c4f9c3040ef23b0222ef71a | refs/heads/master | 2023-03-31T07:29:47.629779 | 2021-01-03T23:36:05 | 2021-01-03T23:36:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,965 | py | from collections import defaultdict
from alr import ALRModel
from alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset
from alr.training.utils import EarlyStopper, PLPredictionSaver
from alr.utils._type_aliases import _DeviceType, _Loss_fn
from typing import Optional, Callable, Union
import numpy as np
import torch
from torch import nn
import torch.utils.data as torchdata
from ignite.engine import create_supervised_evaluator, Events, Engine
from ignite.metrics import Accuracy, Loss
from alr.training.progress_bar.ignite_progress_bar import ProgressBar
from alr.training import Trainer
from alr.training.samplers import MinLabelledSampler, RandomFixedLengthSampler
import copy
class PseudoLabelManager:
    """Ignite helper that harvests confident pseudo-labels each iteration.

    After every training iteration the wrapped model is evaluated on the
    unlabelled *pool*; points whose (transformed) class probability is at
    least *threshold* are collected into
    ``engine.state.pseudo_labelled_dataset`` and appended to ``history``.
    """

    def __init__(
        self,
        pool: UnlabelledDataset,
        model: nn.Module,
        threshold: float,
        init_pseudo_labelled: Optional[torchdata.Dataset] = None,
        log_dir: Optional[str] = None,
        device: _DeviceType = None,
        **kwargs,
    ):
        bs = kwargs.pop("batch_size", 1024)
        shuffle = kwargs.pop("shuffle", False)
        # Pool order must be stable: collected indices are offsets into it.
        assert not shuffle
        self._pool = pool
        self._loader = torchdata.DataLoader(
            pool, batch_size=bs, shuffle=shuffle, **kwargs
        )
        self._model = model
        self._log_dir = log_dir
        self._device = device
        self._threshold = threshold
        # Number of pseudo-labelled points acquired at each iteration.
        self.acquired_sizes = []
        # keep a copy of the latest pseudo-labelled dataset
        self._init_pld = init_pseudo_labelled
        self.history = [init_pseudo_labelled]

    def attach(self, engine: Engine):
        """Register this manager's handlers on *engine*."""
        engine.add_event_handler(Events.STARTED, self._initialise)
        # could also be EPOCH_COMPLETED since there's only one iteration in each epoch
        engine.add_event_handler(Events.ITERATION_COMPLETED, self._load_labels)

    def _load_labels(self, engine: Engine):
        """Evaluate the pool and stash confident points on the engine state."""
        evaluator = create_supervised_evaluator(
            self._model, metrics=None, device=self._device
        )
        plc = PseudoLabelCollector(
            self._threshold,
            log_dir=self._log_dir,
        )
        plc.attach(evaluator, batch_size=self._loader.batch_size)
        plc.global_step_from_engine(engine)
        evaluator.run(self._loader)
        indices, pseudo_labels = (
            evaluator.state.pl_indices.cpu().numpy(),
            evaluator.state.pl_plabs.cpu().numpy(),
        )
        self.acquired_sizes.append(indices.shape[0])
        if indices.shape[0]:
            confident_points = torchdata.Subset(copy.copy(self._pool), indices)
            if self._pool.debug:
                # pool returns target labels too
                pld = RelabelDataset(confident_points, pseudo_labels)
            else:
                pld = PseudoLabelDataset(confident_points, pseudo_labels)
        else:
            pld = None
        engine.state.pseudo_labelled_dataset = pld
        self.history.append(pld)

    def _initialise(self, engine: Engine):
        # Seed the engine with any pseudo-labels carried over from before.
        engine.state.pseudo_labelled_dataset = self._init_pld
class PseudoLabelCollector:
    """Ignite handler that records indices/labels of confident predictions.

    Predictions whose (transformed) maximum probability is at least
    *threshold* are accumulated batch-by-batch; on COMPLETED the results
    are published as ``engine.state.pl_indices`` / ``engine.state.pl_plabs``.
    """

    def __init__(
        self,
        threshold: float,
        log_dir: Optional[str] = None,
        pred_transform: Callable[[torch.Tensor], torch.Tensor] = lambda x: x.exp(),
    ):
        self._indices = []
        self._plabs = []
        # Maps raw model output to probabilities (default assumes
        # log-probabilities, hence .exp()).
        self._pred_transform = pred_transform
        self._output_transform = lambda x: x
        self._thresh = threshold
        self._targets = []
        self._preds = []
        if log_dir:
            self._saver = PLPredictionSaver(log_dir, pred_transform=pred_transform)
        else:
            self._saver = None
        self._batch_size = None

    def _parse(self, engine: Engine):
        """Collect confident predictions from the just-finished batch."""
        preds, targets = self._output_transform(engine.state.output)
        # state.iteration starts with 1
        iteration = engine.state.iteration - 1
        # Offset turns within-batch positions into pool-wide indices.
        offset = iteration * self._batch_size
        with torch.no_grad():
            preds = self._pred_transform(preds)
            preds_max, plabs = torch.max(preds, dim=-1)
            mask = torch.nonzero(preds_max >= self._thresh).flatten()
            if mask.shape[0]:
                # plabs = [N,]
                self._plabs.append(plabs[mask])
                self._indices.append(mask + offset)

    def _flush(self, engine: Engine):
        """Publish the accumulated results on the engine state and reset."""
        if self._indices and self._plabs:
            engine.state.pl_indices = torch.cat(self._indices)
            engine.state.pl_plabs = torch.cat(self._plabs)
        else:
            engine.state.pl_indices = torch.Tensor([])
            engine.state.pl_plabs = torch.Tensor([])
        self._indices = []
        self._plabs = []

    def attach(self, engine: Engine, batch_size: int, output_transform=lambda x: x):
        r"""
        Args:
            engine (Engine): ignite engine object
            batch_size (int): engine's batch size
            output_transform (Callable): if engine.state.output is not (preds, target),
                then output_transform should return aforementioned tuple.

        Returns:
            NoneType: None
        """
        engine.add_event_handler(Events.ITERATION_COMPLETED, self._parse)
        engine.add_event_handler(Events.COMPLETED, self._flush)
        self._output_transform = output_transform
        self._batch_size = batch_size
        if self._saver:
            self._saver.attach(engine, output_transform=output_transform)

    def global_step_from_engine(self, engine: Engine):
        # Delegate step bookkeeping to the prediction saver, if present.
        if self._saver:
            self._saver.global_step_from_engine(engine)
def _update_dataloader_batch_sampler(
    loader: torchdata.DataLoader,
    pseudo_labelled_dataset: torchdata.Dataset,
    min_labelled: Union[float, int],
):
    """Rebuild *loader* over labelled + pseudo-labelled data.

    A MinLabelledSampler guarantees each batch contains at least
    *min_labelled* genuinely-labelled points.
    """
    labelled_dataset = loader.dataset
    # DataLoader keeps its constructor arguments as public attributes;
    # these are either replaced below or invalid next to batch_sampler.
    excluded = {
        "dataset",
        "sampler",
        "batch_sampler",
        "dataset_kind",
        "batch_size",
        "shuffle",
        "drop_last",
    }
    kwargs = {
        name: getattr(loader, name)
        for name in vars(loader)
        if not name.startswith("_") and name not in excluded
    }
    kwargs["dataset"] = torchdata.ConcatDataset(
        (labelled_dataset, pseudo_labelled_dataset)
    )
    kwargs["batch_sampler"] = MinLabelledSampler(
        labelled_dataset,
        pseudo_labelled_dataset,
        loader.batch_size,
        min_labelled=min_labelled,
    )
    return torchdata.DataLoader(**kwargs)
def _update_dataloader(
    loader: torchdata.DataLoader,
    dataset: torchdata.Dataset,
    sampler: Optional[torchdata.Sampler] = None,
):
    """Clone *loader* with a new *dataset* (and optional *sampler*)."""
    supported = (
        torchdata.SequentialSampler,
        torchdata.RandomSampler,
        RandomFixedLengthSampler,
    )
    if not isinstance(loader.sampler, supported):
        raise ValueError(
            f"Only sequential, random, and random fixed length samplers "
            f"are supported in _update_dataloader"
        )
    # DataLoader keeps its constructor arguments as public attributes;
    # the ones below are replaced or derived.
    excluded = {"dataset", "sampler", "batch_sampler", "dataset_kind"}
    kwargs = {
        name: getattr(loader, name)
        for name in vars(loader)
        if not name.startswith("_") and name not in excluded
    }
    kwargs["dataset"] = dataset
    # A None sampler lets DataLoader pick Sequential/Random from `shuffle`.
    kwargs["sampler"] = sampler
    return torchdata.DataLoader(**kwargs)
def create_pseudo_label_trainer(
    model: ALRModel,
    loss: _Loss_fn,
    optimiser: str,
    train_loader: torchdata.DataLoader,
    val_loader: torchdata.DataLoader,
    pseudo_label_manager: PseudoLabelManager,
    rfls_len: Optional[int] = None,
    min_labelled: Optional[Union[float, int]] = None,
    patience: Optional[int] = None,
    reload_best: Optional[bool] = None,
    epochs: Optional[int] = 1,
    lr_scheduler: Optional[str] = None,
    lr_scheduler_kwargs: Optional[dict] = None,
    device: _DeviceType = None,
    *args,
    **kwargs,
):
    """Create an ignite Engine whose each 'iteration' is a full retrain.

    Every step resets the model's weights, augments the training set with
    the pseudo-labels gathered by *pseudo_label_manager* in the previous
    step, and runs a complete supervised ``Trainer.fit``. The step output
    is ``(train_loss, train_acc)`` from the last (or, under early
    stopping, the best) epoch.
    """
    assert (
        not min_labelled or not rfls_len
    ), "rfls_len and min_labelled are mutually exclusive"

    def _step(engine: Engine, _):
        model.reset_weights()
        # update loader accordingly: if pld is not none, concatenate them
        new_loader = train_loader
        pld = engine.state.pseudo_labelled_dataset
        if pld is not None:
            train_ds = torchdata.ConcatDataset((train_loader.dataset, pld))
            # update dataloader's dataset attribute
            if rfls_len:
                new_loader = _update_dataloader(
                    train_loader,
                    train_ds,
                    RandomFixedLengthSampler(train_ds, length=rfls_len, shuffle=True),
                )
            elif min_labelled:
                new_loader = _update_dataloader_batch_sampler(
                    train_loader, pld, min_labelled
                )
            else:
                new_loader = _update_dataloader(train_loader, train_ds)
        # begin supervised training
        trainer = Trainer(
            model,
            loss,
            optimiser,
            patience,
            reload_best,
            lr_scheduler=lr_scheduler,
            lr_scheduler_kwargs=lr_scheduler_kwargs,
            device=device,
            *args,
            **kwargs,
        )
        history = trainer.fit(
            new_loader,
            val_loader=val_loader,
            epochs=epochs,
        )
        # if early stopping was applied w/ patience, then the actual train acc and loss should be
        # -patience from the final loss/acc UNLESS we reached the maximum number of epochs.
        if patience and len(history["train_loss"]) != epochs:
            return history["train_loss"][-patience], history["train_acc"][-patience]
        return history["train_loss"][-1], history["train_acc"][-1]

    e = Engine(_step)
    pseudo_label_manager.attach(e)
    return e
class EphemeralTrainer:
    """Self-training ('ephemeral pseudo-label') wrapper.

    Each outer iteration retrains the model from scratch on the labelled
    data plus the confident pseudo-labels harvested from *pool* in the
    previous iteration, optionally early-stopping on validation accuracy.
    """

    def __init__(
        self,
        model: ALRModel,
        pool: UnlabelledDataset,
        loss: _Loss_fn,
        optimiser: str,
        threshold: float,
        min_labelled: Optional[Union[float, int]] = None,
        random_fixed_length_sampler_length: Optional[int] = None,
        log_dir: Optional[str] = None,
        patience: Optional[Union[int, tuple]] = None,
        reload_best: Optional[bool] = False,
        lr_scheduler: Optional[str] = None,
        lr_scheduler_kwargs: Optional[dict] = None,
        init_pseudo_label_dataset: Optional[torchdata.Dataset] = None,
        device: _DeviceType = None,
        pool_loader_kwargs: Optional[dict] = None,
        *args,
        **kwargs,
    ):
        assert (
            not min_labelled or not random_fixed_length_sampler_length
        ), "random_fixed_length_sampler_length and min_labelled are mutually exclusive"
        self._pool = pool
        self._model = model
        self._loss = loss
        self._optimiser = optimiser
        # patience may be an int (shared) or a (supervised, outer) tuple.
        self._patience = patience
        self._reload_best = reload_best
        self._device = device
        self._args = args
        self._kwargs = kwargs
        self._threshold = threshold
        self._log_dir = log_dir
        self._pool_loader_kwargs = (
            pool_loader_kwargs if pool_loader_kwargs is not None else {}
        )
        self._min_labelled = min_labelled
        self._rfls_len = random_fixed_length_sampler_length
        self._lr_scheduler = lr_scheduler
        self._lr_scheduler_kwargs = (
            lr_scheduler_kwargs if lr_scheduler_kwargs is not None else {}
        )
        self._init_pseudo_label_dataset = init_pseudo_label_dataset
        self.last_pseudo_label_dataset = None

    def fit(
        self,
        train_loader: torchdata.DataLoader,
        val_loader: Optional[torchdata.DataLoader] = None,
        iterations: Optional[int] = 1,
        epochs: Optional[int] = 1,
    ):
        """Run *iterations* retrain cycles of *epochs* supervised epochs each.

        Returns a history dict with per-iteration train/val loss & accuracy
        and the effective training-set size.
        """
        if self._patience:
            if isinstance(self._patience, int):
                pat1 = pat2 = self._patience
            else:
                # pat1: supervised early stopping; pat2: outer-loop stopping.
                pat1, pat2 = self._patience
        else:
            pat1 = pat2 = None
        if self._patience and val_loader is None:
            raise ValueError(
                "If patience is specified, then val_loader must be provided in .fit()."
            )

        val_evaluator = create_supervised_evaluator(
            self._model,
            metrics={"acc": Accuracy(), "loss": Loss(self._loss)},
            device=self._device,
        )

        history = defaultdict(list)
        pbar = ProgressBar(desc=lambda _: "Ephemeral")

        def _log_metrics(engine: Engine):
            # train_loss and train_acc are moving averages of the last epoch
            # in the supervised training loop
            train_loss, train_acc = engine.state.output
            history[f"train_loss"].append(train_loss)
            history[f"train_acc"].append(train_acc)
            pbar.log_message(
                f"Eph. iteration {engine.state.epoch}/{engine.state.max_epochs}\n"
                f"\ttrain acc = {train_acc}, train loss = {train_loss}"
            )
            if val_loader is None:
                return  # job done
            # val loader - save to history and print metrics. Also, add handlers to
            # evaluator (e.g. early stopping, model checkpointing that depend on val_acc)
            metrics = val_evaluator.run(val_loader).metrics
            history[f"val_acc"].append(metrics["acc"])
            history[f"val_loss"].append(metrics["loss"])
            pbar.log_message(
                f"\tval acc = {metrics['acc']}, val loss = {metrics['loss']}"
            )

        pseudo_label_manager = PseudoLabelManager(
            pool=self._pool,
            model=self._model,
            threshold=self._threshold,
            init_pseudo_labelled=self._init_pseudo_label_dataset,
            log_dir=self._log_dir,
            device=self._device,
            **self._pool_loader_kwargs,
        )
        trainer = create_pseudo_label_trainer(
            model=self._model,
            loss=self._loss,
            optimiser=self._optimiser,
            train_loader=train_loader,
            val_loader=val_loader,
            pseudo_label_manager=pseudo_label_manager,
            rfls_len=self._rfls_len,
            min_labelled=self._min_labelled,
            patience=pat1,
            reload_best=self._reload_best,
            epochs=epochs,
            lr_scheduler=self._lr_scheduler,
            lr_scheduler_kwargs=self._lr_scheduler_kwargs,
            device=self._device,
            *self._args,
            **self._kwargs,
        )
        # output of trainer are running averages of train_loss and train_acc (from the
        # last epoch of the supervised trainer)
        pbar.attach(trainer)
        if val_loader is not None and self._patience:
            es = EarlyStopper(self._model, pat2, trainer, key="acc", mode="max")
            es.attach(val_evaluator)
        trainer.add_event_handler(Events.EPOCH_COMPLETED, _log_metrics)
        # One outer 'epoch' per pseudo-label iteration (epoch_length=1).
        trainer.run(
            range(iterations),
            max_epochs=iterations,
            epoch_length=1,
        )
        if val_loader is not None and self._patience and self._reload_best:
            es.reload_best()

        history["train_size"] = np.array(pseudo_label_manager.acquired_sizes) + len(
            train_loader.dataset
        )
        # NOTE(review): this compares the outer iteration count against
        # `epochs` — it looks like `iterations` was intended; confirm.
        if trainer.state.epoch == epochs:
            self.last_pseudo_label_dataset = pseudo_label_manager.history[-2]
        else:
            self.last_pseudo_label_dataset = pseudo_label_manager.history[-(pat2 + 2)]
        return history

    def evaluate(self, data_loader: torchdata.DataLoader) -> dict:
        """Return ``{'acc': ..., 'loss': ...}`` of the model on *data_loader*."""
        evaluator = create_supervised_evaluator(
            self._model,
            metrics={"acc": Accuracy(), "loss": Loss(self._loss)},
            device=self._device,
        )
        return evaluator.run(data_loader).metrics
| [
"jiahfong@gmail.com"
] | jiahfong@gmail.com |
98583b6671a894809709798e797a7a4c7c2b95e3 | e700cbfcfa43aa42449cbcd2c337727fe398f253 | /twit/api/security.py | 0db3ac998687106af9443c90eae6643494595d72 | [
"MIT"
] | permissive | pchudzik/tweet | 28b12787667dae25dda64ab97218ed35703057c5 | 1938dae6be1359d73a8140b994c3db39d2b336da | refs/heads/master | 2020-04-23T11:57:40.508876 | 2019-03-15T20:03:55 | 2019-03-15T20:03:55 | 171,153,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | from flask_jwt_extended import jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt
from flask import jsonify, request, Flask
from twit import users, tokens
def login_user():
    """Authenticate the credentials in the JSON body; 401 on failure."""
    credentials = request.get_json()
    login_state = users.login(credentials.get("login"), credentials.get("password"))
    if not login_state:
        return jsonify({"message": "Invalid credentials"}), 401
    return jsonify(login_state._asdict())
@jwt_refresh_token_required
def refresh_token():
    """Issue a fresh access token for the identity in the refresh token."""
    current_user = get_jwt_identity()
    refreshed = tokens.refresh_token(current_user)
    return jsonify(refreshed._asdict())
@jwt_required
def logout():
    """Revoke the current access token (by jti) and reply 204 No Content."""
    token_id = get_raw_jwt()['jti']
    tokens.revoke(token_id)
    return "", 204
def init_security(app: Flask):
    """Register the authentication endpoints on *app* (all POST-only)."""
    routes = (
        ("/login", login_user),
        ("/login/refresh", refresh_token),
        ("/logout", logout),
    )
    for rule, view in routes:
        app.add_url_rule(rule, view_func=view, methods=["POST"])
"pawel.chudzik@gmail.com"
] | pawel.chudzik@gmail.com |
310ff641b989d7940cc1695fbdb8b6061811b6d1 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /fNQEi9Y2adsERgn98_5.py | 326ac1c77e47cba36deeba658a0cc5c63b081e7c | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,002 | py | """
Write a function that takes the coordinates of three points in the form of a
2d array and returns the perimeter of the triangle. The given points are the
vertices of a triangle on a two-dimensional plane.
### Examples
perimeter( [ [15, 7], [5, 22], [11, 1] ] ) ➞ 47.08
perimeter( [ [0, 0], [0, 1], [1, 0] ] ) ➞ 3.42
perimeter( [ [-10, -10], [10, 10 ], [-10, 10] ] ) ➞ 68.28
### Notes
* The given points always create a triangle.
* The numbers in the argument array can be positive or negative.
* Output should have 2 decimal places
* This challenge is easier than it looks.
"""
class Triangle:
    """A triangle on the plane, defined by three 2-D vertices.

    Exposes its vertices (``p1``..``p3``), the three sides as ``Line``
    objects (``l1``..``l3``) and the perimeter rounded to 2 decimals.
    """

    class Point:
        """A point on the plane."""

        def __init__(self, x, y):
            self.x = x
            self.y = y

    class Line:
        """The segment between two Points, with slope/intercept metadata."""

        def __init__(self, p1, p2):
            self.p1, self.p2 = p1, p2
            self.p1x, self.p1y = p1.x, p1.y
            self.p2x, self.p2y = p2.x, p2.y
            dx = self.p2x - self.p1x
            dy = self.p2y - self.p1y
            if dy == 0:
                # Horizontal: y is constant, slope metadata is unset.
                self.m = None
                self.b = None
                self.equation = 'y = {}'.format(self.p1y)
            elif dx == 0:
                # Vertical: undefined slope.
                self.m = None
                self.b = None
                self.equation = 'x = {}'.format(self.p1x)
            else:
                self.m = dy / dx
                self.b = self.p1y - self.m * self.p1x
                self.equation = 'y = {m}*x + {b}'.format(m=self.m, b=self.b)
            self.length = (dx ** 2 + dy ** 2) ** .5

    def __init__(self, points):
        self.p1, self.p2, self.p3 = (
            Triangle.Point(pt[0], pt[1]) for pt in points
        )
        self.l1 = Triangle.Line(self.p1, self.p2)
        self.l2 = Triangle.Line(self.p2, self.p3)
        self.l3 = Triangle.Line(self.p3, self.p1)
        self.perimeter = round(
            self.l1.length + self.l2.length + self.l3.length, 2)
def perimeter(lst):
    """Return the perimeter of the polygon with vertices *lst*, rounded
    to 2 decimal places.

    *lst* is a sequence of [x, y] pairs; consecutive vertices (wrapping
    back to the first) are joined by straight edges.  This generalizes
    the original triangle-only helper — for three vertices it returns
    exactly what the Triangle-based version did — and avoids building
    the heavyweight Triangle/Line/Point objects just to sum lengths.
    """
    total = 0.0
    n = len(lst)
    for i in range(n):
        x1, y1 = lst[i]
        x2, y2 = lst[(i + 1) % n]
        # Euclidean distance between consecutive vertices.
        total += ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
    return round(total, 2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
02a82b5faedbd3a91ab09f0fa5a843fc3ac9a56f | 4e67c2edd71493a98a3f13e5b2073c1d05b1b656 | /Semestre 02/ProjetoIntegrador2/Aula 11.05.2020/heranca.py | e3253170a13af5d997f7c8028fca9b4ae7cf97aa | [] | no_license | felipellima83/UniCEUB | 05991d7a02b13cd4e236f3be3a34726af2dc1504 | dbc44866545b5247d1b5f76ec6e9b7778e54093e | refs/heads/master | 2023-07-08T19:04:19.830473 | 2021-08-12T12:33:49 | 2021-08-12T12:33:49 | 249,958,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,948 | py | ''' UniCEUB - Ciência da Computação - Prof. Barbosa
Atalho de teclado: ctlr <d>, duplica linha. ctrl <y>, apaga linha. ctrl </>, comenta linha
1- Crie a classe Funcionario com os atributos nome, cpf, salario
- Crie o construtor da classe Funcionario def __init___ (self, ...). Teste
3- Crie uma instância (objeto f1) da classe com os dados necessários (f1 = Funcionario ( ... ) )
- Crie alguns método get e set e teste.
5- Sobrescreva o método __str__. Ele recebe o objeto e retorna todos os dados do funcionário. Teste.
6- Antes do método main, crie a classe Gerente com os atributos nome, cpf, salario, senha, qtd_gerencia
7- Crie uma instância (objeto g1) da classe Gerente com os dados necessários
8- Mostre todos os dados (atributos) do objeto g1
9- Crie o método autentica dentro da classe Gerente. Ele recebe o objeto, o usuário digita a senha,
imprime: "Acesso permitido." ou "Acesso negado." e retorna um valor booleano (True ou False).
10- Use o método autentica para o gerente instanciado (objeto g1).
11- Use o método autentica para o funcionario instanciado (objeto f1). Por quê deu erro?
12- Use o método __ str__ para o gerente (objeto g1) instanciado. Por quê mostrou endereço hexadecimal?
13- Crie outra instância (objeto g2) da classe Gerente com os dados necessários.
14- Use todos os métodos da classe Gerente para o gerente g2. '''
class Funcionario(object):
    """An employee with a name, CPF (tax id) and salary."""

    def __init__(self, nome, cpf, salario=0.0):
        # Constructor: store the three pieces of employee data.
        self.nome = nome
        self.cpf = cpf
        self.salario = salario

    def get_nome(self):
        """Accessor: return the employee's name."""
        return self.nome

    def set_nome(self, novo_nome):
        """Mutator: replace the employee's name in memory."""
        self.nome = novo_nome

    def get_cpf(self):
        """Accessor: return the employee's CPF."""
        return self.cpf

    def get_salario(self):
        """Accessor: return the employee's salary."""
        return self.salario

    def __str__(self):
        # Dunder ("magic") method; same text as before, built with
        # str.format instead of an f-string.
        return "Nome: {}, CPF: {}, salario: {:.2f}".format(
            self.nome, self.cpf, self.salario)
class Gerente(object):
    """A manager: employee data plus a password and a managed-team count.

    Note this class intentionally does NOT inherit from Funcionario —
    the surrounding exercise compares the two classes directly.
    """

    def __init__(self, nome, cpf, salario, senha, qtd_gerencia=0):
        self.nome = nome
        self.cpf = cpf
        self.salario = salario
        self.senha = senha
        self.qtd_gerencia = qtd_gerencia

    def get_nome(self):
        """Accessor: return the manager's name."""
        return self.nome

    def set_nome(self, novo_nome):
        """Mutator: replace the manager's name in memory."""
        self.nome = novo_nome

    def get_cpf(self):
        """Accessor: return the manager's CPF."""
        return self.cpf

    def get_salario(self):
        """Accessor: return the manager's salary."""
        return self.salario

    def get_qtd_gerencia(self):
        """Accessor: return how many people this manager manages."""
        return self.qtd_gerencia

    def autentica(self):
        """Prompt for the password; report and return whether it matched."""
        senha = input("Insira a senha: ")
        autorizado = senha == self.senha
        if autorizado:
            print("Acesso permitido.")
        else:
            print("Acesso negado.")
        return autorizado
if __name__ == '__main__':
    # Walks through exercise steps 3-14 from the module docstring.
    f1 = Funcionario('Paulo', '123', 1000.0)  # create f1 (calls the constructor)
    print(f1.get_nome())
    print(f1.get_cpf())
    print(f1.get_salario())
    r = f1  # r is another reference to the same object
    print(r)
    print(f1)  # equivalent to print(f1.__str__())
    g1 = Gerente('Paula', '234', 3000.0, 's1', 5)
    print(g1.get_nome())
    # Gerente defines no __str__, so this prints the default repr
    # (hexadecimal address) — the point of exercise step 12.
    print(g1.__str__())
    r = g1.autentica()  # prompts on stdin, returns True/False
    if r == True:
        pass  # NOTE(review): no-op branch kept from the exercise scaffold
    print(r)
    g2 = Gerente('Paulo', '34', 5000.0, 'g2', 3)
    print('G2: ', g2.get_nome())
"felipellima83@gmail.com"
] | felipellima83@gmail.com |
bf597a4acd8431cb675a1fa2e2141e59cced6163 | 805f2236caaec6c75629a7ce7a4b00c2c5b5e0f1 | /object_detection/anchors.py | b0d1d43863ac93eaee1e60dd2ca64758967d2ffa | [] | no_license | pai-plznw4me/object_detection | 7cccc46a32aded7828ce75edffbece35b6370177 | 43cf167e2c73c75682db888a11bce3321bb2d73f | refs/heads/master | 2020-11-24T20:27:01.116215 | 2019-12-28T07:18:19 | 2019-12-28T07:18:19 | 228,329,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,691 | py | import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
def generate_anchor(input_tensor,
                    backbone_output,
                    anchor_default_sizes=(32., 64., 128.),
                    anchor_ratio=(0.5, 1, 2)):
    """
    Description:
        Generate a dense grid of anchor boxes over the backbone feature map.
    Args:
        :param input_tensor: Keras Layer , 4D Tensor — height/width are read
            from axes 1 and 2 (channels-last layout assumed — TODO confirm)
        :param backbone_output: Keras Layer , 4D Tensor (same layout)
        :param anchor_default_sizes: base anchor side lengths, in input pixels
        :param anchor_ratio: aspect ratios applied to every base size
    :return: anchor_grid: Tensor, 3D Tensor of shape
        (backbone_h, backbone_w, n_sizes * n_ratios * 4); each anchor is
        packed as (cx, cy, w, h)
    """
    # input shape
    input_h = K.shape(input_tensor)[1]
    input_w = K.shape(input_tensor)[2]
    # backbone shape
    backbone_h = K.shape(backbone_output)[1]
    backbone_w = K.shape(backbone_output)[2]
    # Distance between adjacent feature-map pixels in input coordinates:
    # the input/backbone size ratio rounded UP to the nearest power of two.
    # (tf.ceil/tf.log are TF 1.x names; TF 2.x moved them under tf.math.)
    stride_h = 2. ** tf.ceil(tf.log(input_h / backbone_h)/tf.log(2.))
    stride_w = 2. ** tf.ceil(tf.log(input_w / backbone_w)/tf.log(2.))
    # generate anchor sizes: one (w, h) per (size, ratio) combination;
    # scaling by sqrt(r) keeps each anchor's area equal to size**2
    n_anchor_sizes = len(anchor_default_sizes) * len(anchor_ratio)
    anchor_sizes = []
    for size in anchor_default_sizes:
        for r in anchor_ratio:
            anchor_sizes.append([size*np.sqrt(r), size/np.sqrt(r)])
    anchor_sizes = np.asarray(anchor_sizes)
    # generate anchor grid
    # 4 => cx, cy, w, h
    fmap_grid = tf.ones(shape=[backbone_h, backbone_w], dtype=tf.float64)
    # generate coordinate center_x, center_y
    range_h = tf.range(backbone_h)
    range_w = tf.range(backbone_w)
    cx, cy = tf.meshgrid(range_w, range_h)
    cx = tf.cast(cx, tf.float64)
    cy = tf.cast(cy, tf.float64)
    # Shift cx, cy into input-image coordinates: feature-map pixels are
    # `stride` apart, and + stride//2 centers each anchor on its cell.
    cx = cx * stride_w + stride_w // 2
    cy = cy * stride_h + stride_h // 2
    # Replicate the center grids once per anchor shape.
    grid_cx = tf.stack([cx] * n_anchor_sizes, axis=-1)
    grid_cy = tf.stack([cy] * n_anchor_sizes, axis=-1)
    # mapping ws, hs to anchor grid (broadcast each (w, h) over the grid)
    anchor_ws = anchor_sizes[:, 0]
    anchor_hs = anchor_sizes[:, 1]
    grid_ws = tf.expand_dims(fmap_grid, axis=-1) * anchor_ws
    grid_hs = tf.expand_dims(fmap_grid, axis=-1) * anchor_hs
    """
    Description:
        grid_cx shape = (7,7,9),
        grid_cx[0, 0, :] => [x1,x2,x3 .. ]
        grid_cy = shape = (7,7,9)                  [[x1, x2, x3, ...]
        grid_cy[0, 0, :] => [y1,y2,y3 .. ]          [y1, y2, y3, ...]
                                            ==>     [w1, w2, w3, ...]
        grid_ws = shape = (7,7,9)                   [h1, h2, h3, ...]]
        grid_ws[0, 0, :] => [w1,w2,w3 .. ]
        grid_hs = shape = (7,7,9)
        grid_hs[0, 0, :] => [h1,h2,h3 .. ]
    """
    anchor_grid = tf.stack([grid_cx, grid_cy, grid_ws, grid_hs], axis=-1)
    """
    Description:
        [[x1, x2, x3, ...]
         [y1, y2, y3, ...]
         [w1, w2, w3, ...]      =>  [x1,y1,w1,h1, x2,y2,w2,h2 ...]
         [h1, h2, h3, ...]]
    """
    anchor_grid = tf.reshape(anchor_grid, [backbone_h, backbone_w, -1])
    return anchor_grid
def generate_trainable_anchors(normalize_anchors, matching_mask):
    """
    Args:
        normalize_anchors: 3D array, shape = [N_anchor, N_gt, 4]
        matching_mask: Ndarray, 2D array,
            entries are *1* for (anchor, gt) pairs to be used for training
            and *-1* for pairs that are not.
            example: [[ 1 ,-1],     <- anchor1
                      [-1 ,-1],     <- anchor2
                      [-1 ,-1],     <- anchor3
                      [ 1 , 1],     <- anchor4
                      [-1 , 1]]     <- anchor5
                      gt1  gt2
            In this example the pairs used are (gt1, anchor1),
            (gt2, anchor4), (gt2, anchor5).
    Description:
        Build the trainable anchor targets.
        The input `normalize_anchors` has shape [N_anchor, N_gt, 4];
        from it, only the entries flagged 1 in `matching_mask` are
        extracted and written into a [N_anchor, 4] output tensor.
        # Caution! #
        If an anchor matches no gt (anchor3 above) its row stays filled
        with -1 -1 -1 -1.  If it matches several, the right-most match
        (e.g. (gt2, anchor4)) ends up selected.
    """
    # Tensorflow
    # TODO: using `matching_mask == 1` directly raises an error here —
    # why?  (tf.equal is used instead.)
    indices_2d = tf.where(tf.equal(matching_mask, 1))
    indices_2d = tf.stack(indices_2d, axis=0)
    # First column of each (anchor, gt) pair = target row in the output.
    indices = indices_2d[:, 0]
    indices = tf.expand_dims(indices, axis=-1)
    # calculate delta
    # Wrapping indices_2d in a list makes tf.gather_nd return shape
    # (1, N, 4); the trailing [0] strips that leading 1.
    dx = tf.gather_nd(normalize_anchors[:, :, 0], [indices_2d])[0]
    dy = tf.gather_nd(normalize_anchors[:, :, 1], [indices_2d])[0]
    dw = tf.gather_nd(normalize_anchors[:, :, 2], [indices_2d])[0]
    dh = tf.gather_nd(normalize_anchors[:, :, 3], [indices_2d])[0]
    d_xywh = tf.stack([dx, dy, dw, dh], axis=-1)
    n_anchors = tf.shape(normalize_anchors)[0]
    # Start from an all -1 grid and scatter the matched deltas into it.
    ret_anchor = tf.ones([n_anchors, 4], dtype=tf.float32) * -1
    ret_anchor = tf.tensor_scatter_nd_update(ret_anchor, indices, d_xywh)
    return ret_anchor
def generate_trainble_classes(mask, gt_classes, n_classes):
    """
    Build one-hot class targets per anchor.

    NOTE(review): the name keeps the original "trainble" typo because
    callers reference it; renaming would break the public interface.

    Args:
        mask: Tensor, 2D array (rows = anchors, columns = gts;
            1 marks a positive match, -1 a non-match)
            example:
                [[1 , -1, 1],
                 [1 , -1, 1],
                 [1 , -1, 1],
                 [1 , -1, 1],
                  ...
                 [1 , -1, 1]]
        gt_classes: Tensor, 1D vector of integer class ids, one per gt
            example:
                [2, 2, 3]
        n_classes: depth of the one-hot encoding (class 0 = background)
    Return:
        pred_classes: Tensor, 2D array of one-hot rows — background
        (class 0) for unmatched anchors, the matched gt's class otherwise.
        Intermediate class_mask example:
            [[ 2,  2, -3],
             [-2 ,-2, -3],
             [-2 ,-2, -3],
             ...
             [2 , 2, 3]]
    """
    # Positive entries carry the gt class id; negatives mark non-matches.
    class_mask = mask * gt_classes
    n_length = tf.shape(class_mask)[0]
    # Default every anchor to the background class (index 0) one-hot.
    background = tf.zeros(n_length, dtype=tf.int32)
    background = tf.one_hot(background, n_classes)
    positive_index = tf.where(class_mask > 0)
    positive_value = tf.gather_nd(class_mask, positive_index)
    positive_onehot = tf.one_hot(positive_value, n_classes)
    # Row index of each positive entry = anchor row to overwrite.
    indices = positive_index[:, 0]
    indices = tf.expand_dims(indices, axis=-1)
    pred_classes = tf.tensor_scatter_nd_update(background, indices, positive_onehot)
    return pred_classes
| [
"plznw4me@naver.com"
] | plznw4me@naver.com |
0ead00a9de13ee038b09006097ebe531c1fb1e13 | 4369c5a214f8c4fb1f8a286f72d57cfa9c3f02c7 | /geotrek/flatpages/migrations/0006_auto_20200406_1413.py | 2c0159b11ab5d6e1e3e01fbedfeda9c5f6637a40 | [
"BSD-2-Clause"
] | permissive | GeotrekCE/Geotrek-admin | c13d251066e92359c26f22d185b8bd2e26e622ef | a91b75261a876be51ad2a693618629900bea6003 | refs/heads/master | 2023-08-21T12:45:25.586551 | 2023-08-09T12:28:33 | 2023-08-09T12:28:33 | 9,886,107 | 71 | 56 | BSD-2-Clause | 2023-09-13T09:40:33 | 2013-05-06T12:17:21 | Python | UTF-8 | Python | false | false | 550 | py | # Generated by Django 2.0.13 on 2020-04-06 14:13
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the legacy `p_t_page*` / `t_r_page_*` sequences to the
    `flatpages_flatpage*` names."""

    dependencies = [
        ('flatpages', '0005_auto_20200228_2150'),
    ]

    operations = [
        # Raw SQL because table/model renames do not rename the
        # underlying PostgreSQL sequences.  NOTE(review): no reverse SQL
        # is provided, so this migration is irreversible as written.
        migrations.RunSQL('ALTER SEQUENCE p_t_page_id_seq RENAME TO flatpages_flatpage_id_seq;'),
        migrations.RunSQL('ALTER SEQUENCE t_r_page_portal_id_seq RENAME TO flatpages_flatpage_portal_id_seq;'),
        migrations.RunSQL('ALTER SEQUENCE t_r_page_source_id_seq RENAME TO flatpages_flatpage_source_id_seq;'),
    ]
| [
"gael.utard@makina-corpus.com"
] | gael.utard@makina-corpus.com |
ca543090d0178402418aaec36a7a435942abb28f | 3b84ca7d132e6ca5004029d39bfa7c8fead07fe1 | /arnold/5.3.1.0/package.py | 69832010ff5d3970be4247eb14dfda606c5bd6b4 | [] | no_license | est77/rez-packages | 05a5a05224e02c0a28bc37a81cbd07ca7447d604 | 449ade7acf92196efda2e8ec883c52ba4e33262d | refs/heads/master | 2020-05-27T10:35:02.323417 | 2020-02-23T19:03:05 | 2020-02-23T19:03:05 | 82,542,112 | 22 | 7 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # -*- coding: utf-8 -*-
# rez package definition for the Arnold renderer.
name = "arnold"
version = "5.3.1.0"
description = "Arnold"

def commands():
    # Environment hook; `env` and `{root}` are presumably injected by rez
    # when the package is resolved — TODO confirm against the rez docs.
    env.PATH.append("{root}/bin")
    env.LD_LIBRARY_PATH.append("{root}/bin")
    env.PYTHONPATH.append("{root}/python")
| [
"ramenhdr@gmail.com"
] | ramenhdr@gmail.com |
20fd63457fbe8324e6d75d4f58117473bc620f2b | 4f972877da14226125440b3da9bdb058764d8a54 | /mlflowDemo/sklearn_logistic_regression.py | 09b1659d8a2feb4deb3ff623ec6a439ff6c83977 | [] | no_license | ZhiYinZhang/study | 16c29990cb371e7e278c437aa0abc7c348614063 | 8c085310b4f65e36f2d84d0acda4ca257b7389af | refs/heads/master | 2021-07-09T16:05:02.925343 | 2020-06-30T07:53:05 | 2020-06-30T07:53:05 | 153,767,096 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# datetime:2019/11/11 11:56
import numpy as np
from sklearn.linear_model import LogisticRegression
import mlflow
from mlflow import sklearn
if __name__ == "__main__":
    # Point the client at a locally running MLflow tracking server.
    mlflow.set_tracking_uri("http://localhost:5001")
    # mlflow.create_experiment("sklearn logistic regression")
    mlflow.set_experiment("sklearn logistic regression")
    with mlflow.start_run() as active_run:
        print(mlflow.active_run().info)
        # Tiny toy dataset: one feature column, binary labels.
        X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
        y = np.array([0, 0, 1, 1, 1, 0])
        lr = LogisticRegression()
        lr.fit(X, y)
        score = lr.score(X, y)  # accuracy measured on the training data itself
        print("Score: %s" % score)
        mlflow.log_metric("score", score)
        # sklearn.log_model(lr, "model")
        # Log the fitted model as a run artifact named "model2".
        mlflow.sklearn.log_model(lr,"model2")
        # print("Model saved in run %s" % mlflow.active_run().info.run_uuid)
"2454099127@qq.com"
] | 2454099127@qq.com |
9dc567114028c18e7f20da8e620668d1ca00936d | ba35ce41c1cf8a1cd75441df1b7173c6606b8c7f | /si_prefix/tests/test_si_format.py | 502e967aed31fa6665d8f1b226678bb31eade23f | [
"BSD-3-Clause"
] | permissive | Lucaszw/si-prefix | 352396f184ed041d3054b10cddcd894deee3f3cf | e1f73d6abb3735cc6aad70eb216cb92a7736892a | refs/heads/master | 2021-06-20T11:06:08.101055 | 2017-07-30T04:17:32 | 2017-07-30T04:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | # coding: utf-8
from nose.tools import eq_
from si_prefix import si_format
# (input value, expected si_format(value, 2) output) pairs covering every
# SI prefix from yocto (y) to yotta (Y), plus the exponent-notation
# fallbacks used beyond both ends of the prefix range.
TEST_CASES = [(1e-27, '1.00e-27'),
              (1.764e-24, '1.76 y'),
              (7.4088e-23, '74.09 y'),
              (3.1117e-21, '3.11 z'),
              (1.30691e-19, '130.69 z'),
              (5.48903e-18, '5.49 a'),
              (2.30539e-16, '230.54 a'),
              (9.68265e-15, '9.68 f'),
              (4.06671e-13, '406.67 f'),
              (1.70802e-11, '17.08 p'),
              (7.17368e-10, '717.37 p'),
              (3.01295e-08, '30.13 n'),
              (1.26544e-06, '1.27 u'),
              (5.31484e-05, '53.15 u'),
              (0.00223223, '2.23 m'),
              (0.0937537, '93.75 m'),
              (3.93766, '3.94 '),  # Space added to help alignment
              (165.382, '165.38 '),  # Space added to help alignment
              (6946.03, '6.95 k'),
              (291733, '291.73 k'),
              (1.22528e+07, '12.25 M'),
              (5.14617e+08, '514.62 M'),
              (2.16139e+10, '21.61 G'),
              (3.8127e+13, '38.13 T'),
              (1.60133e+15, '1.60 P'),
              (6.7256e+16, '67.26 P'),
              (2.82475e+18, '2.82 E'),
              (1.1864e+20, '118.64 E'),
              (4.98286e+21, '4.98 Z'),
              (2.0928e+23, '209.28 Z'),
              (8.78977e+24, '8.79 Y'),
              (3.6917e+26, '369.17 Y'),
              (1.55051e+28, '15.51e+27'),
              (6.51216e+29, '651.22e+27')]
def test_si_format():
    """Check si_format(value, 2) against every expected string in TEST_CASES."""
    for value, expected in TEST_CASES:
        # Plain assert instead of nose's eq_: nose is unmaintained and this
        # keeps the test runnable under pytest/unittest.  The message tuple
        # identifies the failing case.
        assert si_format(value, 2) == expected, (value, expected)
| [
"christian@fobel.net"
] | christian@fobel.net |
45d253c567b327996b18ea3c6e7bf4caeb94dd7a | b4efe7a85bbde01cd47189bcc0298594baae7a14 | /code/54.py | c431441835953ac567f5c06b5dcbca0b2531fcef | [] | no_license | HarshaaArunachalam/guvi | c200e05dc0c259bfabfc0ee58c1ab9b6412b89a7 | 67c87e8fe50d645036333649759b5b1a40369004 | refs/heads/master | 2020-05-31T06:27:27.104793 | 2019-06-11T18:31:43 | 2019-06-11T18:31:43 | 190,141,812 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | num=int(input())
# Round num down to the nearest even number; num - num % 2 is identical
# to (num // 2) * 2, including for negatives (-3 -> -4).
number = num - num % 2
print(number)
| [
"noreply@github.com"
] | HarshaaArunachalam.noreply@github.com |
e961cf89a9e27dee6daa9cb7527a45eaf9db66b0 | 68b2e5981caadabd6a4ecec5dab69831979b33f2 | /job_portal/urls.py | 5d01babc1e71be29b10536a68e717fa37420dc88 | [] | no_license | linker10/jodep | a78b853743b701ef3a63ed6f8555e280e20f1048 | 7f533e9ee68e57eb19a874390e087ca19d786d60 | refs/heads/master | 2022-12-30T20:34:06.721603 | 2020-10-14T17:14:15 | 2020-10-14T17:14:15 | 292,902,953 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | """job_portal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Top-level URL routing: each app owns its own urls module.
urlpatterns = [
    path('', include('home.urls', namespace='home')),
    path('accounts/', include('accounts.urls',)),
    path('manager/', include('manager.urls', namespace='manager')),
    path('admin/', admin.site.urls),  # Django admin site
    path('jobs/', include('jobs.urls', namespace='jobs')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# NOTE(review): static() is the development-only helper for serving
# uploaded media; in production the web server should serve MEDIA_ROOT.
| [
"bilalsharif4@gmail.com"
] | bilalsharif4@gmail.com |
4eb534b6c3f8fc7ab0a1340f2bb63bf369c7e86a | 656df056ad736fdaaa1ef428ef09786c5a3d1494 | /codigo/ESP32/boot.py | 9b8ce49b77c2b0a915f6328fe69091b79e1b4d73 | [] | no_license | javacasm/micropythonTutorial | a610024096b50512347bcb72937facd41cf6db8e | cc90e26763ef884e0311eecccc6c72c6c94a0a30 | refs/heads/master | 2022-02-03T20:06:10.315551 | 2022-01-25T11:02:09 | 2022-01-25T11:02:09 | 159,473,846 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | # This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
import webrepl
import network

# Bring up the station (client) Wi-Fi interface and join the network.
iw = network.WLAN(network.STA_IF)
iw.active(True)
# SECURITY NOTE(review): SSID and password are hard-coded in plain text
# on the device; anyone with the firmware image can read them.
iw.connect('OpenWrt','qazxcvbgtrewsdf')
webrepl.start()  # enable WebREPL so the board is reachable over Wi-Fi
# NOTE(review): connect() appears to be asynchronous, so ifconfig() may
# run before an IP is assigned; its return value is discarded here anyway.
iw.ifconfig()
print('esp32 Lolin32.34')  # boot banner identifying this board/firmware
| [
"javacasm@gmail.com"
] | javacasm@gmail.com |
23cf0b4eab94f6e562a94369a9a428538ba2f765 | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/market_tools/tools/complain/util.py | beac7fad6a1a9d61bdf0ff35d0765fc78f27069d | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User, Group, Permission
from django.db.models import F
import time
from market_tools.prize.models import Prize
from market_tools.tools.coupon import util as coupon_util
from watchdog.utils import watchdog_fatal, watchdog_error
from modules.member.models import Member, MemberGrade, BRING_NEW_CUSTOMER_VIA_QRCODE
from models import *
#############################################################################
# get_coupon_rules: fetch the coupon rules for an owner
#############################################################################
def get_coupon_rules(owner):
    """Thin wrapper: delegate to coupon_util.get_coupon_rules for *owner*."""
    return coupon_util.get_coupon_rules(owner)
#############################################################################
# get_all_grades_list: fetch the member grades
#############################################################################
def get_all_grades_list(request):
    """Return every MemberGrade for the webapp tied to *request*'s user profile."""
    webapp_id = request.user_profile.webapp_id
    return MemberGrade.get_all_grades_list(webapp_id)
| [
"jiangzhe@weizoom.com"
] | jiangzhe@weizoom.com |
c59aaab11b58925f08a80bb679e33fb9aca93f2d | 97af2f80f417afce2f5a41d6b9dfe077b5f6e4f5 | /publication/migrations/0002_auto__del_watched__del_liked__del_rated__del_forward__del_alert__add_f.py | d53330467bdd514aa7db09dce92525569062e220 | [] | no_license | valdergallo/puuublic | a10457579da05a64216d3bf6d76fbad49b036e76 | 59ae2ecc236368fdff190df854d063a7ae1e1b04 | refs/heads/master | 2021-01-10T20:39:17.704433 | 2013-05-01T02:06:35 | 2013-05-01T02:17:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,574 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Drop the five per-publication interaction tables (Watched,
        Liked, Rated, Forward, Alert) and create 'publication_favorite',
        which links a user to a Theme instead."""
        # Deleting model 'Watched'
        db.delete_table('publication_watched')

        # Deleting model 'Liked'
        db.delete_table('publication_liked')

        # Deleting model 'Rated'
        db.delete_table('publication_rated')

        # Deleting model 'Forward'
        db.delete_table('publication_forward')

        # Deleting model 'Alert'
        db.delete_table('publication_alert')

        # Adding model 'Favorite'
        db.create_table('publication_favorite', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='users_favs', to=orm['auth.User'])),
            ('publication', self.gf('django.db.models.fields.related.ForeignKey')(related_name='themes_favs', to=orm['publication.Theme'])),
        ))
        db.send_create_signal('publication', ['Favorite'])
    def backwards(self, orm):
        """Inverse of forwards(): recreate the five dropped interaction
        tables and drop 'publication_favorite'.  NOTE(review): row data
        deleted by forwards() is not restored."""
        # Adding model 'Watched'
        db.create_table('publication_watched', (
            ('publication', self.gf('django.db.models.fields.related.ForeignKey')(related_name='publications_watched', to=orm['publication.Publication'])),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='users_watched', to=orm['auth.User'])),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('publication', ['Watched'])

        # Adding model 'Liked'
        db.create_table('publication_liked', (
            ('publication', self.gf('django.db.models.fields.related.ForeignKey')(related_name='publications_liked', to=orm['publication.Publication'])),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='users_liked', to=orm['auth.User'])),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('publication', ['Liked'])

        # Adding model 'Rated'
        db.create_table('publication_rated', (
            ('publication', self.gf('django.db.models.fields.related.ForeignKey')(related_name='publications_rated', to=orm['publication.Publication'])),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='users_rated', to=orm['auth.User'])),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('publication', ['Rated'])

        # Adding model 'Forward'  (note: original related_names spell "foward")
        db.create_table('publication_forward', (
            ('publication', self.gf('django.db.models.fields.related.ForeignKey')(related_name='publications_foward', to=orm['publication.Publication'])),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='users_foward', to=orm['auth.User'])),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('publication', ['Forward'])

        # Adding model 'Alert'
        db.create_table('publication_alert', (
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='users_alert', to=orm['auth.User'])),
            ('publication', self.gf('django.db.models.fields.related.ForeignKey')(related_name='publications_alert', to=orm['publication.Publication'])),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('publication', ['Alert'])

        # Deleting model 'Favorite'
        db.delete_table('publication_favorite')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'publication.comment': {
'Meta': {'object_name': 'Comment'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'publication': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments_set'", 'to': "orm['publication.Publication']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'publication.favorite': {
'Meta': {'object_name': 'Favorite'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publication': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'themes_favs'", 'to': "orm['publication.Theme']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users_favs'", 'to': "orm['auth.User']"})
},
'publication.publication': {
'Meta': {'object_name': 'Publication'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'liked_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lon': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rated_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'themes_set'", 'to': "orm['publication.Theme']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'publications_set'", 'to': "orm['auth.User']"}),
'watched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'publication.publicationimage': {
'Meta': {'object_name': 'PublicationImage'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'publication': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'publication_images_set'", 'to': "orm['publication.Publication']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images_set'", 'to': "orm['auth.User']"})
},
'publication.theme': {
'Meta': {'object_name': 'Theme'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'themes_set'", 'to': "orm['auth.User']"})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['publication'] | [
"valdergallo@gmail.com"
] | valdergallo@gmail.com |
9799bfd8fcc771cd4435949af42db1f97eb1cf32 | c8b1d07ba58a82ce58623c4e67703e1a71251691 | /ChipSeq/ComparePeak/combine.py | 2e35eece161fd30d8f90bfe31a6dfdab7b6122fa | [] | no_license | jumphone/Bioinformatics | 17a54740033b3fafb1efee52b770ae023765e39b | 58b7a83233e43fd2cb4db8baa0a1379d1fbf07c9 | refs/heads/master | 2021-04-27T07:27:26.423309 | 2020-11-26T09:04:23 | 2020-11-26T09:04:23 | 122,632,340 | 25 | 14 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | import sys
# Usage: combine.py <regions> <overlap1> <overlap2> <out>
# For each region in <regions>, write the region plus two 0/1 flags saying
# whether its first four columns appear (with a positive count in column -4)
# in <overlap1> / <overlap2>.
#
# Fixes over the original: all file handles are closed via `with`, and the
# duplicated parsing of the two overlap files is factored into one helper.


def _overlap_keys(path):
    """Return the tab-joined first four columns of every row in *path*
    whose 4th-from-last column (overlap count) is positive."""
    keys = set()
    with open(path) as handle:
        for line in handle:
            seq = line.rstrip().split('\t')
            if int(seq[-4]) > 0:
                keys.add('\t'.join(seq[:4]))
    return keys


with open(sys.argv[1]) as fa:
    old = [line.rstrip() for line in fa]

set1 = _overlap_keys(sys.argv[2])
set2 = _overlap_keys(sys.argv[3])

with open(sys.argv[4], 'w') as fo:
    for one in old:
        f1t = 1 if one in set1 else 0
        f2t = 1 if one in set2 else 0
        fo.write(one + '\t' + str(f1t) + '\t' + str(f2t) + '\n')
| [
"noreply@github.com"
] | jumphone.noreply@github.com |
96844d6e1b7cbb1e0c4df2cf34bf1e2323da26d5 | 4412fd856cfbdfab98122b11ea01e447a76851b3 | /rodentdb/migrations/0036_auto_20190621_1805.py | 2901595ecec0afdf9c55748cf027ceb41f509900 | [] | no_license | fchampalimaud/rodentdb | d8e8c0c7552de638d3a2fd57de287401997fdf3c | 4a970c09da78f22a8c57d8ea98d29a569f531613 | refs/heads/master | 2021-06-18T02:05:19.200858 | 2019-09-17T18:09:57 | 2019-09-17T18:09:57 | 185,334,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # Generated by Django 2.1.8 on 2019-06-21 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rodentdb', '0035_rodent_origin'),
]
operations = [
migrations.AlterModelOptions(
name='origin',
options={'ordering': ['name'], 'verbose_name': 'origin', 'verbose_name_plural': 'origins'},
),
migrations.AlterField(
model_name='origin',
name='name',
field=models.CharField(max_length=40, unique=True),
),
]
| [
"hugo.cachitas@research.fchampalimaud.org"
] | hugo.cachitas@research.fchampalimaud.org |
b914488248139a3f003a0b38e8d485dc08daed30 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Skimage_numpy/source/skimage/segmentation/_clear_border.py | a44f07859aa9bb75d87a4367a8a97fc326a07f6c | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,504 | py | import numpy as np
from ..measure import label
def clear_border(labels, buffer_size=0, bgval=0, in_place=False):
"""Clear objects connected to the label image border.
The changes will be applied directly to the input.
Parameters
----------
labels : (N, M) array of int
Label or binary image.
buffer_size : int, optional
The width of the border examined. By default, only objects
that touch the outside of the image are removed.
bgval : float or int, optional
Cleared objects are set to this value.
in_place : bool, optional
Whether or not to manipulate the labels array in-place.
Returns
-------
labels : (N, M) array
Cleared binary image.
Examples
--------
>>> import numpy as np
>>> from skimage.segmentation import clear_border
>>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 1, 0],
... [0, 0, 0, 0, 1, 0, 0, 0, 0],
... [1, 0, 0, 1, 0, 1, 0, 0, 0],
... [0, 0, 1, 1, 1, 1, 1, 0, 0],
... [0, 1, 1, 1, 1, 1, 1, 1, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> clear_border(labels)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
image = labels
rows, cols = image.shape
if buffer_size >= rows or buffer_size >= cols:
raise ValueError("buffer size may not be greater than image size")
# create borders with buffer_size
borders = np.zeros_like(image, dtype=np.bool_)
ext = buffer_size + 1
borders[:ext] = True
borders[- ext:] = True
borders[:, :ext] = True
borders[:, - ext:] = True
# Re-label, in case we are dealing with a binary image
# and to get consistent labeling
labels = label(image, background=0)
number = np.max(labels) + 1
# determine all objects that are connected to borders
borders_indices = np.unique(labels[borders])
indices = np.arange(number + 1)
# mask all label indices that are connected to borders
label_mask = np.in1d(indices, borders_indices)
# create mask for pixels to clear
mask = label_mask[labels.ravel()].reshape(labels.shape)
if not in_place:
image = image.copy()
# clear border pixels
image[mask] = bgval
return image
| [
"master@MacBook-Pro-admin.local"
] | master@MacBook-Pro-admin.local |
66e328a42fc7eace24bdcf174e58a64c8389a711 | 337d17b845f5fdd7f32f6a0607e494eed488a601 | /leetcode/405-convert-number-hexadecimal.py | 3d8c5916e2972e42026261cbfded5ed8cf102540 | [] | no_license | karsibali/solutions | e6130abe026a26558434239cde39c6a14a9712ba | 4ba5d7ac41fecc87491cae2c88293bd798db31fd | refs/heads/master | 2020-04-29T00:13:34.168323 | 2018-12-27T15:43:26 | 2018-12-27T15:43:26 | 175,686,183 | 1 | 0 | null | 2019-03-14T19:27:00 | 2019-03-14T19:27:00 | null | UTF-8 | Python | false | false | 407 | py | SYMS = '0123456789abcdef'
class Solution(object):
def toHex(self, num):
if num < 0:
num = (1 << 32) + num
digits = []
while num > 0:
digits.append(SYMS[num & 15])
num >>= 4
return digits and ''.join(reversed(digits)) or '0'
if __name__ == '__main__':
f = Solution().toHex
assert f(26) == "1a"
assert f(-1) == "ffffffff"
| [
"ozan.onay@gmail.com"
] | ozan.onay@gmail.com |
cdcd7bc6c9374134941acf33f390338df306523c | 19e3fc8e92b1430625987f97068889dfa94caafd | /concierge/endpoints/templates.py | 62ce78437948765cd8443476228cbdc8fd8f3da0 | [
"MIT"
] | permissive | creativcoder/concierge | 167ac092d71b7757e181309e70e5c7600911796b | 8f7bd8f45f8bb9ec2406cd5063df8480c1729d24 | refs/heads/master | 2020-12-25T23:08:19.094852 | 2016-03-24T07:10:22 | 2016-03-24T07:10:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | # -*- coding: utf-8 -*-
import datetime
import distutils.spawn
import os.path
import sys
import concierge
HEADER = """
# THIS FILE WAS AUTOGENERATED BY concierge on {date}.
# IT MAKES NO SENSE TO EDIT IT MANUALLY!
#
# CONCIERGERC FILE: {rc_file}
#
# PLEASE VISIT https://github.com/9seconds/concierge FOR DETAILS.
""".strip() + "\n\n"
SYSTEMD_CONFIG = """
[Unit]
Description=Daemon for converting ~/.concierge to ~/.ssh/config
After=syslog.target
[Service]
ExecStart={command} -o {sshconfig}
Restart=on-failure
[Install]
WantedBy=multi-user.target
""".strip()
SYSTEMD_SERVICE_NAME = "concierge.service"
SYSTEMD_INSTRUCTIONS = """
Please execute following lines or compose script:
$ mkdir -p "{systemd_user_path}" || true
$ cat > "{systemd_user_service_path}" <<EOF
{systemd_config}
EOF
$ systemctl --user enable {service_name}
$ systemctl --user start {service_name}
""".strip()
def make_header(**kwargs):
return HEADER.format(
date=kwargs.get("date", datetime.datetime.now().ctime()),
rc_file=kwargs.get("rc_file", "???"))
def make_systemd_script():
systemd_user_path = os.path.join(concierge.HOME_DIR,
".config", "systemd", "user")
systemd_user_service_path = os.path.join(systemd_user_path,
SYSTEMD_SERVICE_NAME)
systemd_config = SYSTEMD_CONFIG.format(
command=distutils.spawn.find_executable(sys.argv[0]),
sshconfig=concierge.DEFAULT_SSHCONFIG)
yield 'mkdir -p "{0}" || true'.format(systemd_user_path)
yield 'cat > "{0}" <<EOF\n{1}\nEOF'.format(systemd_user_service_path,
systemd_config.strip())
yield "systemctl --user enable {0}".format(SYSTEMD_SERVICE_NAME)
yield "systemctl --user start {0}".format(SYSTEMD_SERVICE_NAME)
| [
"nineseconds@yandex.ru"
] | nineseconds@yandex.ru |
b6a42690360b47fc27b39e105511259c5474aad7 | 241cc30b91e910caf6a9a47a156813ccc495e069 | /blog/management/commands/sync_user_avatar.py | 263734c963e382f9ba01c7abbdff9fc32a2c69f3 | [
"MIT"
] | permissive | colinshin/DjangoBlog | 9f430ffb3faae32553b2ec17a2351aa7dec36ce7 | c6277d2c35b021806be0fa623f1451c201e9677d | refs/heads/master | 2022-11-20T09:58:17.937199 | 2022-10-28T03:36:18 | 2022-10-28T03:36:18 | 266,242,440 | 1 | 0 | MIT | 2020-05-23T01:42:35 | 2020-05-23T01:42:34 | null | UTF-8 | Python | false | false | 894 | py | from django.core.management.base import BaseCommand
from djangoblog.utils import save_user_avatar
from oauth.models import OAuthUser
class Command(BaseCommand):
help = 'sync user avatar'
def handle(self, *args, **options):
users = OAuthUser.objects.filter(picture__isnull=False).exclude(
picture__istartswith='https://resource.lylinux.net').all()
self.stdout.write('开始同步{count}个用户头像'.format(count=len(users)))
for u in users:
self.stdout.write('开始同步:{id}'.format(id=u.nikename))
url = u.picture
url = save_user_avatar(url)
if url:
self.stdout.write(
'结束同步:{id}.url:{url}'.format(
id=u.nikename, url=url))
u.picture = url
u.save()
self.stdout.write('结束同步')
| [
"liangliangyy@gmail.com"
] | liangliangyy@gmail.com |
2b282ed9401f181196eddae1813de58d0ccb22f7 | 372af35b599f45b2cb2cc365afd2ece5c31ed188 | /python/EE_Calulator/unit_attack_multiplyer.py | 5ed28b8ae40a599b4e3425bb2b0010503d8b8976 | [] | no_license | byrdie/EE-Calulator | 0eea538d7c9b6ea475cb967951ba11b7b42a7dd5 | 8526bbb5d40887a63afcb0cadc6f0262bc336c27 | refs/heads/master | 2021-08-28T02:56:56.191092 | 2017-12-11T02:59:56 | 2017-12-11T02:59:56 | 112,962,480 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py |
import csv
import dbobject as db_obj
import dbfamily as db_fam
# import EE databases
obj = db_obj.object_import()
fam = db_fam.family_import()
# enter indices of needed object fields
name_index = 0
attack_index = 24
attackMode_index = 32
family_index = 2
names = ['']
M = [] # attack multiplier matrix
k_flag = False # flag for first element
# Calculate attack multipliers for each object
for k in range(len(obj)): # loop over attacking units
M_row = [] # next row of multiplier matrix
l_flag = False # flag for first element
# Only calculate for units with non-zero attack
attack_k = obj[k][attack_index]
if attack_k <= 0:
continue
# name of attacker
name_k = obj[k][name_index]
for l in range(len(obj)): # loop over defending units
# Only calculate for units with non-zero attack
attack_l = obj[l][attack_index]
if attack_l <= 0:
continue
# name of defender
name_l = obj[l][name_index]
# save names
print(k)
if k_flag == False:
names.append(name_l)
if l_flag == False:
M_row.append(name_k)
l_flag = True
# Determine coordinates in attack multiplier matrix
attackMode_k = obj[k][attackMode_index] + 3
family_l = obj[l][family_index]
# load attack multiplyer for this unit pair
multiplier = fam[family_l][attackMode_k]
M_row.append(multiplier)
if k_flag == False:
M.append(names)
k_flag = True
M.append(M_row)
with open('../../excel/attack_mult_export.csv', 'w') as export_file:
export_writer = csv.writer(export_file, delimiter=',')
for i in range(len(M)):
export_writer.writerow(M[i]) | [
"roytsmart@gmail.com"
] | roytsmart@gmail.com |
8d1731506c2ee63018c08b01f36688ce01f6e895 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/_PYTHON/maths/next_bigger.py | 390668adb236f2ce42e24ef6f8e54baa18b6c7cf | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 1,688 | py | """
I just bombed an interview and made pretty much zero
progress on my interview question.
Given a number, find the next higher number which has the
exact same set of digits as the original number.
For example: given 38276 return 38627.
given 99999 return -1. (no such number exists)
Condensed mathematical description:
Find largest index i such that array[i − 1] < array[i].
(If no such i exists, then this is already the last permutation.)
Find largest index j such that j ≥ i and array[j] > array[i − 1].
Swap array[j] and array[i − 1].
Reverse the suffix starting at array[i].
"""
import unittest
def next_bigger(num):
digits = [int(i) for i in str(num)]
idx = len(digits) - 1
while idx >= 1 and digits[idx - 1] >= digits[idx]:
idx -= 1
if idx == 0:
return -1 # no such number exists
pivot = digits[idx - 1]
swap_idx = len(digits) - 1
while pivot >= digits[swap_idx]:
swap_idx -= 1
digits[swap_idx], digits[idx - 1] = digits[idx - 1], digits[swap_idx]
digits[idx:] = digits[
: idx - 1 : -1
] # prefer slicing instead of reversed(digits[idx:])
return int("".join(str(x) for x in digits))
class TestSuite(unittest.TestCase):
def test_next_bigger(self):
self.assertEqual(next_bigger(38276), 38627)
self.assertEqual(next_bigger(12345), 12354)
self.assertEqual(next_bigger(1528452), 1528524)
self.assertEqual(next_bigger(138654), 143568)
self.assertEqual(next_bigger(54321), -1)
self.assertEqual(next_bigger(999), -1)
self.assertEqual(next_bigger(5), -1)
if __name__ == "__main__":
unittest.main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
64f08c07a8fdcdd9e63ac8ac69e8275d53666fa4 | e4eabccc6d971289cf13653d1b6f290e39b870ab | /1619-path-crossing/path-crossing.py | ff2eb4223fd49dfd11600445aa563f25aee32bac | [] | no_license | HEroKuma/leetcode | 128b38a9f559dc9e3f21c86a47ede67ad72f7675 | b3045aaedbe98eddc7e4e518a03a9337a63be716 | refs/heads/master | 2023-01-03T12:12:31.018717 | 2020-11-01T16:56:47 | 2020-11-01T16:56:47 | 260,488,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # Given a string path, where path[i] = 'N', 'S', 'E' or 'W', each representing moving one unit north, south, east, or west, respectively. You start at the origin (0, 0) on a 2D plane and walk on the path specified by path.
#
# Return True if the path crosses itself at any point, that is, if at any time you are on a location you've previously visited. Return False otherwise.
#
#
# Example 1:
#
#
#
#
# Input: path = "NES"
# Output: false
# Explanation: Notice that the path doesn't cross any point more than once.
#
#
# Example 2:
#
#
#
#
# Input: path = "NESWW"
# Output: true
# Explanation: Notice that the path visits the origin twice.
#
#
# Constraints:
#
#
# 1 <= path.length <= 10^4
# path will only consist of characters in {'N', 'S', 'E', 'W}
#
#
class Solution:
def isPathCrossing(self, path: str) -> bool:
x, y = 0, 0
trace = [(x, y)]
for i in path:
if i == "N":
y += 1
elif i == "S":
y -= 1
elif i == "E":
x += 1
else:
x -= 1
if (x, y) in trace:
return True
trace.append((x, y))
return False
| [
"zx8733520+github@gapp.nthu.edu.tw"
] | zx8733520+github@gapp.nthu.edu.tw |
f073f3d41691a29c9540897a209ee146d29935fb | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/deepgram_kur/kur-master/kur/model/executor.py | 7df74460d4aa7b6a18447c52535009d063edb957 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 24,208 | py | """
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
import shutil
import math
import time
import traceback
import numpy
import tqdm
from ..utils import get_any_value, CriticalSection, parallelize
from ..loggers import PersistentLogger
from .hooks import TrainingHook
logger = logging.getLogger(__name__)
###############################################################################
class RetryException(Exception):
	""" Raised internally to signal that the current batch failed and the
		operation should be retried on a fresh batch of data.
	"""
###############################################################################
class Executor:
""" Class for using models.
"""
MAX_RETRIES = 3
###########################################################################
def __init__(self, model, loss=None, optimizer=None):
""" Creates a new executor.
# Arguments
model: Model instance. The model to train.
loss: Loss instance. The loss function to use in training/testing.
optimizer: Optimizer instance. The optimizer to use in training.
"""
self.model = model
self.loss = loss
self.optimizer = optimizer
###########################################################################
def compile(self, target=None, recompile=False, with_provider=None,
**kwargs):
""" Compiles a model.
This generates a backend-specific representation of the model,
suitable for training.
# Arguments
recompile: bool (default: False). If the model has already been
compiled, it is not compiled again unless this flag is True.
with_provider: Provider instance or None (default: None). If you
want to merge the model's auxiliary data sources into your
provider, you can specify the Provider instance here.
# Return value
None
"""
if target is None:
if self.loss is None and self.optimizer is None:
target = 'evaluate'
elif self.optimizer is None:
target = 'test'
else:
target = 'train'
if not recompile:
if self.model.compiled is not None \
and target in self.model.compiled:
return
if not self.model.is_built():
logger.warning('This model has never been built before. We are '
'going to try to build it now. But the model should always be '
'built with Model.build() before trying to compile it, just '
'to ensure that everything has been parsed as you expect.')
if with_provider is not None:
self.model.register_provider(with_provider)
self.model.build()
logger.debug('Recompiling the model.')
self.model.backend.compile(
model=self.model,
loss=self.loss if target != 'evaluate' else None,
optimizer=None if target != 'train' else self.optimizer,
blocking=True,
**kwargs
)
if with_provider is not None:
self.model.supplement_provider(with_provider)
###########################################################################
	def test(self, provider, validating=False, hooks=None, step=False):
		""" Tests/validates the model on some data.

			# Arguments

			provider: Provider instance. The data provider which serves the
				data to be evaluated on.
			validating: bool (default: False). If False, the console output
				refers to this process as "testing"; otherwise, it is referred
				to as "validating."
			hooks: list or None (default: None). Evaluation hooks; each one's
				`apply()` is chained over the first batch's predictions.
			step: bool (default: False). If True, `self.do_step(...)` is
				called before each batch — presumably an interactive
				debugging pause; confirm against do_step's implementation.

			# Return value

			A dictionary mapping each model output name to its average loss
			over the whole provider, or None if the provider yielded no data.
		"""
		self.compile('test', with_provider=provider)

		if validating:
			desc = ('Validating', 'Validation')
		else:
			desc = ('Testing', 'Test')

		# Running state: weighted-average loss, number of samples seen so
		# far, and the first (prediction, batch) pair for the hooks below.
		test_loss = None
		n_entries = 0
		first_batch = None

		# Wrap the backend call so that transient failures raise
		# RetryException instead of aborting the whole run.
		test_func = self.retry(self.model.backend.test)

		with tqdm.tqdm(
					total=len(provider),
					unit='samples',
					desc='{}, loss=N/A'.format(desc[0])
				) as pbar:

			# Present each batch to the network.
			for num_batches, batch in parallelize(enumerate(provider)):
				if step:
					self.do_step('Test', num_batches, batch)
				try:
					prediction, batch_loss = test_func(
						model=self.model,
						data=batch
					)
				except RetryException:
					# Give up on this batch and move on to the next one.
					continue

				if step and logger.isEnabledFor(logging.DEBUG):
					print(prediction)

				if first_batch is None:
					first_batch = (prediction, batch)

				batch_size = len(get_any_value(batch))

				new_entries = n_entries + batch_size

				# Fold this batch's loss into the running average, weighting
				# each term by the number of samples it represents.
				if test_loss is None:
					test_loss = batch_loss
				else:
					test_loss = {
						k : v * (n_entries / new_entries) + \
							batch_loss[k] * (batch_size / new_entries)
						for k, v in test_loss.items()
					}
				n_entries = new_entries

				# Update the progress bar
				pbar.set_description('{}, loss={:.3f}'.format(
					desc[0],
					sum(test_loss.values())
				))
				pbar.update(batch_size)

		if not n_entries:
			logger.warning('No data provided to validation/testing system.')
			return None

		logger.info('%s loss: %.3f', desc[1], sum(test_loss.values()))

		# Chain the hooks over the first batch: each hook receives the
		# previous hook's output (re-wrapped into a (data, batch) tuple if
		# the hook returned a bare value).
		if hooks and first_batch is not None:
			prediction, batch = first_batch
			prev = first_batch
			for hook in hooks:
				new_prev = hook.apply(prev, first_batch, self.model)
				prev = (new_prev, prev[1]) \
					if not isinstance(new_prev, tuple) else new_prev

		return test_loss
###########################################################################
def train(self, *args, last_weights=None, log=None, training_hooks=None,
**kwargs):
""" Trains the model on some data.
This is the public entry point for training. It wraps the business
logic so that it can handle error conditions.
"""
reason = 'unknown'
try:
result = self.wrapped_train(
*args,
log=log,
training_hooks=training_hooks,
**kwargs
)
except (KeyboardInterrupt, Exception) as exc:
logger.exception('Exception raised during training.')
reason = traceback.format_exception_only(type(exc), exc)[0].strip()
raise
else:
reason = 'success'
return result
finally:
if last_weights is not None:
logger.info('Saving most recent weights: %s', last_weights)
with CriticalSection():
self.model.save(last_weights)
if log is not None:
log.flush()
if training_hooks:
for hook in training_hooks:
hook.notify(
TrainingHook.TRAINING_END,
log=log,
info={'Reason' : reason}
)
###########################################################################
	def wrapped_train(self, provider, *, validation=None, epochs=None,
		log=None, best_train=None, best_valid=None, training_hooks=None,
		validation_hooks=None, checkpoint=None, step=False):
		""" Trains the model on some data.

			# Arguments

			provider: Provider instance. The data provider which serves the
				data to be trained on.
			validation: Provider instance or None (default: None). The data
				provider which serves validation data.
			epochs: int, dict, str, or None (default: None). The number of
				epochs to train for, or None (or 'inf'/'all'/'infinite'/
				'infinity') to train forever. A dict may supply 'number' plus
				a 'mode' of 'total' (count includes already-completed epochs
				from the log) or 'additional' (the default; count is on top
				of completed epochs).
			log: Log instance or None (default: None). The logger to save
				training statistics with.
			best_train: str or None (default: None). Where to save weights
				whenever the average training loss hits a new historical low.
			best_valid: str or None (default: None). Where to save weights
				whenever the validation loss hits a new historical low.
			training_hooks: list of TrainingHook or None (default: None).
				Notified at TRAINING_START, after each epoch (EPOCH_END), and
				after checkpoint validations (VALIDATION_END).
			validation_hooks: list or None (default: None). Forwarded to
				`test()` during validation runs.
			checkpoint: dict, str, or None (default: None). A path string, or
				a dict with a 'path' plus trigger counts under 'epochs',
				'batches', 'samples' (and/or 'minutes'), and an optional
				'validation' entry (True, or a number of batches).
			step: bool (default: False). If True, `self.do_step(...)` is
				called before each batch — presumably an interactive
				debugging pause; confirm against do_step's implementation.

			# Return value

			None
		"""

		#######################################################################
		# Process checkpoint requirements: normalize `checkpoint` into a dict
		# with a 'path' and at least one integer trigger.
		if isinstance(checkpoint, dict):
			if 'path' not in checkpoint:
				checkpoint['path'] = 'checkpoint'
			found = False
			for k in ('epochs', 'batches', 'samples'):
				if k in checkpoint:
					if not isinstance(checkpoint[k], int):
						raise ValueError('Expected "{}" key in "checkpoint" '
							'to be an integer. Received: {}'.format(k,
							checkpoint[k]))
					found = True
			if not found:
				# No trigger given: default to checkpointing every epoch.
				checkpoint['epochs'] = 1
		elif isinstance(checkpoint, str):
			# A bare string is treated as a path with a once-per-epoch trigger.
			checkpoint = {
				'path' : checkpoint,
				'epochs' : 1
			}
		elif checkpoint is not None:
			raise ValueError('Unknown format for "checkpoint". Expected a '
				'single file or a dictionary. Instead we received: {}'
				.format(checkpoint))

		#######################################################################
		# Parse logs: recover historical best losses, which are only
		# available from a persistent logger.
		if log is None:
			logger.info('No log specified, so no historical loss information '
				'is available.')
			best_train_loss = best_valid_loss = None
		elif not isinstance(log, PersistentLogger):
			logger.info('Log type is non-persistent, so no historical loss '
				'information is available.')
			best_train_loss = best_valid_loss = None
		else:
			best_train_loss = log.get_best_training_loss()
			if best_train_loss is not None:
				logger.info('Best historical training loss: %.3f',
					best_train_loss)
			else:
				logger.info('No historical training loss available from logs.')

			best_valid_loss = log.get_best_validation_loss()
			if best_valid_loss is not None:
				logger.info('Best historical validation loss: %.3f',
					best_valid_loss)
			else:
				logger.info(
					'No historical validation loss available from logs.')

		#######################################################################
		# Parse desired number of epochs
		completed_epochs = log.get_number_of_epochs() if log else 0
		if not completed_epochs:
			logger.info('No previous epochs.')
		else:
			logger.info('Restarting from epoch %d.', completed_epochs+1)

		valid_modes = ('total', 'additional')
		default_mode = 'additional'
		mode = default_mode
		if isinstance(epochs, dict):
			mode = epochs.get('mode', default_mode)
			if mode not in valid_modes:
				raise ValueError('If "mode" in "epochs" must be one of: {}. '
					'Instead, we received: {}.'.format(', '.join(valid_modes),
					mode))
			if mode == 'total' and log is None:
				# 'total' mode is meaningless without a log to count
				# previously completed epochs; fall back to 'additional'.
				logger.warning('The epoch specification has "mode" set to '
					'"%s". This mode requires a log to be used correctly. Kur '
					'will proceed as if "mode" were "%s".', mode, default_mode)
				mode = default_mode
			epochs = epochs.get('number')
		if epochs in ('inf', 'all', 'infinite', 'infinity'):
			epochs = None
		elif not isinstance(epochs, (int, type(None))):
			raise ValueError('Expected "epochs" to be a dictionary or '
				'integer. Instead, we received: {}.'.format(epochs))

		logger.debug('Epoch handling mode: %s', mode)
		if epochs is not None:
			if mode == 'additional':
				# Train for `epochs` MORE epochs on top of those completed.
				epochs += completed_epochs

		#######################################################################
		# Local variables

		# The name of the most recently saved weight file. If the weights
		# change, this should be reset to None. Otherwise, saving weights can
		# be as simple as copying the previously saved file.
		saved_recent = None

		# Counters for this training session, used by the checkpoint triggers.
		session = {
			'epochs' : 0,
			'batches' : 0,
			'samples' : 0,
			'minutes' : time.perf_counter() / 60
		}
		last_checkpoint = session.copy()

		# One less than the first epoch to run; incremented at loop top.
		epoch = completed_epochs - 1
		train_func = self.retry(self.model.backend.train)

		#######################################################################
		def run_validation(num_batches=None):
			""" Executes a validation run, optionally limited to
				`num_batches` batches, saving best-validation weights and
				logging the result. Returns the validation loss dict or None.
			"""
			if validation is None:
				return None

			nonlocal best_valid_loss

			# Continue with a validation run.
			try:
				# Temporarily cap the validation provider's batch count,
				# restoring it afterwards.
				if num_batches is not None and \
						hasattr(validation, 'num_batches'):
					previous_num_batches = validation.num_batches
					validation.num_batches = num_batches
				validation_loss = self.test(
					provider=validation,
					validating=True,
					hooks=validation_hooks
				)
			finally:
				if num_batches is not None and \
						hasattr(validation, 'num_batches'):
					validation.num_batches = previous_num_batches

			if validation_loss is None:
				return None

			cur_validation_loss = sum(validation_loss.values())
			if best_valid is not None:
				if best_valid_loss is None or \
						cur_validation_loss < best_valid_loss:
					logger.info(
						'Saving best historical validation weights: %s',
						best_valid
					)
					best_valid_loss = cur_validation_loss
					save_or_copy_weights(best_valid)

			if log is not None:
				log.log_validation(validation_loss, 'loss')

			return validation_loss

		#######################################################################
		def save_or_copy_weights(target):
			""" Saves the current model weights, copying the most recently
				saved weight file instead of re-serializing when possible.
			"""
			nonlocal saved_recent

			if saved_recent is None:
				logger.debug('Saving weights to: %s', target)
				with CriticalSection():
					self.model.save(target)
				saved_recent = target
			elif not os.path.exists(saved_recent):
				# The cached file disappeared out from under us; forget it
				# and fall back to a real save.
				logger.warning('Recently saved weight file seems to have '
					'vanished: %s', saved_recent)
				saved_recent = None
				save_or_copy_weights(target)
			elif os.path.exists(target) and \
					os.path.samefile(target, saved_recent):
				logger.debug('Recent weight file seems the same as the '
					'soon-to-be-saved file. Skipping: %s', target)
			else:
				# Weights haven't changed since the last save; copy the file.
				logger.debug('Copying weights from: %s', saved_recent)
				with CriticalSection():
					shutil.rmtree(target, ignore_errors=True)
					shutil.copytree(saved_recent, target)

		#######################################################################
		def run_posttrain(n_entries, train_loss):
			""" Calculates training loss and saves if necessary.

				Read-only non-locals:
					n_entries, train_loss, best_train, log

				Read-write non-locals:
					best_train_loss
			"""
			nonlocal best_train_loss

			if not n_entries:
				logger.warning('No data provided to training loop.')
				return None

			cur_train_loss = sum(train_loss.values())
			logger.info('Training loss: %.3f', cur_train_loss)

			if best_train is not None:
				if best_train_loss is None or \
						cur_train_loss < best_train_loss:
					logger.info('Saving best historical training weights: '
						'%s', best_train)
					best_train_loss = cur_train_loss
					save_or_copy_weights(best_train)

			if log is not None:
				log.log_training(train_loss, 'loss')

			return cur_train_loss

		#######################################################################
		def run_training_hooks(cur_train_loss, validation_loss, status):
			""" Executes the training hooks, if necessary.

				Read-only non-locals:
					training_hooks, epoch, epochs, validation_loss
			"""
			if not training_hooks:
				return
			info = {
				'epoch' : epoch+1,
				'total_epochs' : epochs,
				'Training loss' : cur_train_loss
			}
			if validation is not None:
				info['Validation loss'] = validation_loss
			for hook in training_hooks:
				hook.notify(
					status,
					log=log,
					info=info
				)

		#######################################################################
		def run_checkpoint(*triggers, allow_validation=True):
			""" Runs the checkpoint triggers, if necessary.

				Fires at most once per call: the first trigger whose counter
				has advanced past its threshold saves a backup (and possibly
				validates), then resets the checkpoint baseline.
			"""
			nonlocal last_checkpoint

			if checkpoint is None:
				return

			for k in triggers:
				if k not in checkpoint:
					continue
				if session[k] - last_checkpoint[k] >= checkpoint[k]:
					# We need a checkpoint

					# Save the file if necessary.
					if checkpoint['path']:
						logger.info('Making checkpoint backup: %s',
							checkpoint['path'])
						save_or_copy_weights(checkpoint['path'])

					# Validate if necessary.
					if checkpoint.get('validation', False) \
							and allow_validation:
						# 'validation' may be True (full run) or a batch count.
						if isinstance(checkpoint['validation'], bool):
							num_batches = None
						else:
							num_batches = checkpoint['validation']
						val_loss = run_validation(num_batches)
						run_training_hooks(None, val_loss,
							TrainingHook.VALIDATION_END)

					last_checkpoint = session.copy()
					break

		#######################################################################
		# Prepare to train

		self.compile('train', with_provider=provider)
		provider.source_shapes()

		if training_hooks:
			for hook in training_hooks:
				hook.notify(
					TrainingHook.TRAINING_START,
					log=log
				)

		#######################################################################
		# Main training loop.
		while True:
			epoch += 1
			if epochs is not None and epoch >= epochs:
				print('Completed {} epochs.'.format(epochs))
				break

			print()

			###################################################################
			# START: Train one epoch

			# Running state for this epoch: weighted-average loss and the
			# number of samples seen so far.
			train_loss = None
			n_entries = 0
			with tqdm.tqdm(
						total=len(provider),
						unit='samples',
						desc='Epoch {}/{}, loss=N/A'
							.format(epoch+1, epochs or 'inf')
					) as pbar:

				# Present each batch to the network.
				for num_batches, batch in parallelize(enumerate(provider)):

					# The loss averaged over this batch.
					logger.debug('Training on batch...')

					if step:
						self.do_step(
							'Train, Epoch {}'.format(session['epochs']+1),
							num_batches, batch)

					try:
						prediction, batch_loss = train_func(
							model=self.model, data=batch)
					except RetryException:
						# Give up on this batch and move on to the next one.
						continue

					if step and logger.isEnabledFor(logging.DEBUG):
						print(prediction)

					# We just modified the weights. Invalidate the name of the
					# last weight file.
					saved_recent = None

					logger.debug('Finished training on batch.')

					# How many entries we just processed.
					batch_size = len(get_any_value(batch))

					if log is not None:
						log.log_batch(batch_size, batch_loss, 'loss')

					# Update our session statistics.
					session['batches'] += 1
					session['samples'] += batch_size
					session['minutes'] = time.perf_counter() / 60

					# Checkpoint if necessary
					run_checkpoint('samples', 'batches', 'minutes',
						allow_validation=True)

					# How many entries we've processed this epoch.
					new_entries = n_entries + batch_size

					# Average the per-batch loss across training, weighting
					# each term by the number of samples it represents.
					# This will give us our average "training loss".
					if train_loss is None:
						train_loss = batch_loss
					else:
						train_loss = {
							k : v * (n_entries / new_entries) + \
								batch_loss[k] * (batch_size / new_entries)
							for k, v in train_loss.items()
						}
					n_entries = new_entries

					# Update the progress bar with the current loss.
					# Note that `batch_loss` is, in some sense, just the
					# instantaneous training loss. `train_loss` is the average
					# loss across the entire training set so far.
					pbar.set_description('Epoch {}/{}, loss={:.3f}'.format(
						epoch+1, epochs or 'inf', sum(train_loss.values())
					))
					pbar.update(batch_size)

					# Abort immediately on a diverged model.
					for k, v in batch_loss.items():
						if math.isnan(v):
							logger.error('Received NaN loss value for '
								'model output "%s". Make sure that your '
								'inputs are all normalized and that the '
								'learning rate is not too high. Sometimes '
								'different algorithms/implementations '
								'work better than others, so you can try '
								'switching optimizers or backend.', k)
							raise ValueError('Model loss is NaN.')

			# END: Train one epoch
			###################################################################

			# Update our session statistics.
			session['epochs'] += 1

			# Checkpoint if necessary
			run_checkpoint('epochs', allow_validation=False)

			# Check to see what our current training loss is.
			cur_train_loss = run_posttrain(n_entries, train_loss)

			# Validate
			validation_loss = run_validation()

			# Execute training hooks.
			run_training_hooks(
				cur_train_loss,
				validation_loss,
				status=TrainingHook.EPOCH_END
			)
###########################################################################
    def evaluate(self, provider, callback=None, step=False):
        """ Evaluates the model on some data.
            # Arguments
            provider: Provider instance. The data provider which serves the
                data to be evaluated.
            callback: function or None. If not None, the callback is called
                after each evaluation batch and is passed two parameters:
                `predicted` and `truth`, where `predicted` is the model output
                and `truth` is the ground truth data (if provided by
                `provider`; otherwise, `truth` is set to `None`).
            # Return value
            If `callback` is None, then this returns a tuple `(predicted,
            truth)`, where `predicted` is a dictionary whose keys are the names
            of the output nodes of the model, and whose respective values are
            arrays of predictions (one row per input sample). If the provider
            provides ground truth information, then `truth` has a similar
            structure to `predicted`; if ground truth information is not
            available, then `truth` is None.
            Otherwise, if `callback` is not None, this returns None.
        """
        self.compile('evaluate', with_provider=provider)
        # Accumulators; `result` and `truth` are allocated lazily once the
        # first batch arrives and the output keys are known.
        result = None
        truth = None
        has_truth = None
        total = len(provider)
        n_entries = 0
        # Wrap the backend call in retry logic so a transient failure skips
        # the batch instead of aborting the whole evaluation.
        eval_func = self.retry(self.model.backend.evaluate)
        with tqdm.tqdm(
            total=total,
            unit='samples',
            desc='Evaluating'
        ) as pbar:
            for num_batches, batch in parallelize(enumerate(provider)):
                if step:
                    # Interactive stepping: wait for user confirmation.
                    self.do_step('Evaluate', num_batches, batch)
                try:
                    evaluated, _ = eval_func(model=self.model, data=batch)
                except RetryException:
                    # The retry wrapper tolerated this failure (and logged
                    # it); skip the batch and move on.
                    continue
                if step and logger.isEnabledFor(logging.DEBUG):
                    print(evaluated)
                batch_size = len(get_any_value(batch))
                # Ground truth is considered present iff the batch supplies
                # every model output key; decided once, on the first batch.
                if has_truth is None:
                    has_truth = all(k in batch for k in self.model.outputs)
                if callback is None:
                    # There is no callback. We need to hang on to everything.
                    # NOTE(review): `total` comes from `len(provider)` and so
                    # should always be an int; the `total is None` branches
                    # below look unreachable -- confirm before removing.
                    if total is None:
                        # We don't know how many entries there will be.
                        if result is None:
                            # This is our first batch.
                            result = {k : [] for k in self.model.outputs}
                        for k, v in evaluated.items():
                            result[k].extend(v)
                        if has_truth:
                            if truth is None:
                                truth = {k : [] for k in self.model.outputs}
                            for k in truth:
                                truth[k].extend(batch[k])
                    else:
                        # We know how many entries there will be.
                        if result is None:
                            # This is our first batch.
                            result = {k : [None]*total for k in evaluated}
                        for k, v in evaluated.items():
                            result[k][n_entries:(n_entries+batch_size)] = v[:]
                        if has_truth:
                            if truth is None:
                                truth = {k : [None]*total for k in evaluated}
                            for k in truth:
                                truth[k][n_entries:(n_entries+batch_size)] = \
                                    batch[k][:]
                else:
                    # NOTE(review): `truth` is never populated on this path,
                    # so the callback always receives None here even when the
                    # provider supplies ground truth -- confirm against the
                    # docstring's stated contract.
                    callback(evaluated, truth)
                n_entries += batch_size
                pbar.update(batch_size)
        if callback is not None:
            return
        if total is None:
            for k, v in result.items():
                result[k] = numpy.concatenate(v)
            for k, v in truth.items():
                truth[k] = numpy.concatenate(v)
        return result, truth
###########################################################################
def do_step(self, what, num_batches, batch):
""" Wait for user input before running a single batch of data.
"""
print('{}, Batch {}:'.format(what, num_batches+1))
if logger.isEnabledFor(logging.DEBUG):
for k, v in batch.items():
print('{} {}: {}'.format(
k,
v.shape if hasattr(v, 'shape') else \
'(list, {} entries)'.format(len(v)),
v
))
input('Press ENTER to continue...')
###########################################################################
def retry(self, func):
""" Creates a wrapper that implements some retry semantics.
"""
def try_func(*args, **kwargs):
""" Wraps a function with some retry logic.
"""
try:
result = func(*args, **kwargs)
# Catch Exception so that we don't catch KeyboardInterrupt.
except Exception:
try_func.counter += 1
if try_func.counter > Executor.MAX_RETRIES:
logger.exception(
'Failed to execute on batch. No more retries.')
raise
logger.exception('Failed to execute on batch. Tolerating up '
'to %d more consecutive failures.',
Executor.MAX_RETRIES - try_func.counter)
raise RetryException
else:
try_func.counter = 0
return result
try_func.counter = 0
return try_func
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
| [
"659338505@qq.com"
] | 659338505@qq.com |
43fdde8988ff5a86173b9cbdcbd8468ed3c5ab0d | a4410fa34651da92dbce9ea0807d4a72a4802177 | /python/hsfs/core/job.py | 5876cc880a8082f141e02539d450831908da25cd | [
"Apache-2.0"
] | permissive | logicalclocks/feature-store-api | 33797e2b4681d8948998d292a3ef8f551979ac08 | 3e67b26271e43b1ce38bd1e872bfb4c9212bb372 | refs/heads/master | 2023-09-01T03:41:47.750367 | 2023-08-30T18:25:59 | 2023-08-30T18:25:59 | 232,286,451 | 59 | 42 | Apache-2.0 | 2023-09-13T11:52:55 | 2020-01-07T09:10:14 | Python | UTF-8 | Python | false | false | 4,292 | py | #
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import humps
from hsfs import engine
from hsfs.client.exceptions import FeatureStoreException
from hsfs.core import job_api
class Job:
    """A job registered with the backend's job service.

    Wraps the jobs REST API (via ``job_api.JobApi``) to launch the job and
    to query the state of its most recent execution.
    """

    def __init__(
        self,
        id,
        name,
        creation_time,
        config,
        job_type,
        creator,
        executions=None,
        type=None,
        href=None,
        expand=None,
        items=None,
        count=None,
    ):
        # Only part of the payload is retained; the remaining parameters
        # (creation_time, job_type, creator, type, expand, items, count)
        # are accepted so that ``cls(**payload)`` in ``from_response_json``
        # does not fail on extra response fields.
        self._id = id
        self._name = name
        self._executions = executions
        self._href = href
        self._config = config
        # REST client for the jobs endpoints.
        self._job_api = job_api.JobApi()

    @classmethod
    def from_response_json(cls, json_dict):
        """Build a ``Job`` from a raw (camelCase) REST response dict."""
        # Job config should not be decamelized when updated
        config = json_dict.pop("config")
        json_decamelized = humps.decamelize(json_dict)
        json_decamelized["config"] = config
        return cls(**json_decamelized)

    @property
    def name(self):
        # Name of the job.
        return self._name

    @property
    def id(self):
        # Identifier of the job.
        return self._id

    @property
    def executions(self):
        # Executions supplied at construction time, if any.
        return self._executions

    @property
    def href(self):
        # REST resource link for this job (used to build the job URL).
        return self._href

    @property
    def config(self):
        """Configuration for the job"""
        return self._config

    def run(self, args: str = None, await_termination: bool = True):
        """Run the job.

        Runs the job, by default awaiting its completion.

        !!! example
            ```python
            # connect to the Feature Store
            fs = ...

            # get the Feature Group instances
            fg = fs.get_or_create_feature_group(...)

            # insert in to feature group
            job, _ = fg.insert(df, write_options={"start_offline_materialization": False})

            # run job
            job.run()
            ```

        # Arguments
            args: Optional runtime arguments for the job.
            await_termination: Identifies if the client should wait for the job to complete, defaults to True.
        """
        print(f"Launching job: {self.name}")
        self._job_api.launch(self.name, args=args)
        print(
            "Job started successfully, you can follow the progress at \n{}".format(
                engine.get_instance().get_job_url(self.href)
            )
        )
        engine.get_instance().wait_for_job(self, await_termination=await_termination)

    def get_state(self):
        """Get the state of the job.

        # Returns
            `state`. Current state of the job, which can be one of the following:
            `INITIALIZING`, `INITIALIZATION_FAILED`, `FINISHED`, `RUNNING`, `ACCEPTED`,
            `FAILED`, `KILLED`, `NEW`, `NEW_SAVING`, `SUBMITTED`, `AGGREGATING_LOGS`,
            `FRAMEWORK_FAILURE`, `STARTING_APP_MASTER`, `APP_MASTER_START_FAILED`,
            `GENERATING_SECURITY_MATERIAL`, `CONVERTING_NOTEBOOK`
        """
        last_execution = self._job_api.last_execution(self)
        if len(last_execution) != 1:
            raise FeatureStoreException("No executions found for job")
        return last_execution[0].state

    def get_final_state(self):
        """Get the final state of the job.

        # Returns
            `final_state`. Final state of the job, which can be one of the following:
            `UNDEFINED`, `FINISHED`, `FAILED`, `KILLED`, `FRAMEWORK_FAILURE`,
            `APP_MASTER_START_FAILED`, `INITIALIZATION_FAILED`. `UNDEFINED` indicates
            that the job is still running.
        """
        last_execution = self._job_api.last_execution(self)
        if len(last_execution) != 1:
            raise FeatureStoreException("No executions found for job")
        return last_execution[0].final_status
| [
"noreply@github.com"
] | logicalclocks.noreply@github.com |
da3e82dfc76303e43f05fa7cf081576377d5b684 | d6b99ab3cc7108f4f0cc0be899641ac990e30db9 | /multipleOf3or5/test.py | a42133ea858991b66c5473b82f3bb50e49e4df3b | [] | no_license | AsemAntar/codewars_problems | ef97e8a8058551276cdb943a07474cbeb9353c4d | c0ae0a769e16211c2b8e325d1116a6cebd3be016 | refs/heads/master | 2020-08-10T02:01:12.411030 | 2019-12-15T22:45:20 | 2019-12-15T22:45:20 | 214,229,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | import unittest
from multiple_of_3_or_5 import solutions, solution, math_solution
class TESTSOLUTIONS(unittest.TestCase):
    """Checks every implementation of the multiples-of-3-or-5 kata against
    the same table of known results."""

    # (argument, expected sum, failure message) triples shared by all tests.
    CASES = (
        (10, 23, 'should be 23'),
        (11, 33, 'should be 33'),
        (16, 60, 'should be 60'),
        (26, 168, 'should be 168'),
    )

    def _check(self, implementation):
        # Shared driver: exercise one implementation over every known case,
        # isolating each assertion in its own subtest.
        for argument, expected, message in self.CASES:
            with self.subTest():
                self.assertEqual(implementation(argument), expected, message)

    def test_solutions(self):
        self._check(solutions)

    def test_solution(self):
        self._check(solution)

    def test_math_solution(self):
        self._check(math_solution)
# Run the suite when this file is executed directly: ``python test.py``.
if __name__ == '__main__':
    unittest.main()
| [
"asemantar@gmail.com"
] | asemantar@gmail.com |
8b2be3a0a6c6d7dd961060fb445080451144a87a | b8a13ecb7c0999954807e80c7470d8f752a3653b | /LearnPythonTheHardWay/Python3/ex19.py | c51d970fe06d475994d7b20c59cd0a164a7aa38d | [] | no_license | jbarcia/Python-Books | 59ca3d7b7fb1f2c1e3d1659f846032382af557a9 | 2106a2e5f56cdd4261bf870798a0a427d6137249 | refs/heads/master | 2021-01-19T00:24:59.727307 | 2017-01-05T00:07:13 | 2017-01-05T00:07:13 | 62,562,390 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,617 | py | #!/bin/python3
# ex19: Functions and Variables
# ex20: Functions and Files
# Import argv variables from the sys module
from sys import argv
# Unpack the command-line arguments: the script's own name, then the
# path of the file this exercise reads.
script, input_file = argv
# Define a function called print_all to print the whole contents of a
# file, with one file object as formal parameter
def print_all(f):
    """Print the entire remaining contents of the open file object *f*."""
    # Fixed for Python 3 (matching the shebang): ``print f.read()`` is the
    # Python 2 print *statement* and a SyntaxError under Python 3.
    print(f.read())
# Define a function called rewind to make the file reader go back to
# the first byte of the file, with one file object as formal parameter
def rewind(f):
    """Seek the file object *f* back to its first byte."""
    # make the file reader go back to the first byte of the file
    f.seek(0)
# Define a function called print_a_line to print a line of the file,
# with a integer counter and a file object as formal parameters
def print_a_line(line_count, f):
    """Print *line_count* followed by the next line read from *f*."""
    # Fixed for Python 3: ``print a, b`` is Python 2 statement syntax;
    # print() must be called as a function.
    print(line_count, f.readline())
# Open a file
current_file = open(input_file)

# Print "First let's print the whole file:"
# (all prints below converted from Python 2 statements to Python 3 calls)
print("First let's print the whole file:\n")
# call the print_all function to print the whole file
print_all(current_file)

# Print "Now let's rewind, kind of like a tape."
print("Now let's rewind, kind of like a tape.")
# Call the rewind function to go back to the beginning of the file
rewind(current_file)

# Now print three lines from the top of the file
# Print "Let's print three lines:"
print("Let's print three lines:")

# Set current line to 1
current_line = 1
# Print current line by calling print_a_line function
print_a_line(current_line, current_file)

# Set current line to 2 by adding 1
current_line = current_line + 1
# Print current line by calling print_a_line function
# (bug fix: the original passed ``current_file`` where the line number
# ``current_line`` was expected)
print_a_line(current_line, current_file)

# Set current line to 3 by adding 1
current_line = current_line + 1
# Print current line by calling print_a_line function
# (bug fix: the original called ``current_line(...)`` -- an int, not a
# function -- instead of ``print_a_line``)
print_a_line(current_line, current_file)
# Define a function named "cheese_and_crackers"
def cheese_and_crackers(cheese_count, boxes_of_crackers):
    """Report how many cheeses and boxes of crackers are available."""
    announcements = (
        "You have %d cheeses!" % cheese_count,
        "You have %d boxes of crackers!" % boxes_of_crackers,
        "Man that's enough for a party!",
        "Get a blanket.\n",
    )
    for announcement in announcements:
        print(announcement)
# Exercise drills: call cheese_and_crackers with literals, variables, and
# expressions to show that arguments are evaluated before the call.

# Print "We can just give the function numbers directly:"
print("We can just give the function numbers directly:")
cheese_and_crackers(20, 30)
# Print "OR, we can use variables from our script:"
print("OR, we can use variables from our script:")
# assign 10 to a variable named amount_of_cheese
amount_of_cheese = 10
# assign 50 to a variable named amount_of_crackers
amount_of_crackers = 50
# Call the function, with 2 variables as the actual parameters
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
# Print "We can even do math inside too:"
print("We can even do math inside too:")
# Call the function, with two math expression as the actual
# parameters. Python will first calculate the expressions and then
# use the results as the actual parameters
cheese_and_crackers(10 + 20, 5 + 6)
# Print "And we can combine the two, variables and math:"
print("And we can combine the two, variables and math:")
# Call the function, with two expression that consists of variables
# and math as the actual parameters
# NOTE(review): the second argument reuses amount_of_cheese; possibly
# ``amount_of_crackers + 1000`` was intended -- confirm before changing.
cheese_and_crackers(amount_of_cheese + 100, amount_of_cheese + 1000)
def print_args(*argv):
    """Print the argument count, a greeting, and every argument after the
    first.

    Returns 0 when at least one parameter follows the first argument,
    otherwise -1.
    """
    size = len(argv)
    print(size)
    # Robustness fix: calling with no arguments used to raise IndexError on
    # ``argv[0]``; treat it like the "no parameters" case instead.
    if size == 0:
        return -1
    print("Hello! Welcome to use %r!" % argv[0])
    if size > 1:
        for i in range(1, size):
            print("The param %d is %r" % (i, argv[i]))
        return 0
    return -1
# Exercise drills: call print_args with a variety of argument types to
# observe how *args packing behaves.

# 1. use numbers as actual parameters
print_args(10, 20, 30)
# 2. use string and numbers as actual parameters
print_args("print_args", 10, 20)
# 3. use strings as actual parameters
print_args("print_args", "Joseph", "Pan")
# 4. use variables as actual parameters
first_name = "Joseph"
last_name = "Pan"
print_args("print_args", first_name, last_name)
# 5. contain math expressions
print_args("print_args", 5*4, 2.0/5)
# 6. more complicated calculations
print_args("print_args", '.'*10, '>'*3)
# 7. more parameters
print_args("print_args", 10, 20, 30, 40, 50)
# 8. tuples as parameters
nums1 = (10, 20, 30)
nums2 = (40, 50, 60)
print_args("print_args", nums1, nums2)
# 9. more complicated types
nums3 = [70, 80, 90]
set1 = {"apple", "banana", "orange"}
dict1 = {'id': '0001', 'name': first_name+" "+last_name}
str1 = "Wow, so complicated!"
print_args("print args", nums1, nums2, nums3, set1, dict1, str1)
# 10. function as parameter and with return values
# (functions are first-class objects, so they can be passed like any value)
if print_args(cheese_and_crackers, print_args) != -1:
    print("You just send more than one parameter. Great!")
| [
"jbarcia99@yahoo.com"
] | jbarcia99@yahoo.com |
1fc5b7e63761e961e0e4347e56f84fa5955cfd41 | d799ab92fff30ec3b4efc5aa079628971451c17a | /coilmq/exception.py | 8f90c7941d9f9f6b8499e17ead4cf48437773574 | [] | no_license | LucaLanziani/coilmq | cf87a3daed400ccc64548873827f148097d7d780 | dce6254801617b5612816dc8d95c3249a284e99a | refs/heads/master | 2021-01-15T16:00:07.231608 | 2014-12-18T12:29:30 | 2014-12-18T12:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | """
Exception classes used by CoilMQ.
CoilMQ exceptions extend C{RuntimeError} or other appropriate sub-classes. These will be
thrown if there is not a more appropriate error class already provided by builtins.
"""
__authors__ = ['"Hans Lellelid" <hans@xmpl.org>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
class ProtocolError(RuntimeError):
    """
    Represents an error at the STOMP protocol layer.

    Subclasses RuntimeError, per this module's convention for CoilMQ errors.
    """
class ConfigError(RuntimeError):
    """
    Represents an error in the configuration of the application.

    Subclasses RuntimeError, per this module's convention for CoilMQ errors.
    """
class AuthError(RuntimeError):
    """
    Represents an authentication or authorization error.

    Subclasses RuntimeError, per this module's convention for CoilMQ errors.
    """
class ClientDisconnected(Exception):
    """
    A signal that client has disconnected (so we shouldn't try to keep reading from the client).

    Unlike the error classes above, this is a control-flow signal and so
    extends Exception rather than RuntimeError.
    """
"hans@xmpl.org"
] | hans@xmpl.org |
86214b40a21aae0c95f24089ca06737c40f26190 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/batch/v20200901/batch_account.py | 46de600800a601fa2ae01fce9fc13bb136cd2562 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,122 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BatchAccountArgs', 'BatchAccount']
@pulumi.input_type
class BatchAccountArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
account_name: Optional[pulumi.Input[str]] = None,
auto_storage: Optional[pulumi.Input['AutoStorageBasePropertiesArgs']] = None,
encryption: Optional[pulumi.Input['EncryptionPropertiesArgs']] = None,
identity: Optional[pulumi.Input['BatchAccountIdentityArgs']] = None,
key_vault_reference: Optional[pulumi.Input['KeyVaultReferenceArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
pool_allocation_mode: Optional[pulumi.Input['PoolAllocationMode']] = None,
public_network_access: Optional[pulumi.Input['PublicNetworkAccessType']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a BatchAccount resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the Batch account.
:param pulumi.Input[str] account_name: A name for the Batch account which must be unique within the region. Batch account names must be between 3 and 24 characters in length and must use only numbers and lowercase letters. This name is used as part of the DNS name that is used to access the Batch service in the region in which the account is created. For example: http://accountname.region.batch.azure.com/.
:param pulumi.Input['AutoStorageBasePropertiesArgs'] auto_storage: The properties related to the auto-storage account.
:param pulumi.Input['EncryptionPropertiesArgs'] encryption: Configures how customer data is encrypted inside the Batch account. By default, accounts are encrypted using a Microsoft managed key. For additional control, a customer-managed key can be used instead.
:param pulumi.Input['BatchAccountIdentityArgs'] identity: The identity of the Batch account.
:param pulumi.Input['KeyVaultReferenceArgs'] key_vault_reference: A reference to the Azure key vault associated with the Batch account.
:param pulumi.Input[str] location: The region in which to create the account.
:param pulumi.Input['PoolAllocationMode'] pool_allocation_mode: The pool allocation mode also affects how clients may authenticate to the Batch Service API. If the mode is BatchService, clients may authenticate using access keys or Azure Active Directory. If the mode is UserSubscription, clients must use Azure Active Directory. The default is BatchService.
:param pulumi.Input['PublicNetworkAccessType'] public_network_access: If not specified, the default value is 'enabled'.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The user-specified tags associated with the account.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if account_name is not None:
pulumi.set(__self__, "account_name", account_name)
if auto_storage is not None:
pulumi.set(__self__, "auto_storage", auto_storage)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if key_vault_reference is not None:
pulumi.set(__self__, "key_vault_reference", key_vault_reference)
if location is not None:
pulumi.set(__self__, "location", location)
if pool_allocation_mode is not None:
pulumi.set(__self__, "pool_allocation_mode", pool_allocation_mode)
if public_network_access is not None:
pulumi.set(__self__, "public_network_access", public_network_access)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group that contains the Batch account.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> Optional[pulumi.Input[str]]:
"""
A name for the Batch account which must be unique within the region. Batch account names must be between 3 and 24 characters in length and must use only numbers and lowercase letters. This name is used as part of the DNS name that is used to access the Batch service in the region in which the account is created. For example: http://accountname.region.batch.azure.com/.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="autoStorage")
def auto_storage(self) -> Optional[pulumi.Input['AutoStorageBasePropertiesArgs']]:
"""
The properties related to the auto-storage account.
"""
return pulumi.get(self, "auto_storage")
@auto_storage.setter
def auto_storage(self, value: Optional[pulumi.Input['AutoStorageBasePropertiesArgs']]):
pulumi.set(self, "auto_storage", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input['EncryptionPropertiesArgs']]:
"""
Configures how customer data is encrypted inside the Batch account. By default, accounts are encrypted using a Microsoft managed key. For additional control, a customer-managed key can be used instead.
"""
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input['EncryptionPropertiesArgs']]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['BatchAccountIdentityArgs']]:
"""
The identity of the Batch account.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['BatchAccountIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="keyVaultReference")
def key_vault_reference(self) -> Optional[pulumi.Input['KeyVaultReferenceArgs']]:
"""
A reference to the Azure key vault associated with the Batch account.
"""
return pulumi.get(self, "key_vault_reference")
@key_vault_reference.setter
def key_vault_reference(self, value: Optional[pulumi.Input['KeyVaultReferenceArgs']]):
pulumi.set(self, "key_vault_reference", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to create the account.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="poolAllocationMode")
def pool_allocation_mode(self) -> Optional[pulumi.Input['PoolAllocationMode']]:
"""
The pool allocation mode also affects how clients may authenticate to the Batch Service API. If the mode is BatchService, clients may authenticate using access keys or Azure Active Directory. If the mode is UserSubscription, clients must use Azure Active Directory. The default is BatchService.
"""
return pulumi.get(self, "pool_allocation_mode")
@pool_allocation_mode.setter
def pool_allocation_mode(self, value: Optional[pulumi.Input['PoolAllocationMode']]):
pulumi.set(self, "pool_allocation_mode", value)
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[pulumi.Input['PublicNetworkAccessType']]:
"""
If not specified, the default value is 'enabled'.
"""
return pulumi.get(self, "public_network_access")
@public_network_access.setter
def public_network_access(self, value: Optional[pulumi.Input['PublicNetworkAccessType']]):
pulumi.set(self, "public_network_access", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The user-specified tags associated with the account.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class BatchAccount(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
auto_storage: Optional[pulumi.Input[pulumi.InputType['AutoStorageBasePropertiesArgs']]] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['EncryptionPropertiesArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['BatchAccountIdentityArgs']]] = None,
key_vault_reference: Optional[pulumi.Input[pulumi.InputType['KeyVaultReferenceArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_allocation_mode: Optional[pulumi.Input['PoolAllocationMode']] = None,
public_network_access: Optional[pulumi.Input['PublicNetworkAccessType']] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Contains information about an Azure Batch account.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: A name for the Batch account which must be unique within the region. Batch account names must be between 3 and 24 characters in length and must use only numbers and lowercase letters. This name is used as part of the DNS name that is used to access the Batch service in the region in which the account is created. For example: http://accountname.region.batch.azure.com/.
:param pulumi.Input[pulumi.InputType['AutoStorageBasePropertiesArgs']] auto_storage: The properties related to the auto-storage account.
:param pulumi.Input[pulumi.InputType['EncryptionPropertiesArgs']] encryption: Configures how customer data is encrypted inside the Batch account. By default, accounts are encrypted using a Microsoft managed key. For additional control, a customer-managed key can be used instead.
:param pulumi.Input[pulumi.InputType['BatchAccountIdentityArgs']] identity: The identity of the Batch account.
:param pulumi.Input[pulumi.InputType['KeyVaultReferenceArgs']] key_vault_reference: A reference to the Azure key vault associated with the Batch account.
:param pulumi.Input[str] location: The region in which to create the account.
:param pulumi.Input['PoolAllocationMode'] pool_allocation_mode: The pool allocation mode also affects how clients may authenticate to the Batch Service API. If the mode is BatchService, clients may authenticate using access keys or Azure Active Directory. If the mode is UserSubscription, clients must use Azure Active Directory. The default is BatchService.
:param pulumi.Input['PublicNetworkAccessType'] public_network_access: If not specified, the default value is 'enabled'.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the Batch account.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The user-specified tags associated with the account.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: BatchAccountArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Contains information about an Azure Batch account.
:param str resource_name: The name of the resource.
:param BatchAccountArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BatchAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
auto_storage: Optional[pulumi.Input[pulumi.InputType['AutoStorageBasePropertiesArgs']]] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['EncryptionPropertiesArgs']]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['BatchAccountIdentityArgs']]] = None,
key_vault_reference: Optional[pulumi.Input[pulumi.InputType['KeyVaultReferenceArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_allocation_mode: Optional[pulumi.Input['PoolAllocationMode']] = None,
public_network_access: Optional[pulumi.Input['PublicNetworkAccessType']] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = BatchAccountArgs.__new__(BatchAccountArgs)
__props__.__dict__["account_name"] = account_name
__props__.__dict__["auto_storage"] = auto_storage
__props__.__dict__["encryption"] = encryption
__props__.__dict__["identity"] = identity
__props__.__dict__["key_vault_reference"] = key_vault_reference
__props__.__dict__["location"] = location
__props__.__dict__["pool_allocation_mode"] = pool_allocation_mode
__props__.__dict__["public_network_access"] = public_network_access
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["account_endpoint"] = None
__props__.__dict__["active_job_and_job_schedule_quota"] = None
__props__.__dict__["dedicated_core_quota"] = None
__props__.__dict__["dedicated_core_quota_per_vm_family"] = None
__props__.__dict__["dedicated_core_quota_per_vm_family_enforced"] = None
__props__.__dict__["low_priority_core_quota"] = None
__props__.__dict__["name"] = None
__props__.__dict__["pool_quota"] = None
__props__.__dict__["private_endpoint_connections"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:batch:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20151201:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20170101:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20170501:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20170901:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20181201:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20190401:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20190801:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20200301:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20200501:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20210101:BatchAccount"), pulumi.Alias(type_="azure-native:batch/v20210601:BatchAccount")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(BatchAccount, __self__).__init__(
'azure-native:batch/v20200901:BatchAccount',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'BatchAccount':
        """
        Get an existing BatchAccount resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Every property is initialized to None here; the engine fills them in
        # from the state of the existing resource identified by `id`.
        __props__ = BatchAccountArgs.__new__(BatchAccountArgs)
        __props__.__dict__["account_endpoint"] = None
        __props__.__dict__["active_job_and_job_schedule_quota"] = None
        __props__.__dict__["auto_storage"] = None
        __props__.__dict__["dedicated_core_quota"] = None
        __props__.__dict__["dedicated_core_quota_per_vm_family"] = None
        __props__.__dict__["dedicated_core_quota_per_vm_family_enforced"] = None
        __props__.__dict__["encryption"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["key_vault_reference"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["low_priority_core_quota"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["pool_allocation_mode"] = None
        __props__.__dict__["pool_quota"] = None
        __props__.__dict__["private_endpoint_connections"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["public_network_access"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return BatchAccount(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="accountEndpoint")
    def account_endpoint(self) -> pulumi.Output[str]:
        """
        The account endpoint used to interact with the Batch service.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "account_endpoint")
    @property
    @pulumi.getter(name="activeJobAndJobScheduleQuota")
    def active_job_and_job_schedule_quota(self) -> pulumi.Output[int]:
        """
        The quota for active jobs and job schedules on this Batch account.
        """
        return pulumi.get(self, "active_job_and_job_schedule_quota")
    @property
    @pulumi.getter(name="autoStorage")
    def auto_storage(self) -> pulumi.Output['outputs.AutoStoragePropertiesResponse']:
        """
        Contains information about the auto-storage account associated with a Batch account.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "auto_storage")
    @property
    @pulumi.getter(name="dedicatedCoreQuota")
    def dedicated_core_quota(self) -> pulumi.Output[int]:
        """
        The dedicated core quota for this Batch account.
        For accounts with PoolAllocationMode set to UserSubscription, quota is managed on the subscription so this value is not returned.
        """
        return pulumi.get(self, "dedicated_core_quota")
    @property
    @pulumi.getter(name="dedicatedCoreQuotaPerVMFamily")
    def dedicated_core_quota_per_vm_family(self) -> pulumi.Output[Sequence['outputs.VirtualMachineFamilyCoreQuotaResponse']]:
        """
        A list of the dedicated core quota per Virtual Machine family for the Batch account. For accounts with PoolAllocationMode set to UserSubscription, quota is managed on the subscription so this value is not returned.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "dedicated_core_quota_per_vm_family")
    @property
    @pulumi.getter(name="dedicatedCoreQuotaPerVMFamilyEnforced")
    def dedicated_core_quota_per_vm_family_enforced(self) -> pulumi.Output[bool]:
        """
        Batch is transitioning its core quota system for dedicated cores to be enforced per Virtual Machine family. During this transitional phase, the dedicated core quota per Virtual Machine family may not yet be enforced. If this flag is false, dedicated core quota is enforced via the old dedicatedCoreQuota property on the account and does not consider Virtual Machine family. If this flag is true, dedicated core quota is enforced via the dedicatedCoreQuotaPerVMFamily property on the account, and the old dedicatedCoreQuota does not apply.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "dedicated_core_quota_per_vm_family_enforced")
    @property
    @pulumi.getter
    def encryption(self) -> pulumi.Output['outputs.EncryptionPropertiesResponse']:
        """
        Configures how customer data is encrypted inside the Batch account. By default, accounts are encrypted using a Microsoft managed key. For additional control, a customer-managed key can be used instead.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "encryption")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.BatchAccountIdentityResponse']]:
        """
        The identity of the Batch account.
        """
        # Optional output: None when the account has no identity configured.
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter(name="keyVaultReference")
    def key_vault_reference(self) -> pulumi.Output['outputs.KeyVaultReferenceResponse']:
        """
        Identifies the Azure key vault associated with a Batch account.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "key_vault_reference")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The location of the resource.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="lowPriorityCoreQuota")
    def low_priority_core_quota(self) -> pulumi.Output[int]:
        """
        The low-priority core quota for this Batch account.
        For accounts with PoolAllocationMode set to UserSubscription, quota is managed on the subscription so this value is not returned.
        """
        return pulumi.get(self, "low_priority_core_quota")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="poolAllocationMode")
    def pool_allocation_mode(self) -> pulumi.Output[str]:
        """
        The allocation mode for creating pools in the Batch account.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "pool_allocation_mode")
    @property
    @pulumi.getter(name="poolQuota")
    def pool_quota(self) -> pulumi.Output[int]:
        """
        The pool quota for this Batch account.
        """
        return pulumi.get(self, "pool_quota")
    @property
    @pulumi.getter(name="privateEndpointConnections")
    def private_endpoint_connections(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointConnectionResponse']]:
        """
        List of private endpoint connections associated with the Batch account
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "private_endpoint_connections")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioned state of the resource
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> pulumi.Output[str]:
        """
        If not specified, the default value is 'enabled'.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "public_network_access")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Mapping[str, str]]:
        """
        The tags of the resource.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        # Output-only value; resolved by the engine from the account's state.
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
0cbbf7ba00dc2b17bb9cbd8f94012fa86ce29902 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/Quote18/HQ_18_060.py | fa5796cbcf138713fd0b5fd81619997aa3d26089 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class HQ_18_060(xtp_test_case):
    """Quote test case: subscribing to an order book with an invalid exchange id."""

    def subOrderBook(self, Api, stk_info, case_name, rs_expect):
        """Subscribe to the order book for `stk_info` and check the callback error.

        :param Api: a logged-in quote API instance
        :param stk_info: dict with 'ticker' and 'exchange_id' keys
        :param case_name: label used in the pass/fail log line
        :param rs_expect: the error dict the callback is expected to deliver
        """
        # Parenthesized call: works under both Python 2 and Python 3
        # (the original `print Api.GetApiVersion()` was Python-2-only syntax).
        print(Api.GetApiVersion())
        def on_order_book(data, error, last):
            self.print_msg(case_name, rs_expect, error)
        Api.setSubOrderBookHandle(on_order_book)
        Api.SubscribeOrderBook(stk_info)
        # Give the asynchronous subscription callback time to arrive.
        time.sleep(1)

    def print_msg(self, case_name, rs_expect, error):
        """Log pass/fail for the case and assert the returned error matches."""
        if rs_expect == error:
            logger.warning('{0}测试正确!'.format(case_name))
        else:
            logger.error('{0}测试错误!'.format(case_name))
        self.assertEqual(error, rs_expect)

    def test_HQ_18_060(self):
        """exchange_id=0 is invalid, so the API should report 'unknown exchange'."""
        pyname = 'HQ_18_060'
        client_id = 6
        Api = XTPQuoteApi(client_id)
        Api.Login()
        stk_info = {'ticker': '000002', 'exchange_id': 0}
        self.subOrderBook(Api, stk_info, pyname,
                          {'error_id': 11200002, 'error_msg': 'unknown exchange'})  # 1
        Api.Logout()
if __name__=='__main__':
    # Run this single quote test case directly; `unittest` is presumably
    # re-exported by the star imports above — TODO: confirm / import explicitly.
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
c2c846d923d1bf7b2fe113d153a514669da57a91 | decefb13f8a603c1f5cc7eb00634b4649915204f | /packages/node-mobile/node.gypi | 68f04d733cf1e4641f39211164425f686f077149 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"Zlib",
"CC0-1.0",
"ISC",
"LicenseRef-scancode-public-domain",
"ICU",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"Artistic-2.0",
"BSD-3-Clause",
"NTP",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"LicenseR... | permissive | open-pwa/open-pwa | f092b377dc6cb04123a16ef96811ad09a9956c26 | 4c88c8520b4f6e7af8701393fd2cedbe1b209e8f | refs/heads/master | 2022-05-28T22:05:19.514921 | 2022-05-20T07:27:10 | 2022-05-20T07:27:10 | 247,925,596 | 24 | 1 | Apache-2.0 | 2021-08-10T07:38:42 | 2020-03-17T09:13:00 | C++ | UTF-8 | Python | false | false | 10,995 | gypi | {
# 'force_load' means to include the static libs into the shared lib or
# executable. Therefore, it is enabled when building:
# 1. The executable and it uses static lib (cctest and node)
# 2. The shared lib
# Linker optimizes out functions that are not used. When force_load=true,
# --whole-archive,force_load and /WHOLEARCHIVE are used to include
# all obj files in static libs into the executable or shared lib.
'variables': {
'variables': {
'variables': {
'force_load%': 'true',
'current_type%': '<(_type)',
},
'force_load%': '<(force_load)',
'conditions': [
['current_type=="static_library"', {
'force_load': 'false',
}],
[ 'current_type=="executable" and node_target_type=="shared_library"', {
'force_load': 'false',
}]
],
},
'force_load%': '<(force_load)',
},
'conditions': [
[ 'clang==1', {
'cflags': [ '-Werror=undefined-inline', ]
}],
[ 'node_shared=="false" and "<(_type)"=="executable"', {
'msvs_settings': {
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': 'src/res/node.exe.extra.manifest'
}
},
}],
[ 'node_shared=="true"', {
'defines': [
'NODE_SHARED_MODE',
],
}],
[ 'OS=="win"', {
'defines!': [
'NODE_PLATFORM="win"',
],
'defines': [
'FD_SETSIZE=1024',
# we need to use node's preferred "win32" rather than gyp's preferred "win"
'NODE_PLATFORM="win32"',
# Stop <windows.h> from defining macros that conflict with
# std::min() and std::max(). We don't use <windows.h> (much)
# but we still inherit it from uv.h.
'NOMINMAX',
'_UNICODE=1',
],
'msvs_precompiled_header': 'tools/msvs/pch/node_pch.h',
'msvs_precompiled_source': 'tools/msvs/pch/node_pch.cc',
'sources': [
'<(_msvs_precompiled_header)',
'<(_msvs_precompiled_source)',
],
}, { # POSIX
'defines': [ '__POSIX__' ],
}],
[ 'node_enable_d8=="true"', {
'dependencies': [ 'tools/v8_gypfiles/d8.gyp:d8' ],
}],
[ 'node_use_bundled_v8=="true"', {
'dependencies': [
'tools/v8_gypfiles/v8.gyp:v8_maybe_snapshot',
'tools/v8_gypfiles/v8.gyp:v8_libplatform',
],
}],
[ 'node_use_v8_platform=="true"', {
'defines': [
'NODE_USE_V8_PLATFORM=1',
],
}, {
'defines': [
'NODE_USE_V8_PLATFORM=0',
],
}],
[ 'node_tag!=""', {
'defines': [ 'NODE_TAG="<(node_tag)"' ],
}],
[ 'node_v8_options!=""', {
'defines': [ 'NODE_V8_OPTIONS="<(node_v8_options)"'],
}],
[ 'node_release_urlbase!=""', {
'defines': [
'NODE_RELEASE_URLBASE="<(node_release_urlbase)"',
]
}],
[ 'v8_enable_i18n_support==1', {
'defines': [ 'NODE_HAVE_I18N_SUPPORT=1' ],
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
'conditions': [
[ 'icu_small=="true"', {
'defines': [ 'NODE_HAVE_SMALL_ICU=1' ],
'conditions': [
[ 'icu_default_data!=""', {
'defines': [
'NODE_ICU_DEFAULT_DATA_DIR="<(icu_default_data)"',
],
}],
],
}]],
}],
[ 'node_no_browser_globals=="true"', {
'defines': [ 'NODE_NO_BROWSER_GLOBALS' ],
} ],
[ 'node_shared_zlib=="false"', {
'dependencies': [ 'deps/zlib/zlib.gyp:zlib' ],
'conditions': [
[ 'force_load=="true"', {
'xcode_settings': {
'OTHER_LDFLAGS': [
'-Wl,-force_load,<(PRODUCT_DIR)/<(STATIC_LIB_PREFIX)zlib<(STATIC_LIB_SUFFIX)',
],
},
'msvs_settings': {
'VCLinkerTool': {
'AdditionalOptions': [
'/WHOLEARCHIVE:zlib<(STATIC_LIB_SUFFIX)',
],
},
},
'conditions': [
['OS!="aix" and node_shared=="false"', {
'ldflags': [
'-Wl,--whole-archive',
'<(obj_dir)/deps/zlib/<(STATIC_LIB_PREFIX)zlib<(STATIC_LIB_SUFFIX)',
'-Wl,--no-whole-archive',
],
}],
],
}],
],
}],
[ 'node_shared_http_parser=="false"', {
'dependencies': [
'deps/http_parser/http_parser.gyp:http_parser',
'deps/llhttp/llhttp.gyp:llhttp'
],
} ],
[ 'node_shared_cares=="false"', {
'dependencies': [ 'deps/cares/cares.gyp:cares' ],
}],
[ 'node_shared_libuv=="false"', {
'dependencies': [ 'deps/uv/uv.gyp:libuv' ],
'conditions': [
[ 'force_load=="true"', {
'xcode_settings': {
'OTHER_LDFLAGS': [
'-Wl,-force_load,<(PRODUCT_DIR)/libuv<(STATIC_LIB_SUFFIX)',
],
},
'msvs_settings': {
'VCLinkerTool': {
'AdditionalOptions': [
'/WHOLEARCHIVE:libuv<(STATIC_LIB_SUFFIX)',
],
},
},
'conditions': [
['OS!="aix" and node_shared=="false"', {
'ldflags': [
'-Wl,--whole-archive',
'<(obj_dir)/deps/uv/<(STATIC_LIB_PREFIX)uv<(STATIC_LIB_SUFFIX)',
'-Wl,--no-whole-archive',
],
}],
],
}],
],
}],
[ 'node_shared_nghttp2=="false"', {
'dependencies': [ 'deps/nghttp2/nghttp2.gyp:nghttp2' ],
}],
[ 'node_shared_brotli=="false"', {
'dependencies': [ 'deps/brotli/brotli.gyp:brotli' ],
}],
[ 'OS=="mac"', {
# linking Corefoundation is needed since certain OSX debugging tools
# like Instruments require it for some features
'libraries': [ '-framework CoreFoundation' ],
'defines!': [
'NODE_PLATFORM="mac"',
],
'defines': [
# we need to use node's preferred "darwin" rather than gyp's preferred "mac"
'NODE_PLATFORM="darwin"',
],
}],
[ 'OS=="freebsd"', {
'libraries': [
'-lutil',
'-lkvm',
],
}],
[ 'OS=="aix"', {
'defines': [
'_LINUX_SOURCE_COMPAT',
'__STDC_FORMAT_MACROS',
],
'conditions': [
[ 'force_load=="true"', {
'variables': {
'exp_filename': '<(PRODUCT_DIR)/<(_target_name).exp',
},
'actions': [
{
'action_name': 'expfile',
'inputs': [
'<(obj_dir)',
],
'outputs': [
'<(exp_filename)',
],
'action': [
'sh', 'tools/create_expfile.sh',
'<@(_inputs)',
'<@(_outputs)',
],
}
],
'ldflags': [
'-Wl,-bE:<(exp_filename)',
'-Wl,-brtl',
],
}],
],
}],
[ 'OS=="solaris"', {
'libraries': [
'-lkstat',
'-lumem',
],
'defines!': [
'NODE_PLATFORM="solaris"',
],
'defines': [
# we need to use node's preferred "sunos"
# rather than gyp's preferred "solaris"
'NODE_PLATFORM="sunos"',
],
}],
[ '(OS=="freebsd" or OS=="linux") and node_shared=="false"'
' and force_load=="true"', {
'ldflags': [
'-Wl,-z,noexecstack',
'-Wl,--whole-archive <(v8_base)',
'-Wl,--no-whole-archive',
]
}],
[ 'node_use_bundled_v8=="true" and v8_postmortem_support==1 and force_load=="true"', {
'xcode_settings': {
'OTHER_LDFLAGS': [
'-Wl,-force_load,<(v8_base)',
],
},
}],
[ 'debug_node=="true"', {
'cflags!': [ '-O3' ],
'cflags': [ '-g', '-O0' ],
'defines': [ 'DEBUG' ],
'xcode_settings': {
'OTHER_CFLAGS': [
'-g', '-O0'
],
},
}],
[ 'coverage=="true" and node_shared=="false" and OS in "mac ios freebsd linux"', {
'cflags!': [ '-O3' ],
'ldflags': [ '--coverage',
'-g',
'-O0' ],
'cflags': [ '--coverage',
'-g',
'-O0' ],
'xcode_settings': {
'OTHER_CFLAGS': [
'--coverage',
'-g',
'-O0'
],
},
'conditions': [
[ '_type=="executable"', {
'xcode_settings': {
'OTHER_LDFLAGS': [ '--coverage', ],
},
}],
],
}],
[ 'OS=="sunos"', {
'ldflags': [ '-Wl,-M,/usr/lib/ld/map.noexstk' ],
}],
[ 'OS=="linux"', {
'libraries!': [
'-lrt'
],
}],
[ 'OS in "freebsd linux"', {
'ldflags': [ '-Wl,-z,relro',
'-Wl,-z,now' ]
}],
[ 'node_use_openssl=="true"', {
'defines': [ 'HAVE_OPENSSL=1' ],
'conditions': [
['openssl_fips != "" or openssl_is_fips=="true"', {
'defines': [ 'NODE_FIPS_MODE' ],
}],
[ 'node_shared_openssl=="false"', {
'dependencies': [
'./deps/openssl/openssl.gyp:openssl',
],
'conditions': [
[ 'OS not in "ios android"', {
'dependencies': [
# Not needed for iOS and Android, doesn't build
# For tests
'./deps/openssl/openssl.gyp:openssl-cli',
],
}],
# -force_load or --whole-archive are not applicable for
# the static library
[ 'force_load=="true"', {
'xcode_settings': {
'OTHER_LDFLAGS': [
'-Wl,-force_load,<(PRODUCT_DIR)/<(openssl_product)',
],
},
'msvs_settings': {
'VCLinkerTool': {
'AdditionalOptions': [
'/WHOLEARCHIVE:<(openssl_product)',
],
},
},
'conditions': [
['OS in "linux freebsd" and node_shared=="false"', {
'ldflags': [
'-Wl,--whole-archive,'
'<(obj_dir)/deps/openssl/<(openssl_product)',
'-Wl,--no-whole-archive',
],
}],
# openssl.def is based on zlib.def, zlib symbols
# are always exported.
['use_openssl_def==1', {
'sources': ['<(SHARED_INTERMEDIATE_DIR)/openssl.def'],
}],
['OS=="win" and use_openssl_def==0', {
'sources': ['deps/zlib/win32/zlib.def'],
}],
],
}],
],
}]]
}, {
'defines': [ 'HAVE_OPENSSL=0' ]
}],
[ 'OS=="android" or OS=="ios"', {
'defines': [
'NODE_MOBILE',
],
}],
],
}
| [
"frank@lemanschik.com"
] | frank@lemanschik.com |
30d795f86d1c75a3ae7fdb57e194dc737a719ab3 | 871690900c8da2456ca2818565b5e8c34818658e | /dongbinbook/chapter16/35.py | 9e8fbe3d8c9323eacf6a95aba7e56cc69392b67e | [] | no_license | kobeomseok95/codingTest | 40d692132e6aeeee32ee53ea5d4b7af8f2b2a5b2 | d628d72d9d0c1aef2b3fa63bfa9a1b50d47aaf29 | refs/heads/master | 2023-04-16T09:48:14.916659 | 2021-05-01T11:35:42 | 2021-05-01T11:35:42 | 311,012,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from sys import stdin
READ = lambda : stdin.readline().strip()
n = int(READ())  # 1-based index of the "ugly number" (factors only 2, 3, 5) to report
# dp[i] holds the (i+1)-th ugly number in increasing order.
dp = [0] * n
dp[0] = 1  # 1 is the first ugly number by convention
# i2/i3/i5: index of the smallest ugly number not yet multiplied by 2/3/5.
i2, i3, i5 = 0, 0, 0
# nx2/nx3/nx5: the next candidate multiples of 2, 3 and 5 respectively.
nx2, nx3, nx5 = 2, 3, 5
for i in range(1, n):
    # The next ugly number is the smallest of the three pending candidates.
    dp[i] = min(nx2, nx3, nx5)
    # Advance every factor whose candidate was consumed. Independent `if`s
    # (not elif) are essential: they skip duplicates such as 6 = 2*3 = 3*2.
    if dp[i] == nx2:
        i2 += 1
        nx2 = dp[i2] * 2
    if dp[i] == nx3:
        i3 += 1
        nx3 = dp[i3] * 3
    if dp[i] == nx5:
        i5 += 1
        nx5 = dp[i5] * 5
print(dp[n-1])
"37062337+kobeomseok95@users.noreply.github.com"
] | 37062337+kobeomseok95@users.noreply.github.com |
96795c8782d229dd9979c2851965e6e213f5175b | 6670bcf105cea48a407284f652192c3b43555941 | /globalance/spiders/globalance.py | 036b2b3a801b62a26eac5c117fa6dfe70c5d93d1 | [] | no_license | daniel-kanchev/globalance | 9850b41452ba4f4d251ab46c2790fefbbed83958 | 6bf1194045420bb18bd38a7351c1f9e188bd7cf3 | refs/heads/main | 2023-03-11T09:27:25.800554 | 2021-02-25T09:30:46 | 2021-02-25T09:30:46 | 342,191,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from globalance.items import Article
class GlobalanceSpider(scrapy.Spider):
    """Crawl globalance.com news pages and extract articles.

    Pages link to each other through "arrow-link" anchors; those links are
    followed recursively, and every page reached this way is also parsed
    as an article.
    """
    name = 'globalance'
    start_urls = ['https://www.globalance.com/news-trends/']

    def parse(self, response):
        """Entry point: follow every arrow-link on the overview page."""
        links = response.xpath('//a[@class="arrow-link"]/@href').getall()
        yield from response.follow_all(links, self.parse_related)

    def parse_related(self, response):
        """Parse the current page as an article, then keep following arrow-links.

        dont_filter=True is required because this URL was already requested to
        reach parse_related; the dupe filter on the recursive follows is what
        keeps the recursion from looping forever.
        """
        yield response.follow(response.url, self.parse_article, dont_filter=True)
        links = response.xpath('//a[@class="arrow-link"]/@href').getall()
        yield from response.follow_all(links, self.parse_related)

    def parse_article(self, response):
        """Extract title, date, link and body text into an Article item."""
        # PDF links carry no parsable HTML article.
        if 'pdf' in response.url:
            return
        item = ItemLoader(Article())
        item.default_output_processor = TakeFirst()
        title = response.xpath('//h1/text()').get()
        if title:
            title = title.strip()
        date = response.xpath('//strong[@class="single-post__date"]/text()').get()
        if date:
            date = date.strip()
        content = response.xpath('//div[@class="single-post__top cell small-12 medium-10 large-8"]//text()').getall()
        # Drop whitespace-only fragments before joining the remaining text.
        content = [text for text in content if text.strip()]
        content = "\n".join(content).strip()
        item.add_value('title', title)
        item.add_value('date', date)
        item.add_value('link', response.url)
        item.add_value('content', content)
        return item.load_item()
| [
"daniel.kanchev@adata.pro"
] | daniel.kanchev@adata.pro |
3bc11c3f4410b8e7d3e9ff51facf65e0ad06d894 | 89bcfc45d70a3ca3f0f1878bebd71aa76d9dc5e2 | /scrapy_demo/ifeng_news/ifeng_news/middlewares.py | 6d773b56201d81b4056f90acc4b85eec7f44beba | [] | no_license | lichao20000/python_spider | dfa95311ab375804e0de4a31ad1e4cb29b60c45b | 81f3377ad6df57ca877463192387933c99d4aff0 | refs/heads/master | 2022-02-16T20:59:40.711810 | 2019-09-10T03:13:07 | 2019-09-10T03:13:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,603 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class IfengNewsSpiderMiddleware(object):
    """Spider middleware for the ifeng_news project.

    Scrapy calls these hooks around spider input/output. Every hook here is a
    pure pass-through, i.e. the middleware does not modify anything; undefined
    hooks would behave the same way.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy: build the middleware and subscribe it to
        # the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened
        )
        return middleware

    def process_spider_input(self, response, spider):
        # Runs for each response entering the spider; returning None lets
        # processing continue unchanged.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request produced by the spider untouched.
        for element in result:
            yield element

    def process_spider_exception(self, response, exception, spider):
        # No special handling: fall back to Scrapy's default behaviour.
        pass

    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for the start requests (which have
        # no response associated). Forward them unchanged.
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class IfengNewsDownloaderMiddleware(object):
    """Downloader middleware for the ifeng_news project.

    Every hook is a pure pass-through: requests proceed to the downloader,
    responses come back unmodified, and exceptions fall through to the
    default handling.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy: build the middleware and subscribe it to
        # the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened
        )
        return middleware

    def process_request(self, request, spider):
        # Returning None hands the request on to the next middleware and,
        # eventually, the downloader.
        return None

    def process_response(self, request, response, spider):
        # Responses are forwarded untouched.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic: let other middlewares / the default handler act.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"64174469@qq.com"
] | 64174469@qq.com |
2990723184aa412d234eade34f9964d6652e7fba | 445166300ebfdfbbb13269b7186000f2e9b5d6cd | /bcbio/variation/bedutils.py | 7d7befd9567e536514a249a6020346683857fb7c | [
"MIT"
] | permissive | matanhofree/bcbio-nextgen | 0434675b90bc37fd25e5f59a0bed48bc6de592d3 | e6938cedb20ff3b7632165105941d71189e46aac | refs/heads/master | 2020-12-26T00:07:33.384662 | 2014-04-17T23:17:19 | 2014-04-17T23:17:19 | 17,914,760 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | """Utilities for manipulating BED files.
"""
import os
import shutil
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import vcfutils
def clean_file(in_file, data, prefix=""):
    """Prepare a clean input BED file without headers or overlapping segments.

    Overlapping regions (1:1-100, 1:90-100) cause issues with callers like FreeBayes
    that don't collapse BEDs prior to using them.

    :param in_file: path to the input BED file; may be None/empty, in which
        case nothing is done
    :param data: pipeline sample dictionary (supplies config and work dirs)
    :param prefix: optional prefix for the prepared output file name
    :returns: path to the cleaned, merged BED file, or None when no input
    """
    if not in_file:
        return None
    # Only resolve the bedtools program when there is actual work to do;
    # the original looked it up unconditionally, failing even for no-op calls.
    bedtools = config_utils.get_program("bedtools", data["config"])
    bedprep_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "bedprep"))
    out_file = os.path.join(bedprep_dir, "%s%s" % (prefix, os.path.basename(in_file)))
    if not utils.file_exists(out_file):
        with file_transaction(out_file) as tx_out_file:
            # Sort by chromosome then start position, then merge overlapping
            # intervals so downstream callers never see overlapping regions.
            cmd = "sort -k1,1 -k2,2n {in_file} | {bedtools} merge -i > {tx_out_file}"
            do.run(cmd.format(**locals()), "Prepare cleaned BED file", data)
    vcfutils.bgzip_and_index(out_file, data["config"], remove_orig=False)
    return out_file
def clean_inputs(data):
    """Replace the configured variant_regions BED file with its cleaned version.

    Cleaning avoids overlapping segments that cause downstream issues.
    """
    raw_regions = utils.get_in(data, ("config", "algorithm", "variant_regions"))
    data["config"]["algorithm"]["variant_regions"] = clean_file(raw_regions, data)
    return data
def combine(in_files, out_file, config):
    """Concatenate multiple BED files into the single output file `out_file`."""
    # Skip the work entirely when the combined file is already present.
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for bed_file in in_files:
                with open(bed_file) as in_handle:
                    shutil.copyfileobj(in_handle, out_handle)
    return out_file
| [
"chapmanb@50mail.com"
] | chapmanb@50mail.com |
5096e1124ae1ec023777ece46d421a3a04d4c6a7 | 921481680f0821fb377799013395f63c00c74a13 | /client/commands/start.py | d4db36062e2c02e84c45ee4b43fbe991ffc6703e | [
"MIT"
] | permissive | jpmondet/pyre-check | 026302aed6eed15312541ecce5c6c959ca5f1720 | d8e916f143af55a013f56510730544afd639e977 | refs/heads/master | 2022-12-27T22:56:55.080300 | 2020-10-16T01:21:57 | 2020-10-16T01:23:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,475 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import logging
import os
from logging import Logger
from typing import List, Optional
from .. import (
command_arguments,
configuration_monitor,
filesystem,
project_files_monitor,
)
from ..analysis_directory import AnalysisDirectory
from ..configuration import Configuration
from .command import IncrementalStyle, typeshed_search_path
from .reporting import Reporting
LOG: Logger = logging.getLogger(__name__)
class Start(Reporting):
    """`pyre start` command: launch a Pyre server for the current project.

    Acquires the client lock, optionally spawns watchman-based monitors, and
    builds the command-line flags passed to the server binary.
    """

    NAME = "start"

    def __init__(
        self,
        command_arguments: command_arguments.CommandArguments,
        original_directory: str,
        *,
        configuration: Configuration,
        analysis_directory: Optional[AnalysisDirectory] = None,
        terminal: bool,
        store_type_check_resolution: bool,
        use_watchman: bool,
        incremental_style: IncrementalStyle,
    ) -> None:
        super(Start, self).__init__(
            command_arguments, original_directory, configuration, analysis_directory
        )
        # Command-specific options (the common ones live in Reporting/parents).
        self._terminal = terminal
        self._store_type_check_resolution = store_type_check_resolution
        self._use_watchman = use_watchman
        self._incremental_style = incremental_style
        self._enable_logging_section("environment")

    def _start_configuration_monitor(self) -> None:
        """Daemonize a watchman-backed monitor for configuration changes.

        No-op when watchman use is disabled.
        """
        if self._use_watchman:
            configuration_monitor.ConfigurationMonitor(
                self._command_arguments,
                self._configuration,
                self._analysis_directory,
                self._configuration.project_root,
                self._original_directory,
                self._configuration.local_root,
                list(self._configuration.other_critical_files),
            ).daemonize()

    def _run(self) -> None:
        """Start the server under the client lock; skip if one already runs."""
        lock = os.path.join(self._configuration.log_directory, "client.lock")
        LOG.info("Waiting on the pyre client lock.")
        with filesystem.acquire_lock(lock, blocking=True):
            self._start_configuration_monitor()
            # This unsafe call is OK due to the client lock always
            # being acquired before starting a server - no server can
            # spawn in the interim which would cause a race.
            try:
                # Probe the server lock non-blockingly: failure to acquire
                # means a server already exists for this analysis directory.
                with filesystem.acquire_lock(
                    os.path.join(
                        self._configuration.log_directory, "server", "server.lock"
                    ),
                    blocking=False,
                ):
                    pass
            except OSError:
                LOG.warning(
                    "Server at `%s` exists, skipping.",
                    self._analysis_directory.get_root(),
                )
                return
            self._analysis_directory.prepare()
            self._call_client(command=self.NAME).check()
            if self._use_watchman:
                # Best effort: a failing file monitor should not fail `start`.
                try:
                    file_monitor = project_files_monitor.ProjectFilesMonitor(
                        self._configuration,
                        self._configuration.project_root,
                        self._analysis_directory,
                    )
                    file_monitor.daemonize()
                    LOG.debug("Initialized file monitor.")
                except project_files_monitor.MonitorException as error:
                    LOG.warning("Failed to initialize file monitor: %s", error)

    def _flags(self) -> List[str]:
        """Assemble the server binary's command-line flags.

        Flag order follows the original implementation; some consumers may
        depend on it, so additions should preserve the existing sequence.
        """
        flags = super()._flags()
        if self._taint_models_path:
            for path in self._taint_models_path:
                flags.extend(["-taint-models", path])
        # Directories to analyze, plus paths whose errors must not be ignored.
        filter_directories = self._get_directories_to_analyze()
        filter_directories.update(
            set(self._configuration.get_existent_do_not_ignore_errors_in_paths())
        )
        if len(filter_directories):
            flags.extend(["-filter-directories", ";".join(sorted(filter_directories))])
        ignore_all_errors_paths = (
            self._configuration.get_existent_ignore_all_errors_paths()
        )
        if len(ignore_all_errors_paths):
            flags.extend(
                ["-ignore-all-errors", ";".join(sorted(ignore_all_errors_paths))]
            )
        if self._terminal:
            flags.append("-terminal")
        if self._store_type_check_resolution:
            flags.append("-store-type-check-resolution")
        # Saved-state handling: saving, project-based lookup, or loading
        # from an explicit file (optionally with a changed-files list).
        if not self._command_arguments.no_saved_state:
            save_initial_state_to = self._command_arguments.save_initial_state_to
            if save_initial_state_to and os.path.isdir(
                os.path.dirname(save_initial_state_to)
            ):
                flags.extend(["-save-initial-state-to", save_initial_state_to])
            saved_state_project = self._command_arguments.saved_state_project
            if saved_state_project:
                flags.extend(["-saved-state-project", saved_state_project])
                relative_local_root = self._configuration.relative_local_root
                if relative_local_root is not None:
                    # Local root encoded with "$" separators for saved-state metadata.
                    flags.extend(
                        ["-saved-state-metadata", relative_local_root.replace("/", "$")]
                    )
            configuration_file_hash = self._configuration.file_hash
            if configuration_file_hash:
                flags.extend(["-configuration-file-hash", configuration_file_hash])
            load_initial_state_from = self._command_arguments.load_initial_state_from
            changed_files_path = self._command_arguments.changed_files_path
            if load_initial_state_from is not None:
                flags.extend(["-load-state-from", load_initial_state_from])
                if changed_files_path is not None:
                    flags.extend(["-changed-files-path", changed_files_path])
            elif changed_files_path is not None:
                # changed-files only makes sense relative to a loaded state.
                LOG.error(
                    "--load-initial-state-from must be set if --changed-files-path is set."
                )
        flags.extend(
            [
                "-workers",
                str(self._configuration.get_number_of_workers()),
                "-expected-binary-version",
                self._configuration.get_version_hash_respecting_override()
                or "unversioned",
            ]
        )
        # Search path: configured entries plus typeshed stubs, if available.
        typeshed = self._configuration.get_typeshed_respecting_override()
        search_path = [
            search_path.command_line_argument()
            for search_path in self._configuration.get_existent_search_paths()
        ] + (typeshed_search_path(typeshed) if typeshed is not None else [])
        flags.extend(["-source-path", self._analysis_directory.get_root()])
        if search_path:
            flags.extend(["-search-path", ",".join(search_path)])
        excludes = self._configuration.excludes
        for exclude in excludes:
            flags.extend(["-exclude", exclude])
        extensions = self._configuration.get_valid_extensions()
        for extension in extensions:
            flags.extend(["-extension", extension])
        if self._incremental_style != IncrementalStyle.SHALLOW:
            flags.append("-new-incremental-check")
        if self._configuration.autocomplete:
            flags.append("-autocomplete")
        flags.extend(self._feature_flags())
        return flags
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
cf393c7ba87cbe283a5ea1a3cc6842334c93573b | 38258a7dd9acbfb7adf72983015de68a948a4826 | /B_1000~/B_1920.py | 4d9a17f2cc8b86303296610a604dd878094b257f | [] | no_license | kangsm0903/Algorithm | 13a7fe5729039a1d0ce91a574c4755a8a92fb02b | 7d713d1c9e2e4dc30141d4f409ac1430a357065b | refs/heads/master | 2022-10-04T00:33:49.247977 | 2022-09-26T12:51:16 | 2022-09-26T12:51:16 | 219,265,010 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # Binary Search
import sys
# Input format: first line N (size of the search pool), second line the N
# integers; third line M (number of queries), fourth line the M query values.
# N and M are read only to consume the count lines; split() takes all values.
N=int(sys.stdin.readline())
case=list(map(int,sys.stdin.readline().split()))
case.sort() # sort ascending (e.g. 1 2 3 4 5) so binary search applies
M=int(sys.stdin.readline())
case2=list(map(int,sys.stdin.readline().split()))
def Binary_Search(arr, value):
    """Print 1 if ``value`` occurs in the ascending-sorted list ``arr``, else 0.

    Standard half-open binary search over ``arr[lo:hi]``; result is printed,
    not returned, because the caller relies on the side effect.
    """
    lo, hi = 0, len(arr)
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] == value:
            print(1)
            return
        if arr[mid] < value:
            lo = mid + 1  # value can only be in the upper half
        else:
            hi = mid      # value can only be in the lower half
    print(0)
# Look up each query value; Binary_Search itself prints 1 (found) or 0 (absent).
for i in case2:
    Binary_Search(case,i)
| [
"kangsm0903@naver.com"
] | kangsm0903@naver.com |
eaae2bac105eae300e5e56925168de0fe36418da | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/GodOfPython/P15_Thread/direct/num2_1.py | 22db2b5b8f96c51f5cf87484cecfceb3e24d7c60 | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | import threading
import time
class client_thread(threading.Thread):
    """Thread that prints its tag ``word`` every ``sec`` seconds, forever."""

    def __init__(self, word, sec):
        super().__init__()
        self.word = word  # label printed on every tick
        self.sec = sec    # delay between prints, in seconds

    def run(self):
        # Endless print/sleep loop; the thread never terminates on its own.
        while True:
            print(self.word)
            time.sleep(self.sec)
# Create three clients with different print intervals and start them; each
# loops forever, so the program has to be stopped externally.
client_A = client_thread('A', 1)
client_B = client_thread('B', 1.5)
client_C = client_thread('C', 2)
client_A.start()
client_B.start()
client_C.start() | [
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
e820fc6eb664ddd70910f830cfc698c1046c2b27 | ee3039b27532d09c0c435ea7b92e29c70246c66e | /opencv/learnOpencv/091-120/107-Brisk特征提取与描述子匹配.py | 2445870091acd0512850103849db6de6ecba50d4 | [] | no_license | Alvazz/fanfuhan_ML_OpenCV | e8b37acc406462b9aaca9c5e6844d1db5aa3c944 | dacfdaf87356e857d3ff18c5e0a4fd5a50855324 | refs/heads/master | 2022-04-05T06:15:31.778227 | 2020-02-07T01:40:07 | 2020-02-07T01:40:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | """
BRISK feature extraction and descriptor matching.
"""
import cv2 as cv
# Load the reference object image and the scene image that contains it.
box = cv.imread("images/box.png")
box_in_scene = cv.imread("images/box_in_scene.png")
# Create the BRISK feature detector
brisk = cv.BRISK_create()
# Detect keypoints and compute descriptors for both images
kp1, des1 = brisk.detectAndCompute(box, None)
kp2, des2 = brisk.detectAndCompute(box_in_scene, None)
# Brute-force matching using the Hamming norm with cross-checking enabled
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
matchers = bf.match(des1, des2)
# Draw the matched keypoints side by side and show until a key is pressed
result = cv.drawMatches(box, kp1, box_in_scene, kp2, matchers, None)
cv.imshow("brisk-match", result)
cv.waitKey(0)
cv.destroyAllWindows() | [
"gitea@fake.local"
] | gitea@fake.local |
55620ebc9837797070670695ca2f01c1d53aa79c | e1bdbd08afec39c1ee56a3885a837ec966543a2d | /Section_05_code/function_composition.py | 94b9cab36e24c24e98e0c20dfe7503c72a40805b | [
"MIT"
] | permissive | PacktPublishing/Python-Machine-Learning-Solutions-V- | 507bd8b285f051d2761a5348e4a8c9a50329287a | 8bb80a43a7c64032c25c1023faaa29bbfbd39d45 | refs/heads/master | 2023-02-28T05:19:49.782472 | 2021-01-20T09:11:09 | 2021-01-20T09:11:09 | 188,817,647 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | import numpy as np
from functools import reduce
def add3(input_array):
    """Return a list with 3 added to every element of *input_array*."""
    return [element + 3 for element in input_array]
def mul2(input_array):
    """Return a list with every element of *input_array* doubled."""
    return [element * 2 for element in input_array]
def sub5(input_array):
    """Return a list with 5 subtracted from every element of *input_array*."""
    return [element - 5 for element in input_array]
def function_composer(*args):
    """Compose ``args`` into one function, applied right-to-left.

    ``function_composer(f, g, h)(x)`` is equivalent to ``f(g(h(x)))``.
    Raises TypeError when called with no functions (from ``reduce``).
    """
    def _pairwise(outer, inner):
        # Fold two callables into a single one: outer(inner(x)).
        return lambda x: outer(inner(x))

    return reduce(_pairwise, args)
if __name__=='__main__':
    # Demo: apply add3/mul2/sub5 step by step, then via function_composer.
    arr = np.array([2,5,4,7])
    print('\nOperation: add3(mul2(sub5(arr)))')
    arr1 = add3(arr)
    arr2 = mul2(arr1)
    arr3 = sub5(arr2)
    print('Output using the lengthy way:',arr3)
    # Composition applies right-to-left: add3 first, then mul2, then sub5 --
    # matching the manual steps above.
    func_composed = function_composer(sub5, mul2, add3)
    print('Output using function composition:', func_composed((arr)))
    print('\nOperation: sub5(add3(mul2(sub5(mul2(arr)))))\nOutput:',
          function_composer(mul2, sub5, mul2, add3, sub5)((arr)))
| [
"sonalis@packtpub.com"
] | sonalis@packtpub.com |
02b1d509f61b8aa6d56212bae696130cbbe68648 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/106/usersdata/250/51843/submittedfiles/questao2.py | ba338ef6887469e0b298d1b93d06e56af4237019 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # -*- coding: utf-8 -*-
# Read the six numbers of the player's bet and the six drawn numbers, then
# report a "terno" when exactly three bet numbers were drawn.
a1=int(input('1° numero da aposta:'))
a2=int(input('2° numero da aposta:'))
a3=int(input('3° numero da aposta:'))
a4=int(input('4° numero da aposta:'))
a5=int(input('5° numero da aposta:'))
a6=int(input('6° numero da aposta:'))
b1=int(input('1° numero sorteado:'))
b2=int(input('2° numero sorteado:'))
b3=int(input('3° numero sorteado:'))
b4=int(input('4° numero sorteado:'))
b5=int(input('5° numero sorteado:'))
b6=int(input('6° numero sorteado:'))
lista1=[a1,a2,a3,a4,a5,a6]
lista2=[b1,b2,b3,b4,b5,b6]
# BUG FIX: the original tested ``lista1*lista2==3``, which raises TypeError
# (two lists cannot be multiplied).  The intent is to count how many bet
# numbers appear among the drawn numbers.
acertos = len(set(lista1) & set(lista2))
if acertos == 3:
    print('terno')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e5f90e811df9ccce1e34f936f9d73c5858150bb0 | abc4a73e5f93ebf90be946b95ef215e32c823353 | /colour/models/rgb/datasets/color_match_rgb.py | b66710a32c654b51f587f18453877293617fbaf5 | [
"BSD-3-Clause"
] | permissive | OmarWagih1/colour | 69f5108e83ec443551c5593c066bcd4e3596060f | bdc880a2783ff523dafb19f1233212dd03a639bd | refs/heads/develop | 2021-04-14T20:30:29.635916 | 2020-07-26T05:46:00 | 2020-07-26T05:46:00 | 249,263,927 | 0 | 0 | BSD-3-Clause | 2020-03-22T20:11:06 | 2020-03-22T20:11:06 | null | UTF-8 | Python | false | false | 2,658 | py | # -*- coding: utf-8 -*-
"""
ColorMatch RGB Colourspace
==========================
Defines the *ColorMatch RGB* colourspace:
- :attr:`colour.models.COLOR_MATCH_RGB_COLOURSPACE`.
References
----------
- :cite:`Lindbloom2014a` : Lindbloom, B. (2014). RGB Working Space
Information. Retrieved April 11, 2014, from
http://www.brucelindbloom.com/WorkingSpaceInfo.html
"""
from __future__ import division, unicode_literals
import numpy as np
from functools import partial
from colour.colorimetry import ILLUMINANTS
from colour.models.rgb import (RGB_Colourspace, gamma_function,
normalised_primary_matrix)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
    'COLOR_MATCH_RGB_PRIMARIES', 'COLOR_MATCH_RGB_WHITEPOINT_NAME',
    'COLOR_MATCH_RGB_WHITEPOINT', 'COLOR_MATCH_RGB_TO_XYZ_MATRIX',
    'XYZ_TO_COLOR_MATCH_RGB_MATRIX', 'COLOR_MATCH_RGB_COLOURSPACE'
]
# Shape (3, 2): presumably the R, G and B primaries as CIE xy chromaticity
# coordinates, one row per channel -- TODO confirm against RGB_Colourspace.
COLOR_MATCH_RGB_PRIMARIES = np.array([
    [0.6300, 0.3400],
    [0.2950, 0.6050],
    [0.1500, 0.0750],
])
"""
*ColorMatch RGB* colourspace primaries.
COLOR_MATCH_RGB_PRIMARIES : ndarray, (3, 2)
"""
COLOR_MATCH_RGB_WHITEPOINT_NAME = 'D50'
"""
*ColorMatch RGB* colourspace whitepoint name.
COLOR_MATCH_RGB_WHITEPOINT_NAME : unicode
"""
# Look up the D50 whitepoint chromaticity for the CIE 1931 2-degree observer.
COLOR_MATCH_RGB_WHITEPOINT = (ILLUMINANTS[
    'CIE 1931 2 Degree Standard Observer'][COLOR_MATCH_RGB_WHITEPOINT_NAME])
"""
*ColorMatch RGB* colourspace whitepoint.
COLOR_MATCH_RGB_WHITEPOINT : ndarray
"""
# Both matrices are derived at import time from the primaries and whitepoint.
COLOR_MATCH_RGB_TO_XYZ_MATRIX = normalised_primary_matrix(
    COLOR_MATCH_RGB_PRIMARIES, COLOR_MATCH_RGB_WHITEPOINT)
"""
*ColorMatch RGB* colourspace to *CIE XYZ* tristimulus values matrix.
COLOR_MATCH_RGB_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_COLOR_MATCH_RGB_MATRIX = np.linalg.inv(COLOR_MATCH_RGB_TO_XYZ_MATRIX)
"""
*CIE XYZ* tristimulus values to *ColorMatch RGB* colourspace matrix.
XYZ_TO_COLOR_MATCH_RGB_MATRIX : array_like, (3, 3)
"""
COLOR_MATCH_RGB_COLOURSPACE = RGB_Colourspace(
    'ColorMatch RGB',
    COLOR_MATCH_RGB_PRIMARIES,
    COLOR_MATCH_RGB_WHITEPOINT,
    COLOR_MATCH_RGB_WHITEPOINT_NAME,
    COLOR_MATCH_RGB_TO_XYZ_MATRIX,
    XYZ_TO_COLOR_MATCH_RGB_MATRIX,
    # NOTE(review): simple gamma-1.8 transfer functions; presumably the first
    # (exponent 1/1.8) is the encoding CCTF and the second (1.8) the decoding
    # CCTF -- confirm against the RGB_Colourspace signature.
    partial(gamma_function, exponent=1 / 1.8),
    partial(gamma_function, exponent=1.8),
)
COLOR_MATCH_RGB_COLOURSPACE.__doc__ = """
*ColorMatch RGB* colourspace.
References
----------
:cite:`Lindbloom2014a`
COLOR_MATCH_RGB_COLOURSPACE : RGB_Colourspace
"""
| [
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com |
58833472273c67331ab27281f4677f0b6a75008b | a934a51f68592785a7aed1eeb31e5be45dd087d3 | /Learning/Network_process_WA/Day1/2020_Jul23/get_password.py | cc1afee02d5db35f9571e93b5029364eb37a9cc7 | [] | no_license | nsshayan/Python | 9bf0dcb9a6890419873428a2dde7a802e715be2b | 0cf5420eecac3505071326c90b28bd942205ea54 | refs/heads/master | 2021-06-03T18:41:06.203334 | 2020-09-28T07:28:48 | 2020-09-28T07:28:48 | 35,269,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import getpass
# Prompt for credentials: the username is echoed, the password is read
# without echo via getpass.getpass.
username = input("Enter username: ")
password = getpass.getpass("Enter password: ")
# NOTE(review): printing the captured password back defeats the point of
# hiding the input -- acceptable only as a teaching demo.
print(f"Username {username}, Password is {password}")
# getpass.getuser() returns the OS-level login name, not the value typed above.
print("Logged in as", getpass.getuser())
| [
"nsshayan89@gmail.com"
] | nsshayan89@gmail.com |
7d2635b73bf9e628176bb913afe718340253d357 | 1bad7fc3fdd9e38b7ff50a7825565b7b190fa5b7 | /qrback/migrations/0034_auto_20201015_0106.py | 44ea931eaea9adc4f2daa7b21b8fd04f9380a3fa | [] | no_license | furkankykc/QRforAll | d4be43e403d75c86436ed9d9e2b222619ecf92b1 | 6cc0555fdc27797586628f2012523dce5212b321 | refs/heads/master | 2023-07-10T13:02:27.618792 | 2021-08-05T07:22:29 | 2021-08-05T07:22:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # Generated by Django 3.0.8 on 2020-10-14 22:06
from django.db import migrations, models
import qrback.models
class Migration(migrations.Migration):
    # Auto-generated migration: adds two optional image fields (192x and 512x
    # logos) to the ``company`` model; upload paths come from
    # qrback.models.get_image_path.
    dependencies = [
        ('qrback', '0033_auto_20201005_1411'),
    ]
    operations = [
        migrations.AddField(
            model_name='company',
            name='logo_192',
            field=models.ImageField(blank=True, null=True, upload_to=qrback.models.get_image_path, verbose_name='192x logo'),
        ),
        migrations.AddField(
            model_name='company',
            name='logo_512',
            field=models.ImageField(blank=True, null=True, upload_to=qrback.models.get_image_path, verbose_name='512x logo'),
        ),
    ]
| [
"furkanfbr@gmail.com"
] | furkanfbr@gmail.com |
ff943da6f0fe8957f24c6671b6c35d37ca590f9c | 1d502006c95de319b9e629ba9bea08823e689679 | /bndl/compute/tests/test_reduce_by_key.py | 21b7c5380fc085933ce6b5f7f78fa8f3d4a9a9d0 | [
"Apache-2.0"
] | permissive | bndl/bndl | 0e8dcb959b3a9dd603a006e4e6ae073ae6143ddf | e9c49c9844e7c4d6ac0c9491c02122098e22153d | refs/heads/master | 2022-12-10T18:11:17.877017 | 2022-03-20T18:23:26 | 2022-03-20T18:23:26 | 72,571,767 | 1 | 2 | Apache-2.0 | 2022-12-05T22:31:45 | 2016-11-01T20:01:19 | Python | UTF-8 | Python | false | false | 1,110 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from operator import add
from bndl.compute.tests import ComputeTest
from bndl.util import strings
class ReduceByKeyTest(ComputeTest):
    def test_wordcount(self):
        """reduce_by_key(add) over (word, 1) pairs must reproduce Counter totals."""
        sample = [strings.random(2) for _ in range(100)] * 5
        expected = Counter(sample)
        counted = self.ctx.collection(sample, pcount=4).with_value(1).reduce_by_key(add)
        # One reduced entry per distinct word ...
        self.assertEqual(counted.count(), len(expected))
        # ... and every reduced count matches the plain-Python tally.
        for word, total in counted.collect():
            self.assertTrue(word in expected)
            self.assertEqual(total, expected[word])
| [
"frens.jan.rumph@target-holding.nl"
] | frens.jan.rumph@target-holding.nl |
1ac453d4fc2c54b43c12c33bc1445864694ebec6 | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/common/lib/ctypes/test/test_byteswap.py | 046b757767016946ce9b51a6fd0d7b710ff39df2 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 9,873 | py | # 2015.11.10 21:34:50 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/ctypes/test/test_byteswap.py
import sys, unittest, struct, math, ctypes
from binascii import hexlify
from ctypes import *
def bin(s):
    """Return the uppercase hex representation of the raw bytes of *s*.

    NOTE: deliberately shadows the ``bin`` builtin; the test methods in this
    file depend on this name.
    """
    raw = memoryview(s)
    return hexlify(raw).upper()
class Test(unittest.TestCase):
def X_test(self):
print >> sys.stderr, sys.byteorder
for i in range(32):
bits = BITS()
setattr(bits, 'i%s' % i, 1)
dump(bits)
def test_endian_short(self):
if sys.byteorder == 'little':
self.assertIs(c_short.__ctype_le__, c_short)
self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short)
else:
self.assertIs(c_short.__ctype_be__, c_short)
self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short)
s = c_short.__ctype_be__(4660)
self.assertEqual(bin(struct.pack('>h', 4660)), '1234')
self.assertEqual(bin(s), '1234')
self.assertEqual(s.value, 4660)
s = c_short.__ctype_le__(4660)
self.assertEqual(bin(struct.pack('<h', 4660)), '3412')
self.assertEqual(bin(s), '3412')
self.assertEqual(s.value, 4660)
s = c_ushort.__ctype_be__(4660)
self.assertEqual(bin(struct.pack('>h', 4660)), '1234')
self.assertEqual(bin(s), '1234')
self.assertEqual(s.value, 4660)
s = c_ushort.__ctype_le__(4660)
self.assertEqual(bin(struct.pack('<h', 4660)), '3412')
self.assertEqual(bin(s), '3412')
self.assertEqual(s.value, 4660)
def test_endian_int(self):
if sys.byteorder == 'little':
self.assertIs(c_int.__ctype_le__, c_int)
self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int)
else:
self.assertIs(c_int.__ctype_be__, c_int)
self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int)
s = c_int.__ctype_be__(305419896)
self.assertEqual(bin(struct.pack('>i', 305419896)), '12345678')
self.assertEqual(bin(s), '12345678')
self.assertEqual(s.value, 305419896)
s = c_int.__ctype_le__(305419896)
self.assertEqual(bin(struct.pack('<i', 305419896)), '78563412')
self.assertEqual(bin(s), '78563412')
self.assertEqual(s.value, 305419896)
s = c_uint.__ctype_be__(305419896)
self.assertEqual(bin(struct.pack('>I', 305419896)), '12345678')
self.assertEqual(bin(s), '12345678')
self.assertEqual(s.value, 305419896)
s = c_uint.__ctype_le__(305419896)
self.assertEqual(bin(struct.pack('<I', 305419896)), '78563412')
self.assertEqual(bin(s), '78563412')
self.assertEqual(s.value, 305419896)
def test_endian_longlong(self):
if sys.byteorder == 'little':
self.assertIs(c_longlong.__ctype_le__, c_longlong)
self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong)
else:
self.assertIs(c_longlong.__ctype_be__, c_longlong)
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong)
s = c_longlong.__ctype_be__(1311768467294899695L)
self.assertEqual(bin(struct.pack('>q', 1311768467294899695L)), '1234567890ABCDEF')
self.assertEqual(bin(s), '1234567890ABCDEF')
self.assertEqual(s.value, 1311768467294899695L)
s = c_longlong.__ctype_le__(1311768467294899695L)
self.assertEqual(bin(struct.pack('<q', 1311768467294899695L)), 'EFCDAB9078563412')
self.assertEqual(bin(s), 'EFCDAB9078563412')
self.assertEqual(s.value, 1311768467294899695L)
s = c_ulonglong.__ctype_be__(1311768467294899695L)
self.assertEqual(bin(struct.pack('>Q', 1311768467294899695L)), '1234567890ABCDEF')
self.assertEqual(bin(s), '1234567890ABCDEF')
self.assertEqual(s.value, 1311768467294899695L)
s = c_ulonglong.__ctype_le__(1311768467294899695L)
self.assertEqual(bin(struct.pack('<Q', 1311768467294899695L)), 'EFCDAB9078563412')
self.assertEqual(bin(s), 'EFCDAB9078563412')
self.assertEqual(s.value, 1311768467294899695L)
def test_endian_float(self):
if sys.byteorder == 'little':
self.assertIs(c_float.__ctype_le__, c_float)
self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
else:
self.assertIs(c_float.__ctype_be__, c_float)
self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack('f', math.pi)), bin(s))
self.assertAlmostEqual(s.value, math.pi, 6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack('<f', math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack('>f', math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == 'little':
self.assertIs(c_double.__ctype_le__, c_double)
self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
else:
self.assertIs(c_double.__ctype_be__, c_double)
self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack('d', math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack('<d', math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack('>d', math.pi)), bin(s))
def test_endian_other(self):
self.assertIs(c_byte.__ctype_le__, c_byte)
self.assertIs(c_byte.__ctype_be__, c_byte)
self.assertIs(c_ubyte.__ctype_le__, c_ubyte)
self.assertIs(c_ubyte.__ctype_be__, c_ubyte)
self.assertIs(c_char.__ctype_le__, c_char)
self.assertIs(c_char.__ctype_be__, c_char)
def test_struct_fields_1(self):
if sys.byteorder == 'little':
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(base):
pass
_fields_ = [('a', c_ubyte),
('b', c_byte),
('c', c_short),
('d', c_ushort),
('e', c_int),
('f', c_uint),
('g', c_long),
('h', c_ulong),
('i', c_longlong),
('k', c_ulonglong),
('l', c_float),
('m', c_double),
('n', c_char),
('b1', c_byte, 3),
('b2', c_byte, 3),
('b3', c_byte, 2),
('a', c_int * 3 * 3 * 3)]
T._fields_ = _fields_
for typ in (c_wchar, c_void_p, POINTER(c_int)):
_fields_.append(('x', typ))
class T(base):
pass
self.assertRaises(TypeError, setattr, T, '_fields_', [('x', typ)])
def test_struct_struct(self):
for nested, data in ((BigEndianStructure, '\x00\x00\x00\x01\x00\x00\x00\x02'), (LittleEndianStructure, '\x01\x00\x00\x00\x02\x00\x00\x00')):
for parent in (BigEndianStructure, LittleEndianStructure, Structure):
class NestedStructure(nested):
_fields_ = [('x', c_uint32), ('y', c_uint32)]
class TestStructure(parent):
_fields_ = [('point', NestedStructure)]
self.assertEqual(len(data), sizeof(TestStructure))
ptr = POINTER(TestStructure)
s = cast(data, ptr)[0]
del ctypes._pointer_type_cache[TestStructure]
self.assertEqual(s.point.x, 1)
self.assertEqual(s.point.y, 2)
def test_struct_fields_2(self):
if sys.byteorder == 'little':
base = BigEndianStructure
fmt = '>bxhid'
else:
base = LittleEndianStructure
fmt = '<bxhid'
class S(base):
_fields_ = [('b', c_byte),
('h', c_short),
('i', c_int),
('d', c_double)]
s1 = S(18, 4660, 305419896, 3.14)
s2 = struct.pack(fmt, 18, 4660, 305419896, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_nonnative_struct_fields(self):
if sys.byteorder == 'little':
base = BigEndianStructure
fmt = '>b h xi xd'
else:
base = LittleEndianStructure
fmt = '<b h xi xd'
class S(base):
_pack_ = 1
_fields_ = [('b', c_byte),
('h', c_short),
('_1', c_byte),
('i', c_int),
('_2', c_byte),
('d', c_double)]
s1 = S()
s1.b = 18
s1.h = 4660
s1.i = 305419896
s1.d = 3.14
s2 = struct.pack(fmt, 18, 4660, 305419896, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_unaligned_native_struct_fields(self):
if sys.byteorder == 'little':
fmt = '<b h xi xd'
else:
base = LittleEndianStructure
fmt = '>b h xi xd'
class S(Structure):
_pack_ = 1
_fields_ = [('b', c_byte),
('h', c_short),
('_1', c_byte),
('i', c_int),
('_2', c_byte),
('d', c_double)]
s1 = S()
s1.b = 18
s1.h = 4660
s1.i = 305419896
s1.d = 3.14
s2 = struct.pack(fmt, 18, 4660, 305419896, 3.14)
self.assertEqual(bin(s1), bin(s2))
if __name__ == '__main__':
unittest.main()
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\ctypes\test\test_byteswap.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:34:50 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
23ee0c538db3ab215488797a03c1787eba16cd76 | ea99544eef7572b194c2d3607fa7121cb1e45872 | /apps/notification/migrations/0002_auto_20190407_0310.py | 8ab14bb16d0e8a9f752f89dd4da047f06b6ceff0 | [] | no_license | ash018/FFTracker | 4ab55d504a9d8ba9e541a8b682bc821f112a0866 | 11be165f85cda0ffe7a237d011de562d3dc64135 | refs/heads/master | 2022-12-02T15:04:58.543382 | 2019-10-05T12:54:27 | 2019-10-05T12:54:27 | 212,999,035 | 0 | 0 | null | 2022-11-22T03:58:29 | 2019-10-05T12:53:26 | Python | UTF-8 | Python | false | false | 1,104 | py | # Generated by Django 2.2 on 2019-04-07 03:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: wires Notification to its related models --
    # an optional ``agent`` user (FK), many ``recipients`` (M2M to the user
    # model) and an optional ``task`` (FK to task.Task).
    initial = True
    dependencies = [
        ('task', '0001_initial'),
        ('notification', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.AddField(
            model_name='notification',
            name='agent',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_agent', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='notification',
            name='recipients',
            field=models.ManyToManyField(blank=True, related_name='user_recipients', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='notification',
            name='task',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='task.Task'),
        ),
    ]
| [
"sadatakash018@gmail.com"
] | sadatakash018@gmail.com |
02d7538ca267d6d32fa4370b3f204473841b89d0 | 05dc7ec5341ff65c92a6b9c347ac3203479b6e64 | /src/alveos/wsgi.py | a5c96796ac70706b8c3b7a681379edf23cd8c89d | [
"BSD-3-Clause"
] | permissive | tykling/alveos | 7542d15dbdf0ef6df53fd7b0a66f49929f1c7681 | 0758a1505bf1696a48c02d14c1fefe6633c35a97 | refs/heads/master | 2021-06-08T15:41:34.245465 | 2016-12-05T09:27:45 | 2016-12-05T09:27:45 | 74,826,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for alveos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django can locate the settings module before the app is built; an
# externally-set DJANGO_SETTINGS_MODULE takes precedence (setdefault).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "alveos.settings")
# Module-level WSGI callable that application servers look up.
application = get_wsgi_application()
| [
"thomas@gibfest.dk"
] | thomas@gibfest.dk |
17c020ac1425c98eb76a34fe9d863373305d7b2c | e67a0139092d3389fea0075de9ecf12ab209649f | /scripts/addons_extern/AF_3dview_specials/__init__.py | d7533a07f8e4f96b91ac55f60f211be8beb49c96 | [] | no_license | amagnoni/blenderpython | 9fe864d287f992b7cd71cd584fca4a501a6ac954 | d2fec1a35369b7b171e2f0999196b87e242e08f3 | refs/heads/master | 2021-01-18T11:28:55.372759 | 2015-10-17T20:16:57 | 2015-10-17T20:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,841 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# by meta-androcto, parts based on work by Saidenka #
# Blender add-on metadata shown in the Add-ons preferences panel.
bl_info = {
    "name": "3d View Specials",
    "author": "Meta Androcto, ",
    "version": (0, 2),
    "blender": (2, 75, 0),
    "location": "W key > Object, Edit, Pose, Armature",
    "description": "Extended Specials: W key > Object, Edit, Pose, Armature",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6"\
        "/Py/Scripts",
    "tracker_url": "",
    "category": "Addon Factory"}
# Support Blender's "Reload Scripts": if bpy is already bound, the submodules
# were imported before and must be reloaded; otherwise import them fresh.
if "bpy" in locals():
    import importlib
    importlib.reload(VIEW3D_MT_armature_specials)
    importlib.reload(VIEW3D_MT_edit_mesh_specials)
    # BUG FIX: this branch previously reloaded ``VIEW3D_MT_select_object``,
    # a module never imported below (NameError on add-on reload), while
    # ``VIEW3D_MT_object_specials`` -- used by register()/unregister() --
    # was never reloaded.
    importlib.reload(VIEW3D_MT_object_specials)
    importlib.reload(VIEW3D_MT_pose_specials)
    importlib.reload(VIEW3D_MT_object_batch)
else:
    from . import VIEW3D_MT_armature_specials
    from . import VIEW3D_MT_edit_mesh_specials
    from . import VIEW3D_MT_object_specials
    from . import VIEW3D_MT_pose_specials
    from . import VIEW3D_MT_object_batch
import bpy
def register():
    """Register this add-on's classes and extend the 3D View Specials menus."""
    bpy.utils.register_module(__name__)
    # Append this add-on's extra entries to each Specials (W key) menu; the
    # object Specials menu receives both the specials and the batch entries.
    bpy.types.VIEW3D_MT_armature_specials.append(VIEW3D_MT_armature_specials.menu)
    bpy.types.VIEW3D_MT_edit_mesh_specials.append(VIEW3D_MT_edit_mesh_specials.menu)
    bpy.types.VIEW3D_MT_object_specials.append(VIEW3D_MT_object_specials.menu)
    bpy.types.VIEW3D_MT_pose_specials.append(VIEW3D_MT_pose_specials.menu)
    bpy.types.VIEW3D_MT_object_specials.append(VIEW3D_MT_object_batch.menu)
def unregister():
    """Unregister the add-on's classes and detach its menu entries (mirror of register)."""
    bpy.utils.unregister_module(__name__)
    # Remove exactly the entries that register() appended above.
    bpy.types.VIEW3D_MT_armature_specials.remove(VIEW3D_MT_armature_specials.menu)
    bpy.types.VIEW3D_MT_edit_mesh_specials.remove(VIEW3D_MT_edit_mesh_specials.menu)
    bpy.types.VIEW3D_MT_object_specials.remove(VIEW3D_MT_object_specials.menu)
    bpy.types.VIEW3D_MT_pose_specials.remove(VIEW3D_MT_pose_specials.menu)
    bpy.types.VIEW3D_MT_object_specials.remove(VIEW3D_MT_object_batch.menu)
# Allow running the file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| [
"meta.androcto1@gmail.com"
] | meta.androcto1@gmail.com |
f4ef86db426b803bbb16c0ac7b8b53b436cc1d88 | 55a281d728541773e6eda896599c0cc48dfe5156 | /Advanced/Functions Advanced/4. Even or Odd.py | 2bbf52c69c734eb8c90fc21f076ff63127380a23 | [] | no_license | dhariskov/python-advanced | c0bebd937f3849dd62ae2834cbdf9f8100b2bb56 | 4725070c960d3c234ed2f20ff2156e2f89514a02 | refs/heads/master | 2022-12-04T22:40:18.485552 | 2020-08-28T08:29:25 | 2020-08-28T08:29:25 | 288,775,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | def even_odd(*args):
command = args[-1]
ll = args[:len(args)-1:]
if command == "odd":
sum_odd = list(filter(lambda x: x % 2 == 1, ll))
return sum_odd
elif command == "even":
sum_even = list(filter(lambda x: x % 2 == 0, ll))
return sum_even
print(even_odd(1, 2, 3, 4, 5, 6, "even"))  # -> [2, 4, 6]
print(even_odd(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "odd"))  # -> [1, 3, 5, 7, 9]
| [
"dhariskov@gmail.com"
] | dhariskov@gmail.com |
546c1c9e2e3477c864d0cc64cb3b3282e66ea1de | 9fb0500924f754425005d3ac92a4be538f203783 | /gaphor/UML/interactions/tests/test_executionspecification.py | da4a80c774098fc9232dc835753437caa641e3fd | [
"Apache-2.0"
] | permissive | seryafarma/gaphor | 491f57214c5392ad408cc7530424d99f7f81346f | f85998ae3a3ec5381b25cda60d89a47383c4fd2e | refs/heads/master | 2022-09-26T09:13:48.976569 | 2020-05-30T20:03:27 | 2020-05-30T20:03:39 | 268,274,153 | 0 | 0 | Apache-2.0 | 2020-05-31T12:21:13 | 2020-05-31T12:21:13 | null | UTF-8 | Python | false | false | 6,771 | py | from gaphas.canvas import Canvas, instant_cairo_context
from gaphor import UML
from gaphor.diagram.shapes import DrawContext
from gaphor.diagram.tests.fixtures import allow, connect, disconnect
from gaphor.UML.interactions.executionspecification import ExecutionSpecificationItem
from gaphor.UML.interactions.lifeline import LifelineItem
def create_lifeline_with_execution_specification(diagram, element_factory):
    """Create a lifeline (lifetime visible) with an execution specification
    connected to its lifetime port; return ``(lifeline, exec_spec)``."""
    lifeline = diagram.create(
        LifelineItem, subject=element_factory.create(UML.Lifeline)
    )
    lifeline.lifetime.visible = True
    exec_spec = diagram.create(ExecutionSpecificationItem)
    connect(exec_spec, exec_spec.handles()[0], lifeline, lifeline.lifetime.port)
    return lifeline, exec_spec
def test_draw_on_canvas(diagram):
    """Smoke test: drawing an unconnected execution specification must not raise."""
    exec_spec = diagram.create(ExecutionSpecificationItem)
    cr = instant_cairo_context()
    exec_spec.draw(
        DrawContext(
            cairo=cr,
            selected=False,
            focused=False,
            hovered=False,
            dropzone=False,
            style={},
        )
    )
def test_allow_execution_specification_to_lifeline(diagram):
    """An execution specification may be glued to a lifeline's lifetime port."""
    lifeline = diagram.create(LifelineItem)
    lifeline.lifetime.visible = True
    exec_spec = diagram.create(ExecutionSpecificationItem)
    glued = allow(exec_spec, exec_spec.handles()[0], lifeline, lifeline.lifetime.port)
    assert glued
def test_connect_execution_specification_to_lifeline(diagram, element_factory):
    """Connecting creates the model elements and both occurrences cover the lifeline."""
    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    assert exec_spec.subject
    assert lifeline.subject
    assert exec_spec.subject.start.covered is lifeline.subject
    assert (
        exec_spec.subject.executionOccurrenceSpecification[0].covered
        is lifeline.subject
    )
def test_disconnect_execution_specification_from_lifeline(diagram, element_factory):
    """Disconnecting removes the model elements but keeps the diagram item."""

    def elements_of_kind(kind):
        # Parameter renamed from ``type`` to avoid shadowing the builtin.
        return element_factory.lselect(kind)

    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    disconnect(exec_spec, exec_spec.handles()[0])
    assert lifeline.subject
    assert exec_spec.subject is None
    assert exec_spec.canvas
    assert elements_of_kind(UML.ExecutionSpecification) == []
    assert elements_of_kind(UML.ExecutionOccurrenceSpecification) == []
def test_allow_execution_specification_to_execution_specification(diagram):
    """An execution specification may be glued onto another one's port."""
    parent_exec_spec = diagram.create(ExecutionSpecificationItem)
    child_exec_spec = diagram.create(ExecutionSpecificationItem)
    glued = allow(
        parent_exec_spec,
        parent_exec_spec.handles()[0],
        child_exec_spec,
        child_exec_spec.ports()[0],
    )
    assert glued
def test_connect_execution_specification_to_execution_specification(
    diagram, element_factory
):
    """Connecting two detached execution specifications creates no model elements."""
    parent_exec_spec = diagram.create(ExecutionSpecificationItem)
    child_exec_spec = diagram.create(ExecutionSpecificationItem)
    connect(
        child_exec_spec,
        child_exec_spec.handles()[0],
        parent_exec_spec,
        parent_exec_spec.ports()[0],
    )
    assert not parent_exec_spec.subject
    assert not child_exec_spec.subject
def test_connect_execution_specification_to_execution_specification_with_lifeline(
    diagram, element_factory
):
    """A child connected to a lifeline-attached parent covers that lifeline."""
    lifeline, parent_exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    child_exec_spec = diagram.create(ExecutionSpecificationItem)
    connect(
        child_exec_spec,
        child_exec_spec.handles()[0],
        parent_exec_spec,
        parent_exec_spec.ports()[0],
    )
    assert child_exec_spec.subject
    assert lifeline.subject
    assert child_exec_spec.subject.start.covered is lifeline.subject
    assert (
        child_exec_spec.subject.executionOccurrenceSpecification[0].covered
        is lifeline.subject
    )
def test_connect_execution_specification_with_execution_specification_to_lifeline(
    diagram, element_factory
):
    """Attaching a parent (with a child already glued) to a lifeline sets both subjects."""
    lifeline = diagram.create(
        LifelineItem, subject=element_factory.create(UML.Lifeline)
    )
    lifeline.lifetime.visible = True
    parent_exec_spec = diagram.create(ExecutionSpecificationItem)
    child_exec_spec = diagram.create(ExecutionSpecificationItem)
    # Glue child onto parent first, then attach the parent to the lifeline.
    connect(
        child_exec_spec,
        child_exec_spec.handles()[0],
        parent_exec_spec,
        parent_exec_spec.ports()[0],
    )
    connect(
        parent_exec_spec,
        parent_exec_spec.handles()[0],
        lifeline,
        lifeline.lifetime.port,
    )
    assert parent_exec_spec.subject
    assert child_exec_spec.subject
    assert lifeline.subject
    assert parent_exec_spec.subject.start.covered is lifeline.subject
    assert child_exec_spec.subject.start.covered is lifeline.subject
    assert (
        child_exec_spec.subject.executionOccurrenceSpecification[0].covered
        is lifeline.subject
    )
def test_disconnect_execution_specification_with_execution_specification_from_lifeline(
    diagram, element_factory
):
    """Disconnecting the parent clears the model elements of the whole chain."""

    def elements_of_kind(kind):
        # Parameter renamed from ``type`` to avoid shadowing the builtin.
        return element_factory.lselect(kind)

    lifeline, parent_exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    child_exec_spec = diagram.create(ExecutionSpecificationItem)
    grand_child_exec_spec = diagram.create(ExecutionSpecificationItem)
    connect(
        child_exec_spec,
        child_exec_spec.handles()[0],
        parent_exec_spec,
        parent_exec_spec.ports()[0],
    )
    connect(
        grand_child_exec_spec,
        grand_child_exec_spec.handles()[0],
        child_exec_spec,
        child_exec_spec.ports()[0],
    )
    disconnect(parent_exec_spec, parent_exec_spec.handles()[0])
    assert lifeline.subject
    assert parent_exec_spec.subject is None
    assert child_exec_spec.subject is None
    assert grand_child_exec_spec.subject is None
    assert elements_of_kind(UML.ExecutionSpecification) == []
    assert elements_of_kind(UML.ExecutionOccurrenceSpecification) == []
def test_save_and_load(diagram, element_factory, saver, loader):
    """An execution specification survives a save/load round trip with its
    two occurrence specifications and its lifeline connection intact."""
    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    diagram.canvas.update_now()
    saved_data = saver()
    loader(saved_data)
    exec_specs = element_factory.lselect(
        lambda e: e.isKindOf(UML.ExecutionSpecification)
    )
    loaded_exec_spec = exec_specs[0].presentation[0]
    # Exactly one exec spec and its start/finish occurrences are restored.
    assert len(exec_specs) == 1
    assert (
        len(
            element_factory.lselect(
                lambda e: e.isKindOf(UML.ExecutionOccurrenceSpecification)
            )
        )
        == 2
    )
    # The reloaded item must still be attached (its handle has a connection).
    assert loaded_exec_spec.canvas.get_connection(loaded_exec_spec.handles()[0])
| [
"gaphor@gmail.com"
] | gaphor@gmail.com |
3383f2959f626f37b6ab18cc8a5d8816397abc6c | 9f0babb96bb327aaa859aeb7950fb6e5b2fca73d | /HIGHLIGHTS/freeSpacePropagateModes.py | 2dfd7d288377348cfa03d26e0b905dcbe8b3f681 | [
"MIT"
] | permissive | srio/shadow3-scripts | d39e750774ad8f1c551e9965d4402b3fcb2b043d | 7dd9b4424f47e6d78db9fd6fcb5a3db788b062f7 | refs/heads/master | 2022-09-18T03:37:16.480163 | 2022-09-02T13:43:46 | 2022-09-02T13:43:46 | 43,300,813 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py | __author__ = 'mglass'
from srwlib import *
import sys
from comsyl.autocorrelation.AutocorrelationFunction import AutocorrelationFunction
from comsyl.autocorrelation.AutocorrelationFunctionPropagator import AutocorrelationFunctionPropagator
from comsyl.parallel.utils import isMaster, barrier
from comsyl.utils.Logger import log
def createBeamlinePS(distance, undulator, source_position):
    """Build the SRW beamline container for free-space propagation.

    source_position selects where the wavefront originates relative to
    the undulator: "entrance" adds half the undulator length to the
    drift, "center" adds nothing; any other value raises.
    """
    if source_position == "entrance":
        source_offset = undulator.length() * 0.5 #+ 2 * comparer.undulator().periodLength()
        log("Using source position entrance z=%f" % source_offset)
    elif source_position == "center":
        source_offset = 0.0
        log("Using source position center z=%f" % source_offset)
    else:
        raise Exception("Unhandled source position")
    # Scale the horizontal/vertical propagation factors with distance
    # (integer metres + 1, so at least 1).
    div_x_factor = int(distance) + 1
    div_y_factor = int(distance) + 1
    # Single drift element plus the two SRW propagation-parameter lists;
    # the slot meanings follow the SRW convention (range/resolution
    # scaling per dimension) — presumably tuned for this experiment.
    optBL = SRWLOptC([SRWLOptD(source_offset+distance)],
                     [[0, 0, 1.0, 0, 0, div_x_factor, 1, div_y_factor, 1, 0, 0, 0], [0, 0, 1.0, 0, 0, 1, 0.05/2.0, 1, 0.1, 0, 0, 0]])
    return optBL
def propagateModes(distance, filename, directory_name,maximum_mode=None):
    """Propagate the coherent modes stored in *filename* over *distance*
    and save intermediate plus final results under *directory_name*.

    When maximum_mode is None it is derived from the mode distribution:
    the number of modes whose occupation magnitude exceeds 5e-5.
    """
    # Base name (no directory, no .npz extension) used for output files.
    af_name = filename.split("/")[-1].replace(".npz", "")
    autocorrelation_function = AutocorrelationFunction.load(filename)
    undulator = autocorrelation_function._undulator
    beamline = createBeamlinePS(distance, undulator, source_position=autocorrelation_function.info().sourcePosition())
    propagator = AutocorrelationFunctionPropagator(beamline)
    if maximum_mode is None:
        mode_distribution=autocorrelation_function.modeDistribution()
        maximum_mode = mode_distribution[abs(mode_distribution)>0.00005].shape[0]
    propagator.setMaximumMode(maximum_mode)
    data_directory = "%s/data_free_%s" % (directory_name, af_name)
    # Only the master rank creates the directory; barrier() keeps the
    # other parallel workers from proceeding before it exists.
    if isMaster():
        if not os.path.exists(data_directory):
            os.mkdir(data_directory)
    barrier()
    propagated_filename = "%s/%s_d%.1f.npz" % (data_directory, af_name, distance)
    af = propagator.propagate(autocorrelation_function, propagated_filename)
    af.save("%s/free_prop_%s_d%.1f.npz" % (directory_name, af_name, distance))
if __name__ == "__main__":
    # CLI parsing is disabled; distance and input file are hard-coded below.
    # if len(sys.argv) <= 2:
    #     print("Need distance and filename")
    #     exit()
    filename_ebs = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz" # OK EBS
    # filename_lb = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_low_beta_u18_2m_1h_s6.5.npy" # OK LB
    # filename_hb = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_high_beta_u18_2m_1h_s2.0.npy"
    distance = 26.0 # float(sys.argv[1])
    filename = filename_ebs # sys.argv[2]
    directory_name = "propagation"
    # Propagate a fixed number of modes regardless of the mode distribution.
    propagateModes(distance, filename, directory_name, maximum_mode=50)
| [
"srio@esrf.eu"
] | srio@esrf.eu |
9820f4e56513b1d24f74f5ae3cc92e63e23f2d7a | 2c5073c0140b3366b94866d50f8b975c926a529b | /venv/lib/python3.9/site-packages/mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py | 12f5045ea54268b2af3cf6362f92c5e865b13e3f | [] | no_license | geekboi777/Volumegesture | 435c2752d107ac6915919e79bcb63fb0b85f6e9e | 3cc35f74533e26588a606154897f9ded4801f0ce | refs/heads/master | 2023-06-24T19:09:07.138900 | 2021-07-30T23:22:18 | 2021-07-30T23:22:18 | 390,512,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,920 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/util/collection_has_min_size_calculator.proto
# NOTE: regenerate with protoc from the .proto above instead of editing by hand.
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
try:
  mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
except AttributeError:
  mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe.framework.calculator_options_pb2
# File descriptor holding the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mediapipe/calculators/util/collection_has_min_size_calculator.proto',
  package='mediapipe',
  syntax='proto2',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\nCmediapipe/calculators/util/collection_has_min_size_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\"\x9c\x01\n%CollectionHasMinSizeCalculatorOptions\x12\x13\n\x08min_size\x18\x01 \x01(\x05:\x01\x30\x32^\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\xd0\xb1\xd8{ \x01(\x0b\x32\x30.mediapipe.CollectionHasMinSizeCalculatorOptions'
,
  dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,])
_COLLECTIONHASMINSIZECALCULATOROPTIONS = _descriptor.Descriptor(
  name='CollectionHasMinSizeCalculatorOptions',
  full_name='mediapipe.CollectionHasMinSizeCalculatorOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='min_size', full_name='mediapipe.CollectionHasMinSizeCalculatorOptions.min_size', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
    _descriptor.FieldDescriptor(
      name='ext', full_name='mediapipe.CollectionHasMinSizeCalculatorOptions.ext', index=0,
      number=259397840, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=True, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=121,
  serialized_end=277,
)
# Register the descriptor and build the concrete message class from it.
DESCRIPTOR.message_types_by_name['CollectionHasMinSizeCalculatorOptions'] = _COLLECTIONHASMINSIZECALCULATOROPTIONS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CollectionHasMinSizeCalculatorOptions = _reflection.GeneratedProtocolMessageType('CollectionHasMinSizeCalculatorOptions', (_message.Message,), {
  'DESCRIPTOR' : _COLLECTIONHASMINSIZECALCULATOROPTIONS,
  '__module__' : 'mediapipe.calculators.util.collection_has_min_size_calculator_pb2'
  # @@protoc_insertion_point(class_scope:mediapipe.CollectionHasMinSizeCalculatorOptions)
  })
_sym_db.RegisterMessage(CollectionHasMinSizeCalculatorOptions)
_COLLECTIONHASMINSIZECALCULATOROPTIONS.extensions_by_name['ext'].message_type = _COLLECTIONHASMINSIZECALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_COLLECTIONHASMINSIZECALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
| [
"geekboi777@github.com"
] | geekboi777@github.com |
0b38a6f4b4ac235595e3a0c19b632b9b0a49a262 | f090c3e0faa70cf0ef7c4be99cb894630bce2842 | /scripts_201410/simpleMeasurements/micromotioncomp/scanEy.py | 4e93efb76ce8b5265babb5a67054ba26b52c4464 | [] | no_license | HaeffnerLab/resonator | 157d1dc455209da9b7de077157bda53b4883c8b7 | 7c2e377fdc45f6c1ad205f8bbc2e6607eb3fdc71 | refs/heads/master | 2021-01-09T20:48:03.587634 | 2016-09-22T18:40:17 | 2016-09-22T18:40:17 | 6,715,345 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | from FFT import measureFFT
import numpy as np
import labrad
import datetime
# Timestamp used as the data-vault directory name for today's run.
now = datetime.datetime.now()
date = now.strftime("%Y%m%d")
# LabRAD connections: data vault for storage, resonator DAC for voltages.
cxn = labrad.connect()
dv = cxn.data_vault
ds = cxn.resonatordac
#rs = cxn.rohdeschwarz_server
#rs.select_device('resonator-pc GPIB Bus - USB0::0x0AAD::0x0054::102549')
# Scan range for the Ey compensation amplitude (inclusive of amplMax).
amplMin = -.4
amplMax = -.3
amplStep = .01
recordTime = 0.5 #seconds
average = 6
freqSpan = 100.0 #Hz
freqOffset = -920.0 #Hz, the offset between the counter clock and the rf synthesizer clock
#setting up FFT
fft = measureFFT(cxn, recordTime, average, freqSpan, freqOffset, savePlot = False)
#saving
dv.cd(['', date, 'QuickMeasurements','FFT'],True)
name = dv.new('FFT',[('Amplitude', 'V/m')], [('FFTPeak','Arb','Arb')] )
dv.add_parameter('plotLive',True)
print 'Saving {}'.format(name)
amplitudes = np.arange(amplMin, amplMax + amplStep, amplStep)
# Fixed multipole settings; only Ey is varied in the loop below.
Ex = 0.19
Ez = 0
U1 = -.22
U2 = 4.5
U3 = .22
U4 = 0
U5 = 0
for Ey in amplitudes:
    ds.set_multipole_voltages([('Ex', Ex), ('Ey', Ey), ('Ez', Ez), ('U1', U1), ('U2', U2), ('U3', U3), ('U4', U4), ('U5', U5)])
    # Area under the FFT peak is recorded as the micromotion signal.
    micromotion = fft.getPeakArea(ptsAround = 3)
    dv.add(Ey, micromotion)
| [
"soenkeamoeller@gmail.com"
] | soenkeamoeller@gmail.com |
6605b246a60796200540bfea2493f300ae9e79fe | 7cebfa2066e679e19993a5507e59d1979df3d4a8 | /1_Basics/9_revamp.py | 7d04e2986df78e238ca677bcc1fb34fbcad2937f | [
"Apache-2.0"
] | permissive | Arunken/PythonScripts | 833e9e43ccb29234a206027f1cda1d978718d5eb | 702d0a3af7a9be3311f9da0afc5285d453f15484 | refs/heads/master | 2022-12-24T18:50:43.672779 | 2021-05-13T11:31:51 | 2021-05-13T11:31:51 | 237,631,027 | 0 | 0 | Apache-2.0 | 2022-12-08T00:47:45 | 2020-02-01T15:01:20 | Python | UTF-8 | Python | false | false | 1,196 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 20 14:18:16 2018
@author: Arken
"""
# splitting a sentence and storing it in a list or tuple
a = ' python programming '
a = a.strip()  # remove the surrounding spaces first
b = list(a) #saves individual characters separately in a list.
c = tuple(b) # The same goes for tuple.
tup = a.split() # split into individual words
tup1 = a.split('o') # split wherever a specific character is present
st = ','.join(tup1) # joins the items in the list and adds a comma in between
d = 'python programming on'
e = d[8:19:3] # slice: characters at indices 8..18, keeping every 3rd one
f = ['hi','I','am','learning','python','programming'] # list
for i in f:
    print(i) # prints each element in the list
for i in f:
    if 'hi' in f:
        print(i) # print each element in the list if a specific condition is satisfied
    else:
        print('ok thank you') # else print a message
g = [1,2,3,4,5,6,7,8,9,10]
for i in g:
    if i%2==0:
        print(i) # print even numbers in the list
for i in range(0,len(g),2):
    print(i) # print every 2nd index value between 0 and 10
for i in range(0,10):
if 2<i<9:
print(i)
else:
print('help me') | [
"mail.arunken@gmail.com"
] | mail.arunken@gmail.com |
12ae6ecdf67ee0c45b00920a1013ddc02f7e2206 | 4869c5e4d4b5ba6af434b62a2369ed58891c4eb0 | /addons/script.embuary.helper/resources/lib/library.py | 37ac5db0b300c2484d026e5146fc2a65a8b4b0e8 | [] | no_license | JohnnyBlackwater/Zephyr-mod | 1bd73a04549da83965a0979a1957ab4f98b03a6d | 2e9472793b45287b1114221f5dd1674ce886bca1 | refs/heads/master | 2023-08-31T22:54:47.827433 | 2021-11-15T03:01:01 | 2021-11-15T03:01:01 | 428,104,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,263 | py | #!/usr/bin/python
########################
import xbmc
import xbmcgui
from time import gmtime, strftime
from resources.lib.json_map import *
from resources.lib.helper import *
########################
def add_items(li, json_query, type, searchstring=None):
    """Dispatch every JSON result in *json_query* to the handler that
    matches *type*; unknown types are silently ignored."""
    # Handlers that also accept the optional search string.
    search_handlers = {'movie': handle_movies,
                       'tvshow': handle_tvshows}
    # Handlers that only take the list and the item.
    plain_handlers = {'season': handle_seasons,
                      'episode': handle_episodes,
                      'genre': handle_genre,
                      'cast': handle_cast}
    for item in json_query:
        if type in search_handlers:
            search_handlers[type](li, item, searchstring)
        elif type in plain_handlers:
            plain_handlers[type](li, item)
def handle_movies(li,item,searchstring=None):
    """Build a playable movie ListItem from a JSON-RPC movie record and
    append it to *li* as (path, item, is_folder=False)."""
    genre = item.get('genre', '')
    studio = item.get('studio', '')
    country = item.get('country', '')
    director = item.get('director', '')
    writer = item.get('writer', '')
    li_item = xbmcgui.ListItem(item['title'], offscreen=True)
    li_item.setInfo(type='Video', infoLabels={'title': item['title'],
                                              'originaltitle': item['originaltitle'],
                                              'sorttitle': item['sorttitle'],
                                              'year': item['year'],
                                              'genre': get_joined_items(genre),
                                              'studio': get_joined_items(studio),
                                              'country': get_joined_items(country),
                                              'director': get_joined_items(director),
                                              'writer': get_joined_items(writer),
                                              'plot': item['plot'],
                                              'plotoutline': item['plotoutline'],
                                              'dbid': item['movieid'],
                                              'imdbnumber': item['imdbnumber'],
                                              'tagline': item['tagline'],
                                              'tag': item['tag'],
                                              'rating': str(float(item['rating'])),
                                              'userrating': str(float(item['userrating'])),
                                              'votes': item['votes'],
                                              'mpaa': item['mpaa'],
                                              'lastplayed': item['lastplayed'],
                                              'mediatype': 'movie',
                                              'trailer': item['trailer'],
                                              'dateadded': item['dateadded'],
                                              'premiered': item['premiered'],
                                              'path': item['file'],
                                              'playcount': item['playcount'],
                                              'set': item['set'],
                                              'setid': item['setid'],
                                              'top250': item['top250']
                                              })
    # Cast is optional in the JSON payload.
    if 'cast' in item:
        cast_actors = _get_cast(item['cast'])
        li_item.setCast(item['cast'])
        _set_unique_properties(li_item,cast_actors[0],'cast')
    _set_ratings(li_item,item['ratings'])
    # Expose multi-value fields as indexed window properties for skins.
    _set_unique_properties(li_item,genre,'genre')
    _set_unique_properties(li_item,studio,'studio')
    _set_unique_properties(li_item,country,'country')
    _set_unique_properties(li_item,director,'director')
    _set_unique_properties(li_item,writer,'writer')
    li_item.setProperty('resumetime', str(item['resume']['position']))
    li_item.setProperty('totaltime', str(item['resume']['total']))
    li_item.setArt(item['art'])
    li_item.setArt({'icon': 'DefaultVideo.png'})
    hasVideo = False
    for key, value in iter(list(item['streamdetails'].items())):
        for stream in value:
            if 'video' in key:
                hasVideo = True
            li_item.addStreamInfo(key, stream)
    if not hasVideo: # if duration wasn't in the streaminfo try adding the scraped one
        stream = {'duration': item['runtime']}
        li_item.addStreamInfo('video', stream)
    if searchstring:
        li_item.setProperty('searchstring', searchstring)
    li.append((item['file'], li_item, False))
def handle_tvshows(li,item,searchstring=None):
    """Build a TV-show ListItem and append it to *li*.

    Outside the video-info dialog the entry is a browsable videodb folder;
    inside the dialog a plugin:// "folderjump" path is used instead.
    """
    genre = item.get('genre', '')
    studio = item.get('studio', '')
    dbid = item['tvshowid']
    season = item['season']
    episode = item['episode']
    watchedepisodes = item['watchedepisodes']
    unwatchedepisodes = get_unwatched(episode,watchedepisodes)
    if not condition('Window.IsVisible(movieinformation)'):
        folder = True
        item['file'] = 'videodb://tvshows/titles/%s/' % dbid
    else:
        folder = False
        item['file'] = 'plugin://script.embuary.helper/?action=folderjump&type=tvshow&dbid=%s' % dbid
    li_item = xbmcgui.ListItem(item['title'], offscreen=True)
    li_item.setInfo(type='Video', infoLabels={'title': item['title'],
                                              'year': item['year'],
                                              'tvshowtitle': item['title'],
                                              'sorttitle': item['sorttitle'],
                                              'originaltitle': item['originaltitle'],
                                              'genre': get_joined_items(genre),
                                              'studio': get_joined_items(studio),
                                              'plot': item['plot'],
                                              'rating': str(float(item['rating'])),
                                              'userrating': str(float(item['userrating'])),
                                              'votes': item['votes'],
                                              'premiered': item['premiered'],
                                              'mpaa': item['mpaa'],
                                              'tag': item['tag'],
                                              'mediatype': 'tvshow',
                                              'dbid': dbid,
                                              'season': season,
                                              'episode': episode,
                                              'imdbnumber': item['imdbnumber'],
                                              'lastplayed': item['lastplayed'],
                                              'path': item['file'],
                                              'duration': item['runtime'],
                                              'dateadded': item['dateadded'],
                                              'playcount': item['playcount']
                                              })
    if 'cast' in item:
        cast_actors = _get_cast(item['cast'])
        li_item.setCast(item['cast'])
        _set_unique_properties(li_item,cast_actors[0],'cast')
    _set_ratings(li_item,item['ratings'])
    _set_unique_properties(li_item,genre,'genre')
    _set_unique_properties(li_item,studio,'studio')
    # Episode counters exposed for skins.
    li_item.setProperty('totalseasons', str(season))
    li_item.setProperty('totalepisodes', str(episode))
    li_item.setProperty('watchedepisodes', str(watchedepisodes))
    li_item.setProperty('unwatchedepisodes', str(unwatchedepisodes))
    li_item.setArt(item['art'])
    li_item.setArt({'icon': 'DefaultVideo.png'})
    if searchstring:
        li_item.setProperty('searchstring', searchstring)
    li.append((item['file'], li_item, folder))
def handle_seasons(li,item):
    """Build a season ListItem ("Specials" for season 0) and append it
    to *li*, linking to videodb or a plugin folderjump path."""
    tvshowdbid = item['tvshowid']
    season = item['season']
    episode = item['episode']
    watchedepisodes = item['watchedepisodes']
    unwatchedepisodes = get_unwatched(episode,watchedepisodes)
    # 20381 = "Specials", 20373 = "Season" (localized).
    if season == 0:
        title = '%s' % (xbmc.getLocalizedString(20381))
        special = 'true'
    else:
        title = '%s %s' % (xbmc.getLocalizedString(20373), season)
        special = 'false'
    if not condition('Window.IsVisible(movieinformation)'):
        folder = True
        file = 'videodb://tvshows/titles/%s/%s/' % (tvshowdbid, season)
    else:
        folder = False
        file = 'plugin://script.embuary.helper/?action=folderjump&type=season&dbid=%s&season=%s' % (tvshowdbid, season)
    li_item = xbmcgui.ListItem(title, offscreen=True)
    li_item.setInfo(type='Video', infoLabels={'title': title,
                                              'season': season,
                                              'episode': episode,
                                              'tvshowtitle': item['showtitle'],
                                              'playcount': item['playcount'],
                                              'mediatype': 'season',
                                              'dbid': item['seasonid']
                                              })
    li_item.setArt(item['art'])
    # Fall back to the show fanart for the season entry.
    li_item.setArt({'icon': 'DefaultVideo.png',
                    'fanart': item['art'].get('tvshow.fanart', '')
                    })
    li_item.setProperty('watchedepisodes', str(watchedepisodes))
    li_item.setProperty('unwatchedepisodes', str(unwatchedepisodes))
    li_item.setProperty('isspecial', special)
    li_item.setProperty('season_label', item.get('label', ''))
    li.append((file, li_item, folder))
def handle_episodes(li,item):
    """Build a playable episode ListItem labelled '<season>x<episode>. title'
    ('S0<episode>. title' for specials) and append it to *li*."""
    director = item.get('director', '')
    writer = item.get('writer', '')
    # Zero-pad single-digit episode numbers in the label.
    if item['episode'] < 10:
        label = '0%s. %s' % (item['episode'], item['title'])
    else:
        label = '%s. %s' % (item['episode'], item['title'])
    # NOTE(review): season is compared as the string '0' here, while
    # handle_seasons compares the integer 0 — confirm the JSON type.
    if item['season'] == '0':
        label = 'S' + label
    else:
        label = '%sx%s' % (item['season'], label)
    li_item = xbmcgui.ListItem(label, offscreen=True)
    li_item.setInfo(type='Video', infoLabels={'title': item['title'],
                                              'episode': item['episode'],
                                              'season': item['season'],
                                              'premiered': item['firstaired'],
                                              'dbid': item['episodeid'],
                                              'plot': item['plot'],
                                              'tvshowtitle': item['showtitle'],
                                              'originaltitle': item['originaltitle'],
                                              'lastplayed': item['lastplayed'],
                                              'rating': str(float(item['rating'])),
                                              'userrating': str(float(item['userrating'])),
                                              'votes': item['votes'],
                                              'playcount': item['playcount'],
                                              'director': get_joined_items(director),
                                              'writer': get_joined_items(writer),
                                              'path': item['file'],
                                              'dateadded': item['dateadded'],
                                              'mediatype': 'episode'
                                              })
    if 'cast' in item:
        cast_actors = _get_cast(item['cast'])
        li_item.setCast(item['cast'])
        _set_unique_properties(li_item,cast_actors[0],'cast')
    _set_ratings(li_item,item['ratings'])
    _set_unique_properties(li_item,director,'director')
    _set_unique_properties(li_item,writer,'writer')
    li_item.setProperty('resumetime', str(item['resume']['position']))
    li_item.setProperty('totaltime', str(item['resume']['total']))
    li_item.setProperty('season_label', item.get('season_label', ''))
    # Show-level artwork first, then the episode's own art on top.
    li_item.setArt({'icon': 'DefaultTVShows.png',
                    'fanart': item['art'].get('tvshow.fanart', ''),
                    'poster': item['art'].get('tvshow.poster', ''),
                    'banner': item['art'].get('tvshow.banner', ''),
                    'clearlogo': item['art'].get('tvshow.clearlogo') or item['art'].get('tvshow.logo') or '',
                    'landscape': item['art'].get('tvshow.landscape', ''),
                    'clearart': item['art'].get('tvshow.clearart', '')
                    })
    li_item.setArt(item['art'])
    hasVideo = False
    for key, value in iter(list(item['streamdetails'].items())):
        for stream in value:
            if 'video' in key:
                hasVideo = True
            li_item.addStreamInfo(key, stream)
    if not hasVideo: # if duration wasn't in the streaminfo try adding the scraped one
        stream = {'duration': item['runtime']}
        li_item.addStreamInfo('video', stream)
    if item['season'] == '0':
        li_item.setProperty('IsSpecial', 'true')
    li.append((item['file'], li_item, False))
def handle_cast(li, item):
    """Append a cast-member entry (non-playable, empty path) to *li*."""
    actor_name = item['name']
    actor_role = item['role']
    entry = xbmcgui.ListItem(actor_name, offscreen=True)
    entry.setLabel(actor_name)
    entry.setLabel2(actor_role)
    entry.setProperty('role', actor_role)
    # Thumbnail may be missing in the JSON payload; fall back to empty.
    artwork = {'icon': 'DefaultActor.png',
               'thumb': item.get('thumbnail', '')}
    entry.setArt(artwork)
    li.append(('', entry, False))
def handle_genre(li, item):
    """Append a browsable genre folder entry to *li*."""
    genre_url = item['url']
    entry = xbmcgui.ListItem(item['label'], offscreen=True)
    info_labels = {'title': item['label'],
                   'dbid': str(item['genreid']),
                   'path': genre_url}
    entry.setInfo(type='Video', infoLabels=info_labels)
    entry.setArt(item['art'])
    entry.setArt({'icon': 'DefaultGenre.png'})
    li.append((genre_url, entry, True))
def get_unwatched(episode, watchedepisodes):
    """Return how many episodes are still unwatched (never negative)."""
    remaining = episode - watchedepisodes
    return remaining if remaining > 0 else 0
def _get_cast(castData):
listcast = []
listcastandrole = []
for castmember in castData:
listcast.append(castmember['name'])
listcastandrole.append((castmember['name'], castmember['role']))
return [listcast, listcastandrole]
def _set_unique_properties(li_item, item, prop):
    """Expose every value of *item* as an indexed '<prop>.<i>' property.

    Failures (e.g. *item* not iterable) are swallowed on purpose: the
    handlers treat these properties as best-effort extras.
    """
    try:
        for index, value in enumerate(item):
            li_item.setProperty('%s.%s' % (prop, index), value)
    except Exception:
        pass
    return li_item
def _set_ratings(li_item, item):
    """Forward every rating in *item* to ``li_item.setRating``.

    Kodi only supports ratings up to 10.0 while some providers (e.g.
    Rotten Tomatoes) use 0-100, so values in (10, 100] are divided by 10
    and anything above 100 is discarded. Malformed entries are skipped.
    """
    for key in item:
        try:
            rating = item[key]['rating']
            votes = item[key]['votes'] or 0
            # Mark as default when explicitly flagged or the only rating.
            default = key == 'default' or len(item) == 1
            if rating > 100:
                raise Exception
            if rating > 10:
                rating = rating / 10
            li_item.setRating(key, float(rating), votes, default)
        except Exception:
            pass
    return li_item
| [
"Johnnyblackx3@aol.com"
] | Johnnyblackx3@aol.com |
71b56e58f27fc67cf47cecacfa2c58b0264f5054 | f476cdf5a27e7768238854c5e7f24e3650ffeebc | /Codeforces/1409A.py | b5d64d8e69939839e4534b9d1c0c62fc669fc834 | [] | no_license | masudurHimel/Problematic_Adventure | 0d1a8b0d3cc6339d3d9a4f8ed9be9c1635ab290f | 3f32f5195c497e1c44d1a37c80ea644c31a53688 | refs/heads/master | 2023-05-01T23:34:15.697852 | 2023-04-28T09:30:00 | 2023-04-29T09:22:33 | 226,885,967 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | n = int(input())
# Codeforces 1409A: per test case, each operation may change a by any
# amount in 1..10; the greedy below counts the minimum operations, which
# equals ceil(|a - b| / 10).
for i in range(n):
    a, b = map(int, input().split())
    if a == b:
        print(0)
        continue
    diff_abs = abs(a-b)
    step = 0
    # Take the largest allowed increment first (10 down to 1).
    for j in range(10, 0, -1):
        if diff_abs//j != 0:
            step += diff_abs//j
            diff_abs = diff_abs % j
    print(step)
| [
"masudurhimel@gmail.com"
] | masudurhimel@gmail.com |
dcde52d8d1e7cda7719a66c2bc0f132c213960a8 | 43ff15a7989576712d0e51f0ed32e3a4510273c0 | /app/migrations/0010_auto_20160712_1040.py | e5c34056b176ac89bbcdf1bcefd4ce23094e8c03 | [] | no_license | v1cker/kekescan | f2b51d91a9d6496e2cdc767eb6a600171f513449 | 3daa1775648439ba9e0003a376f90b601820290e | refs/heads/master | 2020-09-19T16:26:56.522453 | 2017-06-15T02:55:24 | 2017-06-15T02:55:24 | 94,495,007 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-12 02:40
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: resets the ``default`` of two
    DateTimeFields to the fixed timestamps captured when makemigrations
    ran (2016-07-12 10:40)."""

    dependencies = [
        ('app', '0009_auto_20160712_1037'),
    ]
    operations = [
        migrations.AlterField(
            model_name='icpcheck',
            name='insert_time',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2016, 7, 12, 10, 40, 29, 429713), null=True),
        ),
        migrations.AlterField(
            model_name='subdomainbrute',
            name='fuzz_time',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2016, 7, 12, 10, 40, 29, 430219), null=True),
        ),
    ]
| [
"liyueke@huobi.com"
] | liyueke@huobi.com |
ebf4bdc53c74f65f3d597c85336264c25abf9174 | 242da8865e037f9fffb76269c3acddb73ce9fa14 | /packages/pyright-internal/src/tests/samples/tuples10.py | f111dbbb6122c9440b75b9f703c03c87600c2765 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | khyveasna11111908/pyright | f42eceae044f6fbc27552c1765b03ebd345a451c | 493d47807b96137995e4bb6ca341930e4de911f9 | refs/heads/main | 2023-08-30T00:08:36.191799 | 2021-09-25T19:17:13 | 2021-09-25T19:17:13 | 410,361,483 | 1 | 1 | NOASSERTION | 2021-09-25T19:15:23 | 2021-09-25T19:15:22 | null | UTF-8 | Python | false | false | 721 | py | # This sample tests that inferred types for tuples strip
# literals under the appropriate circumstances.
from typing import List, Literal, Tuple
a1 = (1, 2)  # literal element types are kept for a plain tuple assignment
t1: Literal["tuple[Literal[1], Literal[2]]"] = reveal_type(a1)
a2 = list((1, 2))  # literals widen to int when the tuple feeds list()
t2: Literal["list[int]"] = reveal_type(a2)
a3: List[Literal[1]] = list((1,))  # an explicit declared type is honoured
t3: Literal["list[Literal[1]]"] = reveal_type(a3)
def func1(v1: Tuple[Literal[1], ...], v2: Tuple[Literal[1]]):
    a4 = set(v1)  # homogeneous tuples keep their literal element type
    t4: Literal["set[Literal[1]]"] = reveal_type(a4)
    a5 = set(v2)
    t5: Literal["set[Literal[1]]"] = reveal_type(a5)
    a6 = (1, "hi")
    t6: Literal["tuple[Literal[1], Literal['hi']]"] = reveal_type(a6)
    v4 = set(a6)  # a heterogeneous tuple widens to int | str inside set()
    t7: Literal["set[int | str]"] = reveal_type(v4)
| [
"erictr@microsoft.com"
] | erictr@microsoft.com |
677418f0a2b1374b3bc980f5460395cd3bbfbbfb | f0fc7de70574a6bacacc5110652d28d076b0047f | /sierra/apps.py | 716f9716f80a7446947706969680513521e2988a | [] | no_license | robertvandeneynde/sierra | 6b95a285b116482efde3d05f9898e069d5022f76 | 59cbaaaa1f14d9a591c40f372180795353e1a6f4 | refs/heads/master | 2020-04-01T21:46:11.457236 | 2020-01-02T04:10:57 | 2020-01-02T04:10:57 | 153,674,376 | 1 | 1 | null | 2020-01-08T15:26:45 | 2018-10-18T19:07:58 | TeX | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class SierraConfig(AppConfig):
    """Django application configuration for the ``sierra`` app."""

    # Dotted module path Django uses to register this application.
    name = 'sierra'
| [
"robertvandeneynde@hotmail.com"
] | robertvandeneynde@hotmail.com |
b9e8aa1f4c274670cb8f1b15a15dd85cd3bc852c | 5341e31c0a210bd9449dcc4aa63d5ce5ab161e3a | /bin/cron_command.py | b1d4854f6c919ff2a88f67a2816330f03654c2ed | [
"MIT"
] | permissive | ipashchenko/watcher | efc347cd261c1483f4bc18cd030d3d42d09422d9 | ab55615b0ad8d23f98317bd49c2c9291c4add69b | refs/heads/master | 2021-01-10T10:20:09.504787 | 2016-01-25T12:27:44 | 2016-01-25T12:27:44 | 49,568,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | #!/usr/bin python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import smtplib
import netrc
from difflib import context_diff
from email.mime.application import MIMEApplication
from filecmp import cmp
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from os.path import basename
path = os.path.normpath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
sys.path.insert(0, path)
from watcher import watcher
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s -'
' %(message)s')
# logging.disable()
def func(f1, f2):
    """Mail the diff between two schedule files and promote the new one.

    If *f1* (previous schedule) and *f2* (freshly downloaded schedule)
    differ, a context diff plus *f2* as an attachment is e-mailed and
    *f2* replaces *f1*; otherwise *f2* is simply deleted.

    Note that by default, this looks to check your netrc credentials.
    To use this feature, create a .netrc file, so that only you can read
    and write it:

        touch ~/.netrc
        chmod 600 ~/.netrc

    and then add the information for the gmail smtp server, i.e.
    ``machine smtp.gmail.com login yourusername@gmail.com password
    yourpassword``
    """
    smtpserver = "smtp.gmail.com"
    tls = True
    fromaddr = "in4pashchenko@gmail.com"
    toaddr = "in4-pashchenko@yandex.ru"
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = toaddr
    if not cmp(f1, f2):
        logging.debug("Files {} & {} differs!".format(basename(f1),
                                                      basename(f2)))
        # Close both inputs deterministically; the original passed
        # anonymous open() handles to context_diff and leaked them.
        with open(f1) as old_file, open(f2) as new_file:
            diff = context_diff(old_file.readlines(), new_file.readlines(),
                                fromfile=basename(f1), tofile=basename(f2))
            text = ''.join(diff)
        with open(f2, "rb") as fil:
            # NOTE(review): these keyword params end up as Content-Type
            # parameters rather than real headers; a proper attachment
            # would use add_header('Content-Disposition', ...). Kept
            # as-is to preserve the existing mail format.
            msg.attach(MIMEApplication(fil.read(),
                Content_Disposition='attachment; filename="%s"' % basename(f2),
                Name=basename(f2)))
        body = text
        msg['Subject'] = "Changes in SVLBI schedule"
        msg.attach(MIMEText(body, 'plain'))
        # Credentials come from ~/.netrc; the account field is unused.
        secrets = netrc.netrc()
        netrclogin, netrcaccount, netrcpassword = secrets.authenticators(smtpserver)
        s = smtplib.SMTP(smtpserver)
        try:
            if tls:
                s.starttls()
            s.login(netrclogin, netrcpassword)
            # Reuse the addresses already bound above instead of
            # repeating the literals (they were identical strings).
            s.sendmail(fromaddr, [toaddr], msg.as_string())
        finally:
            # Always close the SMTP connection, even if sending fails.
            s.quit()
        logging.debug("Moving file {} to {}!".format(basename(f2),
                                                     basename(f1)))
        shutil.move(f2, f1)
    else:
        logging.debug("Files {} & {} are the same!".format(basename(f1),
                                                           basename(f2)))
        os.unlink(f2)
if __name__ == '__main__':
    # Expect exactly three arguments: month, year and a working directory.
    if not len(sys.argv) == 4:
        print("Usage: cron_command.py month year directory")
        sys.exit(0)
    month = sys.argv[1]
    year = sys.argv[2]
    # User-specified directory
    dir = sys.argv[3]
    # Get last SVLBI schedule
    watcher.get_last_svlbi_schedule(month, year, os.path.join(dir,
                                                              'svlbi_new.txt'))
    # Compare with the previous schedule, mail any diff, rotate the files.
    func(os.path.join(dir, 'svlbi.txt'), os.path.join(dir, 'svlbi_new.txt'))
| [
"in4pashchenko@gmail.com"
] | in4pashchenko@gmail.com |
43fe1136c40dce46d74805aa4f3ea6a264cfcc08 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/semantic_segmentation/BiseNetV1_for_PyTorch/configs/fcn/fcn_r50-d8_769x769_40k_cityscapes.py | 0a8eafa418cb4e080cad194c16bddd0970d1efb8 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,050 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# Inherit model, dataset, runtime and schedule settings; only the
# overrides below differ for this 769x769 / 40k-iteration Cityscapes run.
_base_ = [
    '../_base_/models/fcn_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
# Enable align_corners in both heads and evaluate with sliding-window
# inference (769x769 crops, stride 513).
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
586847bd394d5bf213a98ea55153760e22ad456c | 2b8fe23680fb8c6596c8b4fd53a2547e32e84617 | /1-DS-Array-String/String_Compress.py | 43bcad128033c460e03812bfdc4ce87e60f0064c | [] | no_license | jigarshah2811/Python-Programming | b441c4815f80bef4d17611cdea851254c59739a9 | a60a11ad29e9dde9e9960006f887d9b66d29e427 | refs/heads/master | 2022-11-20T18:09:11.955564 | 2022-11-04T05:58:19 | 2022-11-04T05:58:19 | 67,324,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | """
https://nbviewer.org/github/donnemartin/interactive-coding-challenges/blob/master/arrays_strings/compress/compress_challenge.ipynb
Problem: Compress a string such that 'AAABCCDDDD' becomes 'A3BC2D4'. Only compress the string if it saves space.
"""
import unittest
class TestSolution(unittest.TestCase):
    """Ad-hoc harness: checks a compress implementation passed as *func*."""

    def testCompressString(self, func):
        # (input, expected) pairs: empty, incompressible, compressible.
        cases = (
            ("", ""),
            ("ABC", "ABC"),
            ("AAABC", "A3BC"),
            ("AAABCCCC", "A3BC4"),
        )
        for raw, expected in cases:
            self.assertEqual(func(raw), expected)
class Solution:
    def compressString(self, inputStr: str) -> str:
        """Run-length encode *inputStr*, e.g. 'AAABCCCC' -> 'A3BC4'.

        Pattern: while scanning, a repeat of the previous char only bumps
        the run counter; a new char flushes the previous run's count
        (when > 1) and emits the char itself.

        Bug fix: the problem statement requires compressing only when it
        saves space, so the original string is returned whenever the
        encoded form is not strictly shorter.
        """
        # Edge case: strings with < 2 chars ("" / "A") can never shrink.
        if len(inputStr) < 2:
            return inputStr
        # Work on lists, not str (immutable), to keep appends cheap.
        res, s = list(), list(inputStr)
        counter = 1  # occurrences of the current run's character
        # Emit the first char as-is; its run count is appended later.
        res.append(s[0])
        for i in range(1, len(s)):
            if s[i] == s[i-1]:  # same run: just count the duplicate
                counter += 1
            else:  # new run: flush the previous count, emit the new char
                if counter > 1:
                    res.append(counter)
                counter = 1
                res.append(s[i])
        if counter > 1:  # flush the final run's count
            res.append(counter)
        compressed = ''.join(map(str, res))
        # Only compress the string if it actually saves space.
        return compressed if len(compressed) < len(inputStr) else inputStr
def main():
solution = Solution()
testSolution = TestSolution()
testSolution.testCompressString(solution.compressString)
if __name__ == "__main__":
main()
| [
"jshah@pinterest.com"
] | jshah@pinterest.com |
94ec424c89decabfdda8c83f68cfd5daceac066b | 9c7581c3b862174878a5e71609f94b3e5a2de5c9 | /CursoEmVideo/Aula20/ex097.py | db52582c0213396493f58974d80d72cb11e57046 | [
"MIT"
] | permissive | lucashsouza/Desafios-Python | 6d9fdc3500e0d01ce9a75201fc4fe88469928170 | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | refs/heads/master | 2020-06-21T16:49:32.884025 | 2019-07-23T01:23:07 | 2019-07-23T01:23:07 | 143,765,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Exercicio 097 - Função para texto
'''
Faça um programa que tenha uma função chamada escreva(), que receba um texto
qualquer como parâmetro e mostre uma mensagem com o tamanho adaptável
'''
def mensagem(txt):
tam = len(txt)
print('~'*tam)
print(txt)
print('~'*tam, '\n')
mensagem('Hello, world!')
mensagem('Python é a a melhor linguagem de programação')
| [
"noreply@github.com"
] | lucashsouza.noreply@github.com |
bd89de0e8e6e8a3312f0081d9b98cb531374b37a | 40fc319f88b1296af916bd61c14dc55512bb3951 | /changeset/tests.py | d629a9c9fd6be51905eb6a0adcbe8f55adddaa58 | [] | no_license | aaj013/pycrocosm | bd43a119a5fb3ec8a38b78e181e99cd3ee0b75b9 | 40cb6ebdbcec08fa4e8bc85ea6ed5854de1bb84b | refs/heads/master | 2020-12-02T08:44:04.190942 | 2019-11-26T00:52:36 | 2019-11-26T00:52:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,541 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from django.contrib.auth.models import User
import xml.etree.ElementTree as ET
from defusedxml.ElementTree import parse, fromstring
import sys
import pgmap
import gc
import sys
import time
import datetime
import random
from changeset import views
from querymap.views import p
from xml.sax.saxutils import escape
from querymap.tests import create_node, create_way, create_relation, modify_node, modify_way, modify_relation
from changeset.management.commands import closeoldchangesets
from django.conf import settings
def ParseOsmDiffToDict(xml):
	"""Convert an osmChange diff-result XML element into nested dicts.

	Returns {'node': {...}, 'way': {...}, 'relation': {...}} where each
	inner dict maps an element's old_id to a (new_id, new_version)
	tuple; any attribute missing from an element is recorded as None.
	"""
	result = {'node': {}, 'way': {}, 'relation': {}}
	for el in xml:
		attribs = el.attrib
		oldId = int(attribs["old_id"]) if "old_id" in attribs else None
		newId = int(attribs["new_id"]) if "new_id" in attribs else None
		newVer = int(attribs["new_version"]) if "new_version" in attribs else None
		result[el.tag][oldId] = (newId, newVer)
	return result
def GetObj(p, objType, objId):
	"""Fetch a single OSM object of *objType* ("node"/"way"/"relation")
	by id from the database behind connection *p*.

	Returns a detached copy (pgmap.OsmNode/OsmWay/OsmRelation) or None
	when the object does not exist or *objType* is unrecognized.
	"""
	t = p.GetTransaction("ACCESS SHARE")
	# Results must be copied out before osmData goes out of scope.
	osmData = pgmap.OsmData()
	t.GetObjectsById(objType, [objId], osmData)
	del t
	if objType == "node":
		found = osmData.nodes
		return pgmap.OsmNode(found[0]) if len(found) > 0 else None
	if objType == "way":
		found = osmData.ways
		return pgmap.OsmWay(found[0]) if len(found) > 0 else None
	if objType == "relation":
		found = osmData.relations
		return pgmap.OsmRelation(found[0]) if len(found) > 0 else None
	return None
def CreateTestChangeset(user, tags=None, is_open=True, bbox=None, open_timestamp=None, close_timestamp=None):
	"""Insert a changeset fixture directly into the database.

	*user* supplies the username/uid recorded on the changeset. *tags*
	defaults to {'foo': 'bar'} when None. Timestamps default to "now";
	close_timestamp is only stored for closed changesets. *bbox* is an
	optional (x1, y1, x2, y2) tuple. Returns the pgmap.PgChangeset with
	its objId set to the newly allocated id.
	"""
	changeset = pgmap.PgChangeset()
	errStr = pgmap.PgMapError()
	for key, value in ({'foo': 'bar'} if tags is None else tags).items():
		changeset.tags[key] = value
	changeset.username = user.username
	changeset.uid = user.id
	changeset.is_open = is_open
	changeset.open_timestamp = int(time.time() if open_timestamp is None else open_timestamp)
	if not is_open:
		changeset.close_timestamp = int(time.time() if close_timestamp is None else close_timestamp)
	if bbox is not None:
		changeset.bbox_set = True
		changeset.x1 = bbox[0]
		changeset.y1 = bbox[1]
		changeset.x2 = bbox[2]
		changeset.y2 = bbox[3]
	t = p.GetTransaction("EXCLUSIVE")
	changeset.objId = t.CreateChangeset(changeset, errStr)
	t.Commit()
	del t
	return changeset
def CheckChangesetListContainsId(obj, xml, csId, expected):
	"""Assert, via TestCase *obj*, that the <osm> changeset listing *xml*
	does (expected=True) or does not (expected=False) contain a
	changeset with id *csId*. Also asserts every child is a <changeset>
	element, up to and including the first match."""
	obj.assertEqual(xml.tag, "osm")
	matched = False
	for child in xml:
		obj.assertEqual(child.tag, "changeset")
		matched = int(child.attrib["id"]) == csId
		if matched:
			break
	obj.assertEqual(matched, expected)
# Create your tests here.
# alter user microcosm with createdb;
# python manage.py test changeset --keep
class ChangesetTestCase(TestCase):
	"""Integration tests for the changeset REST endpoints (create, read,
	update, close, expand_bbox), exercised through the Django test
	client against the live pgmap database backend."""
	def setUp(self):
		# One registered user with an authenticated client; tests that
		# need anonymous access construct their own Client().
		self.username = "john"
		self.password = "glass onion"
		self.email = 'jlennon@beatles.com'
		self.user = User.objects.create_user(self.username, self.email, self.password)
		self.client = Client()
		self.client.login(username=self.username, password=self.password)
		# Minimal OSM API 0.6 changeset creation payload.
		self.createXml = """<?xml version='1.0' encoding='UTF-8'?>
		<osm>
		<changeset>
			<tag k="created_by" v="JOSM 1.61"/>
			<tag k="comment" v="Just adding some streetnames"/>
		</changeset>
		</osm>"""
		# Strings from https://www.cl.cam.ac.uk/~mgk25/ucs/examples/quickbrown.txt
		self.unicodeStr = u"Falsches Üben von Xylophonmusik quält jeden größeren Zwerg, Γαζέες καὶ μυρτιὲς δὲν θὰ βρῶ πιὰ στὸ χρυσαφὶ ξέφωτο, Kæmi ný öxi hér ykist þjófum nú bæði víl og ádrepa, イロハニホヘト チリヌルヲ ワカヨタレソ ツネナラム, В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!"
		self.createXmlUnicodeTags = u"""<?xml version='1.0' encoding='UTF-8'?>
		<osm>
		<changeset>
			<tag k="source" v="photomapping"/>
			<tag k="comment" v="{}"/>
		</changeset>
		</osm>""".format(escape(self.unicodeStr))
		# Over-length tag value used to provoke a 400 response
		# (presumably exceeds settings.MAX_TAG_LENGTH -- see
		# test_create_changeset_overlong).
		self.overlongString = u"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam vulputate quam sit amet arcu efficitur, eget ullamcorper ligula suscipit. Nunc ullamcorper pellentesque libero at lacinia. Donec ut arcu mauris. Quisque ultrices tincidunt pharetra. Morbi indo."
		self.createXmlOverlong = u"""<?xml version='1.0' encoding='UTF-8'?>
		<osm>
		<changeset>
			<tag k="source" v="photomapping"/>
			<tag k="comment" v="{}"/>
		</changeset>
		</osm>""".format(escape(self.overlongString))
		# JOSM-style upload payload used to grow a changeset's bbox.
		self.expandBboxXml = """<?xml version='1.0' encoding='UTF-8'?>
		<osm version='0.6' upload='true' generator='JOSM'>
		  <node id='-2190' action='modify' visible='true' lat='51.79852581343' lon='-3.38662147656' />
		  <node id='-2193' action='modify' visible='true' lat='50.71917284205' lon='-5.24880409375' />
		  <node id='-2197' action='modify' visible='true' lat='50.29646268337' lon='-4.07326698438' />
		  <node id='-2199' action='modify' visible='true' lat='50.70178040373' lon='-3.08999061719' />
		  <node id='-2201' action='modify' visible='true' lat='51.08292478386' lon='-3.28225135938' />
		  <way id='-2194' action='modify' visible='true'>
		    <nd ref='-2190' />
		    <nd ref='-2193' />
		    <nd ref='-2197' />
		    <nd ref='-2199' />
		    <nd ref='-2201' />
		  </way>
		</osm>"""
	def get_test_changeset(self, cid):
		"""Read changeset *cid* straight from the database.

		Fails the test if the lookup errors, raises KeyError when the
		changeset does not exist, otherwise returns the
		pgmap.PgChangeset."""
		t = p.GetTransaction("ACCESS SHARE")
		cs2 = pgmap.PgChangeset()
		errStr = pgmap.PgMapError()
		ret = t.GetChangeset(cid, cs2, errStr)
		t.Commit()
		if ret == 0:
			print (errStr)
		self.assertEqual(ret != 0, True)
		if ret == -1:
			raise KeyError("Changeset not found")
		return cs2
	def test_create_changeset(self):
		"""PUT /changeset/create returns the new id and stores the tags."""
		response = self.client.put(reverse('changeset:create'), self.createXml, content_type='text/xml')
		if response.status_code != 200:
			print (response.content)
		self.assertEqual(response.status_code, 200)
		cid = int(response.content)
		cs = self.get_test_changeset(cid)
		self.assertEqual("created_by" in cs.tags, True)
		self.assertEqual("comment" in cs.tags, True)
		self.assertEqual(cs.tags["created_by"] == "JOSM 1.61", True)
		self.assertEqual(cs.tags["comment"] == "Just adding some streetnames", True)
	def test_anon_create_changeset(self):
		"""Anonymous changeset creation is refused with 403."""
		anonClient = Client()
		response = anonClient.put(reverse('changeset:create'), self.createXml, content_type='text/xml')
		if response.status_code != 403:
			print (response.content)
		self.assertEqual(response.status_code, 403)
	def test_create_changeset_unicodetags(self):
		"""Non-ASCII tag values survive a create/read round trip."""
		response = self.client.put(reverse('changeset:create'), self.createXmlUnicodeTags, content_type='text/xml')
		if response.status_code != 200:
			print (response.content)
		self.assertEqual(response.status_code, 200)
		cid = int(response.content)
		cs = self.get_test_changeset(cid)
		self.assertEqual("comment" in cs.tags, True)
		self.assertEqual(views.DecodeIfNotUnicode(cs.tags["comment"]) == self.unicodeStr, True)
	def test_create_changeset_overlong(self):
		"""An over-length tag value is rejected with 400."""
		response = self.client.put(reverse('changeset:create'), self.createXmlOverlong, content_type='text/xml')
		self.assertEqual(response.status_code, 400)
	def test_get_changeset(self):
		"""Anonymous GET of an open changeset returns attributes, bbox
		fields and tags; no <discussion> element is present."""
		teststr = u"Съешь же ещё этих мягких французских булок да выпей чаю"
		cs = CreateTestChangeset(self.user, tags={"foo": "bar", 'test': teststr}, bbox=(-1.0893202,50.7942715,-1.0803509,50.7989372))
		anonClient = Client()
		response = anonClient.get(reverse('changeset:changeset', args=(cs.objId,)))
		self.assertEqual(response.status_code, 200)
		xml = fromstring(response.content)
		self.assertEqual(xml.tag, "osm")
		csout = xml.find("changeset")
		self.assertEqual(int(csout.attrib["id"]) == cs.objId, True)
		self.assertEqual("uid" in csout.attrib, True)
		self.assertEqual("created_at" in csout.attrib, True)
		self.assertEqual("min_lon" in csout.attrib, True)
		self.assertEqual("max_lon" in csout.attrib, True)
		self.assertEqual("min_lat" in csout.attrib, True)
		self.assertEqual("max_lat" in csout.attrib, True)
		self.assertEqual(csout.attrib["open"], "true")
		self.assertEqual(len(csout.findall("tag")), 2)
		foundFirst, foundSecond = False, False
		for tag in csout.findall("tag"):
			if tag.attrib["k"] == "foo":
				self.assertEqual(tag.attrib["v"], "bar")
				foundFirst = True
			if tag.attrib["k"] == "test":
				self.assertEqual(tag.attrib["v"], teststr)
				foundSecond = True
		self.assertEqual(foundFirst, True)
		self.assertEqual(foundSecond, True)
		self.assertEqual(csout.find("discussion"), None)
	def test_get_changeset_missing(self):
		"""GET of a nonexistent changeset id returns 404."""
		anonClient = Client()
		response = anonClient.get(reverse('changeset:changeset', args=(0,)))
		self.assertEqual(response.status_code, 404)
	def test_put_changeset(self):
		"""PUT on an existing changeset replaces its tags with the
		payload's tags."""
		cs = CreateTestChangeset(self.user, tags={"foo": "bar", "man": "child"})
		response = self.client.put(reverse('changeset:changeset', args=(cs.objId,)), self.createXml, content_type='text/xml')
		self.assertEqual(response.status_code, 200)
		xml = fromstring(response.content)
		self.assertEqual(xml.tag, "osm")
		csout = xml.find("changeset")
		self.assertEqual(len(csout.findall("tag")), 2)
		for tag in csout.findall("tag"):
			if tag.attrib["k"] == "comment":
				self.assertEqual(tag.attrib["v"], "Just adding some streetnames")
			if tag.attrib["k"] == "created_by":
				self.assertEqual(tag.attrib["v"], "JOSM 1.61")
	def test_put_changeset_anon(self):
		"""Anonymous PUT on a changeset is refused with 403."""
		cs = CreateTestChangeset(self.user, tags={"foo": "bar", "man": "child"})
		anonClient = Client()
		response = anonClient.put(reverse('changeset:changeset', args=(cs.objId,)), self.createXml, content_type='text/xml')
		self.assertEqual(response.status_code, 403)
	def test_close_changeset(self):
		"""PUT /changeset/<id>/close marks the changeset closed in the
		database."""
		cs = CreateTestChangeset(self.user)
		response = self.client.put(reverse('changeset:close', args=(cs.objId,)))
		self.assertEqual(response.status_code, 200)
		t = p.GetTransaction("ACCESS SHARE")
		cs2 = pgmap.PgChangeset()
		errStr = pgmap.PgMapError()
		ret = t.GetChangeset(cs.objId, cs2, errStr)
		t.Commit()
		self.assertEqual(ret != 0, True)
		self.assertEqual(cs2.is_open, False)
	def test_close_changeset_double_close(self):
		"""Closing an already-closed changeset yields 409 plus a
		human-readable message carrying the close timestamp."""
		cs = CreateTestChangeset(self.user)
		response = self.client.put(reverse('changeset:close', args=(cs.objId,)))
		self.assertEqual(response.status_code, 200)
		t = p.GetTransaction("ACCESS SHARE")
		cs2 = pgmap.PgChangeset()
		errStr = pgmap.PgMapError()
		ret = t.GetChangeset(cs.objId, cs2, errStr)
		t.Commit()
		self.assertEqual(cs2.is_open, False)
		response = self.client.put(reverse('changeset:close', args=(cs.objId,)))
		self.assertEqual(response.status_code, 409)
		self.assertEqual(response.content.decode("UTF-8"), "The changeset {} was closed at {}.".format(cs2.objId,
			datetime.datetime.fromtimestamp(cs2.close_timestamp).isoformat()))
	def test_close_changeset_anon(self):
		"""Anonymous close attempt is refused and the changeset stays
		open."""
		cs = CreateTestChangeset(self.user)
		anonClient = Client()
		response = anonClient.put(reverse('changeset:close', args=(cs.objId,)))
		if response.status_code != 403:
			print (response.content)
		self.assertEqual(response.status_code, 403)
		cs2 = self.get_test_changeset(cs.objId)
		self.assertEqual(cs2.is_open, True)
	def test_expand_bbox(self):
		"""POST /changeset/<id>/expand_bbox grows the stored bounding box
		to cover the uploaded nodes; both the database record and the
		response XML reflect the new extents."""
		cs = CreateTestChangeset(self.user)
		response = self.client.post(reverse('changeset:expand_bbox', args=(cs.objId,)), self.expandBboxXml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 200)
		t = p.GetTransaction("ACCESS SHARE")
		cs2 = pgmap.PgChangeset()
		errStr = pgmap.PgMapError()
		t.GetChangeset(cs.objId, cs2, errStr)
		self.assertEqual(cs2.bbox_set, True)
		self.assertEqual(abs(cs2.y1 - 50.2964626834) < 1e-5, True)
		self.assertEqual(abs(cs2.y2 - 51.7985258134) < 1e-5, True)
		self.assertEqual(abs(cs2.x1 + 5.24880409375) < 1e-5, True)
		self.assertEqual(abs(cs2.x2 + 3.08999061719) < 1e-5, True)
		xml = fromstring(response.content)
		self.assertEqual(xml.tag, "osm")
		csout = xml.find("changeset")
		self.assertEqual(int(csout.attrib["id"]) == cs.objId, True)
		self.assertEqual(abs(float(csout.attrib["min_lat"]) - 50.2964626834) < 1e-5, True)
		self.assertEqual(abs(float(csout.attrib["max_lat"]) - 51.7985258134) < 1e-5, True)
		self.assertEqual(abs(float(csout.attrib["min_lon"]) + 5.24880409375) < 1e-5, True)
		self.assertEqual(abs(float(csout.attrib["max_lon"]) + 3.08999061719) < 1e-5, True)
		t.Commit()
	def test_expand_bbox_anon(self):
		"""Anonymous expand_bbox is refused with 403."""
		cs = CreateTestChangeset(self.user)
		anonClient = Client()
		response = anonClient.post(reverse('changeset:expand_bbox', args=(cs.objId,)), self.expandBboxXml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 403)
	def test_expand_bbox_closed(self):
		"""expand_bbox on a closed changeset yields 409 plus the closure
		message."""
		cs = CreateTestChangeset(self.user, is_open=False)
		response = self.client.post(reverse('changeset:expand_bbox', args=(cs.objId,)), self.expandBboxXml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 409)
		self.assertEqual(response.content, "The changeset {} was closed at {}.".format(cs.objId,
			datetime.datetime.fromtimestamp(cs.close_timestamp).isoformat()).encode(str("utf-8")))
	def tearDown(self):
		"""Delete the test user and reset the active map tables."""
		u = User.objects.get(username = self.username)
		u.delete()
		errStr = pgmap.PgMapError()
		t = p.GetTransaction("EXCLUSIVE")
		ok = t.ResetActiveTables(errStr)
		if not ok:
			print (errStr.errStr)
		t.Commit()
class ChangesetUploadTestCase(TestCase):
	def setUp(self):
		"""Create two registered users, each with an authenticated test
		client (the second user is used to test cross-user uploads)."""
		self.username = "john"
		self.password = "glass onion"
		self.email = 'jlennon@beatles.com'
		self.user = User.objects.create_user(self.username, self.email, self.password)
		self.client = Client()
		self.client.login(username=self.username, password=self.password)
		self.username2 = "ringo"
		self.password2 = "penny lane"
		self.email2 = 'rstarr@beatles.com'
		self.user2 = User.objects.create_user(self.username2, self.email2, self.password2)
		self.client2 = Client()
		self.client2.login(username=self.username2, password=self.password2)
	def test_upload_create_single_node(self):
		"""Uploading a create of one node returns a diff mapping the
		placeholder id to a new positive id at version 1, records the
		uploader's metadata, and the changeset download contains a single
		<create><node/></create>."""
		cs = CreateTestChangeset(self.user, tags={"foo": "invade"}, is_open=True)
		xml = """<osmChange generator="JOSM" version="0.6">
		<create>
		<node changeset="{}" id="-5393" lat="50.79046578105" lon="-1.04971367626" />
		</create>
		</osmChange>""".format(cs.objId)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		if response.status_code != 200:
			print (response.content)
		self.assertEqual(response.status_code, 200)
		xml = fromstring(response.content)
		self.assertEqual(len(xml), 1)
		ndiff = xml[0]
		self.assertEqual(int(ndiff.attrib["old_id"]), -5393)
		self.assertEqual(int(ndiff.attrib["new_version"]), 1)
		self.assertEqual(int(ndiff.attrib["new_id"])>0, True)
		dbNode = GetObj(p, "node", int(ndiff.attrib["new_id"]))
		self.assertEqual(dbNode is not None, True)
		self.assertEqual(dbNode.metaData.username, self.user.username)
		self.assertEqual(dbNode.metaData.uid, self.user.id)
		self.assertEqual(abs(dbNode.metaData.timestamp - time.time())<60, True)
		# Check xml download is reasonable
		response2 = self.client.get(reverse('changeset:download', args=(cs.objId,)))
		xml2 = fromstring(response2.content)
		for ch in xml2:
			self.assertEqual(ch.tag, "create")
			for ch2 in ch:
				self.assertEqual(ch2.tag, "node")
	def test_upload_modify_single_node(self):
		"""Modifying an existing node updates its position and tags,
		bumps the version by one, and the changeset download contains a
		single <modify><node/></modify>."""
		cs = CreateTestChangeset(self.user, tags={"foo": "interstellar"}, is_open=True)
		node = create_node(self.user.id, self.user.username)
		xml = """<osmChange generator="JOSM" version="0.6">
		<modify>
		<node changeset="{}" id="{}" lat="50.80" lon="-1.05" version="{}">
			<tag k="note" v="Just a node"/>
		</node>
		</modify>
		</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		if response.status_code != 200:
			print (response.content)
		self.assertEqual(response.status_code, 200)
		xml = fromstring(response.content)
		self.assertEqual(len(xml), 1)
		ndiff = xml[0]
		self.assertEqual(int(ndiff.attrib["old_id"]), node.objId)
		self.assertEqual(int(ndiff.attrib["new_version"]), node.metaData.version+1)
		self.assertEqual(int(ndiff.attrib["new_id"]), node.objId)
		dbNode = GetObj(p, "node", node.objId)
		self.assertEqual(abs(dbNode.lat-50.80)<1e-6, True)
		self.assertEqual(abs(dbNode.lon+1.05)<1e-6, True)
		self.assertEqual(len(dbNode.tags), 1)
		# Check xml download is reasonable
		response2 = self.client.get(reverse('changeset:download', args=(cs.objId,)))
		xml2 = fromstring(response2.content)
		for ch in xml2:
			self.assertEqual(ch.tag, "modify")
			for ch2 in ch:
				self.assertEqual(ch2.tag, "node")
	def test_upload_modify_single_node_wrong_version(self):
		"""A modify carrying a stale/incorrect object version is rejected
		with 409 Conflict."""
		cs = CreateTestChangeset(self.user, tags={"foo": "interstellar"}, is_open=True)
		node = create_node(self.user.id, self.user.username)
		xml = """<osmChange generator="JOSM" version="0.6">
		<modify>
		<node changeset="{}" id="{}" lat="50.80" lon="-1.05" version="{}">
			<tag k="note" v="Just a node"/>
		</node>
		</modify>
		</osmChange>""".format(cs.objId, node.objId, node.metaData.version+1)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 409)
	def test_upload_modify_single_node_wrong_user(self):
		"""Uploading into a changeset owned by another user is rejected
		with 409 Conflict (cs belongs to user 1, upload is by user 2)."""
		cs = CreateTestChangeset(self.user, tags={"foo": "apollo"}, is_open=True)
		node = create_node(self.user.id, self.user.username)
		xml = """<osmChange generator="JOSM" version="0.6">
		<modify>
		<node changeset="{}" id="{}" lat="50.80" lon="-1.05" version="{}">
			<tag k="note" v="Just a node"/>
		</node>
		</modify>
		</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
		response = self.client2.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 409)
	def test_upload_delete_undelete_single_node(self):
		"""Deleting an unreferenced node succeeds (it disappears from the
		database and the download shows <delete>), and the node can then
		be resurrected by uploading a newer version."""
		cs = CreateTestChangeset(self.user, tags={"foo": "interstellar"}, is_open=True)
		node = create_node(self.user.id, self.user.username)
		xml = """<osmChange generator="JOSM" version="0.6">
		<delete>
		<node changeset="{}" id="{}" lat="50.80" lon="-1.05" version="{}"/>
		</delete>
		</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		if response.status_code != 200:
			print (response.content)
		self.assertEqual(response.status_code, 200)
		xml = fromstring(response.content)
		self.assertEqual(len(xml), 1)
		ndiff = xml[0]
		self.assertEqual(int(ndiff.attrib["old_id"]), node.objId)
		dbNode = GetObj(p, "node", node.objId)
		self.assertEqual(dbNode is None, True)
		# Check xml download is reasonable
		response2 = self.client.get(reverse('changeset:download', args=(cs.objId,)))
		xml2 = fromstring(response2.content)
		for ch in xml2:
			self.assertEqual(ch.tag, "delete")
			for ch2 in ch:
				self.assertEqual(ch2.tag, "node")
		# Undelete node by uploading new version
		ok, node = modify_node(node, node.metaData.version+1, self.user)
		self.assertEqual(ok, True)
	def test_upload_create_long_tag(self):
		"""A tag key/value exactly at settings.MAX_TAG_LENGTH is
		accepted (boundary case)."""
		cs = CreateTestChangeset(self.user, tags={"foo": "invade"}, is_open=True)
		xml = """<osmChange generator="JOSM" version="0.6">
		<create>
		<node changeset="{}" id="-5393" lat="50.79046578105" lon="-1.04971367626">
			<tag k="{}" v="{}"/>
		</node>
		</create>
		</osmChange>""".format(cs.objId, "x" * settings.MAX_TAG_LENGTH, "y" * settings.MAX_TAG_LENGTH)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		if response.status_code != 200:
			print (response.content)
		self.assertEqual(response.status_code, 200)
	def test_upload_create_overlong_tag(self):
		"""A 256-character tag key/value is rejected with 400 Bad
		Request (over the allowed tag length)."""
		cs = CreateTestChangeset(self.user, tags={"foo": "invade"}, is_open=True)
		xml = """<osmChange generator="JOSM" version="0.6">
		<create>
		<node changeset="{}" id="-5393" lat="50.79046578105" lon="-1.04971367626">
			<tag k="{}" v="{}"/>
		</node>
		</create>
		</osmChange>""".format(cs.objId, "x" * 256, "y" * 256)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 400)
	def test_upload_create_way(self):
		"""Creating two nodes and a way referencing them remaps all
		placeholder ids in one upload; the stored way refers only to the
		new positive node ids."""
		cs = CreateTestChangeset(self.user, tags={"foo": "invade"}, is_open=True)
		xml = """<osmChange generator="JOSM" version="0.6">
		<create>
		<node changeset="{0}" id="-5393" lat="50.79046578105" lon="-1.04971367626" />
		<node changeset="{0}" id="-5394" lat="50.81" lon="-1.051" />
		<way changeset="{0}" id="-434">
			<tag k="note" v="Just a way"/>
			<nd ref="-5393"/>
			<nd ref="-5394"/>
		</way>
		</create>
		</osmChange>""".format(cs.objId)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		if response.status_code != 200:
			print (response.content)
		self.assertEqual(response.status_code, 200)
		xml = fromstring(response.content)
		self.assertEqual(len(xml), 3)
		diffDict = ParseOsmDiffToDict(xml)
		self.assertEqual(-5393 in diffDict["node"], True)
		self.assertEqual(-5394 in diffDict["node"], True)
		self.assertEqual(-434 in diffDict["way"], True)
		newWayId, newWayVersion = diffDict["way"][-434]
		self.assertEqual(newWayVersion, 1)
		newWay = GetObj(p, "way", newWayId)
		self.assertEqual(newWay is not None, True)
		for ref in list(newWay.refs):
			self.assertEqual(ref > 0, True)
def generate_upload_way_with_n_nodes(self, csId, numNodes):
nids = range(-5393, -5393-numNodes, -1)
xml = ["""<osmChange generator="JOSM" version="0.6">
<create>"""]
for i in nids:
xml.append(""" <node changeset="{0}" id="{1}" lat="{2}" lon="{3}" />\n"""
.format(csId, i, 50.79046578105+random.uniform(-1,1), -1.04971367626+random.uniform(-1,1)))
xml.append(""" <way changeset="{0}" id="-434">
<tag k="note" v="Just a way"/>""".format(csId))
for i in nids:
xml.append(""" <nd ref="{0}"/>\n""".format(i))
xml.append("""</way>
</create>
</osmChange>""")
return "".join(xml)
	def test_upload_create_way_with_max_nodes(self):
		"""A way with exactly settings.WAYNODES_MAXIMUM members is
		accepted (boundary case)."""
		cs = CreateTestChangeset(self.user, tags={"foo": "invade"}, is_open=True)
		xml = self.generate_upload_way_with_n_nodes(cs.objId, settings.WAYNODES_MAXIMUM)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 200)
	def test_upload_create_way_too_many_nodes(self):
		"""A way with one node over settings.WAYNODES_MAXIMUM is
		rejected with 400."""
		cs = CreateTestChangeset(self.user, tags={"foo": "invade"}, is_open=True)
		xml = self.generate_upload_way_with_n_nodes(cs.objId, settings.WAYNODES_MAXIMUM+1)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 400)
	def test_upload_create_way_empty(self):
		"""A way with zero member nodes is rejected with 400."""
		cs = CreateTestChangeset(self.user, tags={"foo": "invade"}, is_open=True)
		xml = self.generate_upload_way_with_n_nodes(cs.objId, 0)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 400)
	def test_upload_create_way_too_few_nodes(self):
		"""A way with only one member node is rejected with 400."""
		cs = CreateTestChangeset(self.user, tags={"foo": "invade"}, is_open=True)
		xml = self.generate_upload_way_with_n_nodes(cs.objId, 1)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 400)
def test_upload_create_complex(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
xml = """<osmChange version="0.6" generator="JOSM">
<create>
<node id='-3912' changeset='{0}' lat='50.78673385857' lon='-1.04730886255'>
<tag k='abc' v='def' />
</node>
<node id='-3910' changeset='{0}' lat='50.7865119298' lon='-1.04843217891' />
<node id='-3909' changeset='{0}' lat='50.78724872927' lon='-1.04808114255' />
<way id='-3911' changeset='{0}'>
<nd ref='-3909' />
<nd ref='-3910' />
<nd ref='-3912' />
<nd ref='{1}' />
<tag k='ghi' v='jkl' />
</way>
<relation id='-3933' changeset='{0}'>
<member type='way' ref='-3911' role='lmn' />
<member type='node' ref='-3909' role='opq' />
<tag k='rst' v='uvw' />
</relation>
<relation id='-3934' changeset='{0}'>
<member type='way' ref='-3911' role='lmn' />
<member type='relation' ref='-3933' role='opq' />
<tag k='rst' v='xyz' />
</relation>
</create>
</osmChange>""".format(cs.objId, node.objId)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
if response.status_code != 200:
print (response.content)
self.assertEqual(response.status_code, 200)
xml = fromstring(response.content)
diffDict = ParseOsmDiffToDict(xml)
way = GetObj(p, "way", diffDict["way"][-3911][0])
wayRefs = list(way.refs)
for diffId, diffVer in diffDict["node"].values():
self.assertEqual(diffId in wayRefs, True)
self.assertEqual(node.objId in wayRefs, True)
wayTags = dict(way.tags)
self.assertEqual(wayTags, {'ghi': 'jkl'})
rel1 = GetObj(p, "relation", diffDict["relation"][-3933][0])
rel1Refs = zip(list(rel1.refTypeStrs), list(rel1.refIds), list(rel1.refRoles))
self.assertEqual(("way", diffDict["way"][-3911][0], "lmn") in rel1Refs, True)
self.assertEqual(("node", diffDict["node"][-3909][0], "opq") in rel1Refs, True)
rel1Tags = dict(rel1.tags)
self.assertEqual(rel1Tags, {'rst': 'uvw'})
rel2 = GetObj(p, "relation", diffDict["relation"][-3934][0])
rel2Refs = zip(list(rel2.refTypeStrs), list(rel2.refIds), list(rel2.refRoles))
self.assertEqual(("way", diffDict["way"][-3911][0], "lmn") in rel2Refs, True)
self.assertEqual(("relation", diffDict["relation"][-3933][0], "opq") in rel2Refs, True)
rel2Tags = dict(rel2.tags)
self.assertEqual(rel2Tags, {'rst': 'xyz'})
def test_upload_delete_node_used_by_way(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
way = create_way(self.user.id, self.user.username, [node.objId, node2.objId])
xml = """<osmChange generator="JOSM" version="0.6">
<delete>
<node changeset="{}" id="{}" lat="50.80" lon="-1.05" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
self.assertEqual(response.status_code, 412)
def test_upload_delete_node_used_by_relation(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
relation = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("node", node2.objId, "dead")])
xml = """<osmChange generator="JOSM" version="0.6">
<delete>
<node changeset="{}" id="{}" lat="50.80" lon="-1.05" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 412)
	def test_upload_delete_node_used_by_way(self):
		"""Deleting a node still referenced by a way must be rejected
		with 412 Precondition Failed (delete element without lat/lon)."""
		cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
		node = create_node(self.user.id, self.user.username)
		node2 = create_node(self.user.id, self.user.username, node)
		way = create_way(self.user.id, self.user.username, [node.objId, node2.objId])
		xml = """<osmChange generator="JOSM" version="0.6">
		<delete>
		<node changeset="{}" id="{}" version="{}"/>
		</delete>
		</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		#print (response.content)
		self.assertEqual(response.status_code, 412)
	def test_upload_delete_node_used_by_relation(self):
		"""Deleting a node still referenced by a relation must be
		rejected with 412 Precondition Failed (delete element without
		lat/lon)."""
		cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
		node = create_node(self.user.id, self.user.username)
		node2 = create_node(self.user.id, self.user.username, node)
		relation = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("node", node2.objId, "dead")])
		xml = """<osmChange generator="JOSM" version="0.6">
		<delete>
		<node changeset="{}" id="{}" version="{}"/>
		</delete>
		</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		#print (response.content)
		self.assertEqual(response.status_code, 412)
def test_upload_delete_undelete_way(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
way = create_way(self.user.id, self.user.username, [node.objId, node2.objId])
xml = """<osmChange generator="JOSM" version="0.6">
<delete>
<way changeset="{}" id="{}" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, way.objId, node.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 200)
ok, way = modify_way(way, [node.objId, node2.objId], {}, self.user)
self.assertEqual(ok, True)
def test_upload_delete_way_used_by_relation(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
way = create_way(self.user.id, self.user.username, [node.objId, node2.objId])
relation = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("way", way.objId, "dead")])
xml = """<osmChange generator="JOSM" version="0.6">
<delete>
<way changeset="{}" id="{}" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, way.objId, node.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 412)
	def test_upload_delete_undelete_relation(self):
		"""Deleting an unreferenced relation succeeds (200), and the
		relation can then be restored via modify_relation."""
		cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
		node = create_node(self.user.id, self.user.username)
		node2 = create_node(self.user.id, self.user.username, node)
		refs = [("node", node.objId, "parrot"), ("node", node2.objId, "dead")]
		relation = create_relation(self.user.id, self.user.username, refs)
		xml = """<osmChange generator="JOSM" version="0.6">
		<delete>
		<relation changeset="{}" id="{}" version="{}"/>
		</delete>
		</osmChange>""".format(cs.objId, relation.objId, relation.metaData.version)
		response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
			content_type='text/xml')
		self.assertEqual(response.status_code, 200)
		relation = modify_relation(self.user.id, self.user.username, relation, refs, {})
		self.assertNotEqual(relation, None)
def test_upload_delete_relation_used_by_relation(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
relation = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("node", node2.objId, "dead")])
relation2 = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("relation", relation.objId, "dead")])
xml = """<osmChange generator="JOSM" version="0.6">
<delete>
<relation changeset="{}" id="{}" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, relation.objId, relation.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 412)
def test_upload_multi_action(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
way = create_way(self.user.id, self.user.username, [node.objId, node2.objId])
xml = """<osmChange version="0.6" generator="JOSM">
<create>
<node id='-3912' changeset='{0}' lat='50.78673385857' lon='-1.04730886255'>
<tag k='abc' v='def' />
</node>
</create>
<modify>
<way id='{1}' changeset='{0}' version="{2}">
<nd ref='-3912' />
<nd ref='{3}' />
<nd ref='{4}' />
<tag k='ghi' v='jkl' />
</way>
</modify>
</osmChange>""".format(cs.objId, way.objId, way.metaData.version, node.objId, node2.objId)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
if response.status_code != 200:
print (response.content)
self.assertEqual(response.status_code, 200)
xml = fromstring(response.content)
diffDict = ParseOsmDiffToDict(xml)
def test_upload_delete_node_used_by_way_if_unused(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
way = create_way(self.user.id, self.user.username, [node.objId, node2.objId])
xml = """<osmChange generator="JOSM" version="0.6">
<delete if-unused="true">
<node changeset="{}" id="{}" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 200)
def test_upload_delete_node_used_by_relation_if_unused(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
relation = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("node", node2.objId, "dead")])
xml = """<osmChange generator="JOSM" version="0.6">
<delete if-unused="true">
<node changeset="{}" id="{}" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, node.objId, node.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 200)
def test_upload_delete_way_used_by_relation_if_unused(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
way = create_way(self.user.id, self.user.username, [node.objId, node2.objId])
relation = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("way", way.objId, "dead")])
xml = """<osmChange generator="JOSM" version="0.6">
<delete if-unused="true">
<way changeset="{}" id="{}" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, way.objId, way.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 200)
def test_upload_delete_relation_used_by_relation_if_unused(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
relation = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("node", node2.objId, "dead")])
relation2 = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("relation", relation.objId, "dead")])
xml = """<osmChange generator="JOSM" version="0.6">
<delete if-unused="true">
<relation changeset="{}" id="{}" version="{}"/>
</delete>
</osmChange>""".format(cs.objId, relation.objId, relation.metaData.version)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 200)
def test_upload_delete_interdependent_objects(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
node2 = create_node(self.user.id, self.user.username, node)
way = create_way(self.user.id, self.user.username, [node.objId, node2.objId])
xml = """<osmChange version="0.6" generator="JOSM">
<delete>
<way id='{}' version='{}' changeset='{}'/>
<node id='{}' version='{}' changeset='{}'/>
<node id='{}' version='{}' changeset='{}'/>
</delete>
</osmChange>""".format(way.objId, way.metaData.version, cs.objId,
node.objId, node.metaData.version, cs.objId,
node2.objId, node2.metaData.version, cs.objId)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 200)
def test_upload_delete_relations_with_circular_reference(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
node = create_node(self.user.id, self.user.username)
relation = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot")])
relation2 = create_relation(self.user.id, self.user.username, [("node", node.objId, "parrot"), ("relation", relation.objId, "dead")])
relation = modify_relation(self.user.id, self.user.username, relation,
[("node", node.objId, "parrot"), ("relation", relation2.objId, "dead")], {})
xml = """<osmChange version="0.6" generator="JOSM">
<delete>
<relation id='{}' version='{}' changeset='{}'/>
<relation id='{}' version='{}' changeset='{}'/>
</delete>
</osmChange>""".format(relation.objId, relation.metaData.version, cs.objId,
relation2.objId, relation2.metaData.version, cs.objId)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
#print (response.content)
self.assertEqual(response.status_code, 200)
def test_upload_create_node_way_version_one(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
xml = """<osmChange version="0.6" generator="acme osm editor">
<create>
<node id="-1" changeset="{0}" version="1" lat="-33.9133123" lon="151.1173123" />
<node id="-2" changeset="{0}" version="1" lat="-33.9233321" lon="151.1173321" />
<way id="-3" changeset="{0}" version="1">
<nd ref="-1"/>
<nd ref="-2"/>
</way>
</create>
</osmChange>""".format(cs.objId)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
if response.status_code != 200:
print (response.content)
self.assertEqual(response.status_code, 200)
xml = fromstring(response.content)
self.assertEqual(len(xml), 3)
diffDict = ParseOsmDiffToDict(xml)
self.assertEqual(-1 in diffDict["node"], True)
self.assertEqual(-2 in diffDict["node"], True)
self.assertEqual(-3 in diffDict["way"], True)
self.assertEqual(diffDict["node"][-1][1], 1)
self.assertEqual(diffDict["node"][-2][1], 1)
self.assertEqual(diffDict["way"][-3][1], 1)
def test_upload_create_wrong_version(self):
cs = CreateTestChangeset(self.user, tags={"foo": "me"}, is_open=True)
xml = """<osmChange version="0.6" generator="acme osm editor">
<create>
<node id="-1" changeset="{0}" version="2" lat="-33.9133123" lon="151.1173123" />
</create>
</osmChange>""".format(cs.objId)
response = self.client.post(reverse('changeset:upload', args=(cs.objId,)), xml,
content_type='text/xml')
if response.status_code != 400:
print (response.content)
self.assertEqual(response.status_code, 400)
def test_get_changeset_list(self):
teststr = u"Съешь же ещё этих мягких французских булок да выпей чаю"
cs = CreateTestChangeset(self.user, tags={"foo": "bar", 'test': teststr}, bbox=(-1.0893202,50.7942715,-1.0803509,50.7989372),
is_open=True, open_timestamp=int(time.time())-60)
cs2 = CreateTestChangeset(self.user, tags={"foo": "bar", 'test': teststr}, bbox=(-1.0893202,50.7942715,-1.0803509,50.7989372),
is_open=False, open_timestamp=int(time.time())-120)
anonClient = Client()
response = anonClient.get(reverse('changeset:list'))
self.assertEqual(response.status_code, 200)
xml = fromstring(response.content)
CheckChangesetListContainsId(self, xml, cs.objId, True)
CheckChangesetListContainsId(self, xml, cs2.objId, True)
response = anonClient.get(reverse('changeset:list')+"?open=true")
self.assertEqual(response.status_code, 200)
xml = fromstring(response.content)
CheckChangesetListContainsId(self, xml, cs.objId, True)
CheckChangesetListContainsId(self, xml, cs2.objId, False)
response = anonClient.get(reverse('changeset:list')+"?closed=true")
self.assertEqual(response.status_code, 200)
xml = fromstring(response.content)
CheckChangesetListContainsId(self, xml, cs.objId, False)
CheckChangesetListContainsId(self, xml, cs2.objId, True)
    def tearDown(self):
        # Remove the two users created in setUp.
        u = User.objects.get(username = self.username)
        u.delete()
        u2 = User.objects.get(username = self.username2)
        u2.delete()
        # Reset the active map tables so the next test case starts from a
        # clean database. NOTE(review): `p` is presumably a module-level pgmap
        # connection initialised outside this chunk -- confirm.
        errStr = pgmap.PgMapError()
        t = p.GetTransaction("EXCLUSIVE")
        ok = t.ResetActiveTables(errStr)
        if not ok:
            print (errStr.errStr)
        t.Commit()
class ChangesetAutoCloseTestCase(TestCase):
    """Exercises the closeoldchangesets management command, which closes
    changesets that have been open for longer than the cutoff age."""

    def setUp(self):
        # Primary logged-in user.
        self.username = "john"
        self.password = "glass onion"
        self.email = 'jlennon@beatles.com'
        self.user = User.objects.create_user(self.username, self.email, self.password)
        self.client = Client()
        self.client.login(username=self.username, password=self.password)
        # Secondary user (kept for parity with the other changeset test cases).
        self.username2 = "ringo"
        self.password2 = "penny lane"
        self.email2 = 'rstarr@beatles.com'
        self.user2 = User.objects.create_user(self.username2, self.email2, self.password2)
        self.client2 = Client()
        self.client2.login(username=self.username2, password=self.password2)

    def _assert_changeset_open_state(self, changeset_id, expected_open):
        """Fetch a changeset anonymously and assert the "open" attribute it
        reports ("true" or "false")."""
        response = Client().get(reverse('changeset:changeset', args=(changeset_id,)))
        self.assertEqual(response.status_code, 200)
        xml = fromstring(response.content)
        self.assertEqual(xml.tag, "osm")
        csout = xml.find("changeset")
        self.assertEqual(int(csout.attrib["id"]) == changeset_id, True)
        self.assertEqual(csout.attrib["open"], expected_open)

    def test_changeset_auto_close_active(self):
        """Only changesets that are both open *and* stale are auto-closed;
        recently opened changesets and already-closed ones stay untouched."""
        teststr = u"Съешь же ещё этих мягких французских булок да выпей чаю"
        bbox = (-1.0893202, 50.7942715, -1.0803509, 50.7989372)
        recent_open = CreateTestChangeset(self.user, tags={"foo": "bar", 'test': teststr}, bbox=bbox,
            is_open=True, open_timestamp=int(time.time())-60)
        recent_closed = CreateTestChangeset(self.user, tags={"foo": "bar", 'test': teststr}, bbox=bbox,
            is_open=False, open_timestamp=int(time.time())-120)
        stale_open = CreateTestChangeset(self.user, tags={"foo": "bar", 'test': teststr}, bbox=bbox,
            is_open=True, open_timestamp=int(time.time())-(36*60*60))
        stale_closed = CreateTestChangeset(self.user, tags={"foo": "bar", 'test': teststr}, bbox=bbox,
            is_open=False, open_timestamp=int(time.time())-(36*60*60))

        closeoldchangesets.Command().handle([], {})

        self._assert_changeset_open_state(recent_open.objId, "true")
        self._assert_changeset_open_state(recent_closed.objId, "false")
        # The 36h-old open changeset must have been closed by the command.
        self._assert_changeset_open_state(stale_open.objId, "false")
        self._assert_changeset_open_state(stale_closed.objId, "false")

    def tearDown(self):
        # Remove the users created in setUp.
        u = User.objects.get(username=self.username)
        u.delete()
        u2 = User.objects.get(username=self.username2)
        u2.delete()
        # Reset the active map tables so the next test case starts clean.
        # NOTE(review): `p` is presumably a module-level pgmap connection
        # initialised outside this chunk -- confirm.
        errStr = pgmap.PgMapError()
        t = p.GetTransaction("EXCLUSIVE")
        ok = t.ResetActiveTables(errStr)
        if not ok:
            print (errStr.errStr)
        t.Commit()
| [
"tim2009@sheerman-chase.org.uk"
] | tim2009@sheerman-chase.org.uk |
b0acf56f2da7e65ce7b8ef6af2945ed5cf4c5bd0 | 4abce782dad606b10d7646763b21277689e8cedd | /async-pydevd/tests/test_generate.py | 552980c42eaf9220bb5c76d07dd48fff99bf2108 | [] | no_license | wordhui/pycharm-evaluate-async-code | 8cca3ee4a5b74eff1073a442c1f014de30b02b5b | 64ccd29b0ee286ad6fe45172334926e9f517d162 | refs/heads/master | 2023-05-14T18:10:20.786741 | 2021-06-08T07:04:05 | 2021-06-08T07:04:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from async_pydevd import FILES, generate
def test_generate():
    """The generated bundle must contain every source file, with triple-double
    quotes normalized and coverage pragmas stripped, and no stray whitespace."""
    bundle = generate()
    assert '"""' not in bundle
    for source_file in FILES:
        expected = (
            source_file.read_text("utf-8").replace('"""', "'''").replace(" # pragma: no cover", "").strip()
        )
        assert expected in bundle
    assert not bundle.endswith(" ")
    assert not bundle.startswith(" ")
| [
"1998uriyyo@gmail.com"
] | 1998uriyyo@gmail.com |
ed4d0a72fc25b24f5a5ba572bb628ea20168a043 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_friends.py | 120a772768eee00c88d6902834549d9e7b6fe04a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._friend import _FRIEND
# class header
class _FRIENDS(_FRIEND):
    """Plural form "friends": inherits all behaviour from _FRIEND and only
    overrides the identifying metadata."""

    def __init__(self):
        _FRIEND.__init__(self)
        self.name = "FRIENDS"    # canonical (upper-case) word name
        self.specie = 'verbs'    # word category
        self.basic = "friend"    # base/lemma form
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
32b70266bcc9034ed202f467bfd9da532c09fc20 | edbb5293b14fae626ad38f0087e66c996acd80de | /run.py | 3a094f50bdc2aa60b87f6bb15133904c69f742fc | [] | no_license | furuiyang0715/sync_services | 4d8b2f425814920e409580080d946b1437ed17b3 | 95bb50180129ddd4cc78ef086d5e415f2740ea2b | refs/heads/master | 2020-06-12T10:31:57.314671 | 2019-07-12T02:00:07 | 2019-07-12T02:00:07 | 194,272,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!/usr/bin/env python3
# coding=utf8
import subprocess
def _run_sync_scripts(action):
    """Run the three sync helper scripts sequentially in one shell invocation
    with the given action ('start' or 'stop') and return the captured output."""
    cmd = "python index_run.py {0}; python finance_run.py {0}; python calendars_run.py {0};".format(action)
    return subprocess.getoutput(cmd)


def start():
    """Start the index, finance and calendars sync services.

    Returns the combined shell output for logging/diagnosis (previously the
    output of subprocess.getoutput was silently discarded).
    """
    return _run_sync_scripts("start")


def stop():
    """Stop the index, finance and calendars sync services.

    Returns the combined shell output for logging/diagnosis.
    """
    return _run_sync_scripts("stop")
if __name__ == '__main__':
    # Script entry point: currently wired to stop the services; uncomment
    # start() (and comment stop()) to launch them instead.
    # start()
    stop()
| [
"furuiyang0715@gmail.com"
] | furuiyang0715@gmail.com |
d038618b91626654260d34d8571e6a64bce244d4 | 14a913fce4b538b22f28409645cd6abe3455808f | /dialogflow/cloud-client/document_management.py | 6145c9df8a6ae25fef0a9e30d3f88037cebd79cc | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iamLoi/Python-Random-Number-Generator | 8da7dbd37cb13a01232c8ed49b9df35a99c63d73 | 7579e8b15130802aaf519979e475c6c75c403eda | refs/heads/master | 2022-08-29T19:05:32.649931 | 2019-09-14T14:48:58 | 2019-09-14T14:48:58 | 208,454,877 | 2 | 1 | Apache-2.0 | 2022-08-05T21:57:49 | 2019-09-14T14:51:05 | Python | UTF-8 | Python | false | false | 8,572 | py | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dialogflow API Python sample showing how to manage Knowledge Documents.
Examples:
python document_management.py -h
python document_management.py --project-id PROJECT_ID \
--knowledge-base-id knowledge_base_id \
list
python document_management.py --project-id PROJECT_ID \
--knowledge-base-id knowledge_base_id \
create --display-name DISPLAY_NAME --mime-type MIME_TYPE \
--knowledge-type KNOWLEDGE_TYPE --content-uri CONTENT_URI
python document_management.py --project-id PROJECT_ID \
--knowledge-base-id knowledge_base_id \
get --document-id DOCUMENT_ID
python document_management.py --project-id PROJECT_ID \
--knowledge-base-id knowledge_base_id \
delete --document-id DOCUMENT_ID
"""
import argparse
# Display names for the dialogflow Document.KnowledgeType enum, indexed by the
# enum's integer value (used as KNOWLEDGE_TYPES[knowledge_type] when printing).
KNOWLEDGE_TYPES = ['KNOWLEDGE_TYPE_UNSPECIFIED', 'FAQ', 'EXTRACTIVE_QA']
# [START dialogflow_list_document]
def list_documents(project_id, knowledge_base_id):
    """Print every Document belonging to the given Knowledge base.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base."""
    import dialogflow_v2beta1 as dialogflow

    client = dialogflow.DocumentsClient()
    parent = client.knowledge_base_path(project_id, knowledge_base_id)

    print('Documents for Knowledge Id: {}'.format(knowledge_base_id))
    for document in client.list_documents(parent):
        print(' - Display Name: {}'.format(document.display_name))
        print(' - Knowledge ID: {}'.format(document.name))
        print(' - MIME Type: {}'.format(document.mime_type))
        print(' - Knowledge Types:')
        for knowledge_type in document.knowledge_types:
            print(' - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))
        print(' - Source: {}\n'.format(document.content_uri))
# [END dialogflow_list_document]
# [START dialogflow_create_document]]
def create_document(project_id, knowledge_base_id, display_name, mime_type,
                    knowledge_type, content_uri):
    """Create a Document in a Knowledge base and print the created resource.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base.
        display_name: The display name of the Document.
        mime_type: The mime_type of the Document. e.g. text/csv, text/html,
            text/plain, text/pdf etc.
        knowledge_type: The Knowledge type of the Document. e.g. FAQ,
            EXTRACTIVE_QA.
        content_uri: Uri of the document, e.g. gs://path/mydoc.csv,
            http://mypage.com/faq.html."""
    import dialogflow_v2beta1 as dialogflow

    client = dialogflow.DocumentsClient()
    parent = client.knowledge_base_path(project_id, knowledge_base_id)

    new_document = dialogflow.types.Document(
        display_name=display_name,
        mime_type=mime_type,
        content_uri=content_uri)
    new_document.knowledge_types.append(
        dialogflow.types.Document.KnowledgeType.Value(knowledge_type))

    # create_document is a long-running operation; block until it resolves.
    operation = client.create_document(parent, new_document)
    print('Waiting for results...')
    document = operation.result(timeout=90)

    print('Created Document:')
    print(' - Display Name: {}'.format(document.display_name))
    print(' - Knowledge ID: {}'.format(document.name))
    print(' - MIME Type: {}'.format(document.mime_type))
    print(' - Knowledge Types:')
    for knowledge_type in document.knowledge_types:
        print(' - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))
    print(' - Source: {}\n'.format(document.content_uri))
# [END dialogflow_create_document]]
# [START dialogflow_get_document]]
def get_document(project_id, knowledge_base_id, document_id):
    """Fetch a single Document and print its metadata.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base.
        document_id: Id of the Document."""
    import dialogflow_v2beta1 as dialogflow

    client = dialogflow.DocumentsClient()
    document_path = client.document_path(project_id, knowledge_base_id,
                                         document_id)
    document = client.get_document(document_path)

    print('Got Document:')
    print(' - Display Name: {}'.format(document.display_name))
    print(' - Knowledge ID: {}'.format(document.name))
    print(' - MIME Type: {}'.format(document.mime_type))
    print(' - Knowledge Types:')
    for knowledge_type in document.knowledge_types:
        print(' - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))
    print(' - Source: {}\n'.format(document.content_uri))
# [END dialogflow_get_document]]
# [START dialogflow_delete_document]]
def delete_document(project_id, knowledge_base_id, document_id):
    """Delete a Document and block until the operation finishes.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base.
        document_id: Id of the Document."""
    import dialogflow_v2beta1 as dialogflow

    client = dialogflow.DocumentsClient()
    target = client.document_path(project_id, knowledge_base_id, document_id)

    # delete_document is a long-running operation; wait for completion.
    operation = client.delete_document(target)
    print('operation running:\n {}'.format(operation.operation))
    print('Waiting for results...')
    print('Done.\n {}'.format(operation.result()))
# [END dialogflow_delete_document]]
if __name__ == '__main__':
    # Top-level CLI: common flags (--project-id, --knowledge-base-id) plus one
    # sub-command per operation (list / create / get / delete).
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--project-id', help='Project id. Required.', required=True)
    parser.add_argument(
        '--knowledge-base-id',
        help='The id of the Knowledge Base that the Document belongs to',
        required=True)

    subparsers = parser.add_subparsers(dest='command')

    list_parser = subparsers.add_parser(
        'list',
        help='List all Documents that belong to a certain Knowledge base.')

    create_parser = subparsers.add_parser(
        'create', help='Create a Document for a certain Knowledge base.')
    create_parser.add_argument(
        '--display-name',
        help='A name of the Document, mainly used for display purpose, '
        'can not be used to identify the Document.',
        default=str(''))
    create_parser.add_argument(
        '--mime-type',
        help='The mime-type of the Document, e.g. text/csv, text/html, '
        'text/plain, text/pdf etc. ',
        required=True)
    create_parser.add_argument(
        '--knowledge-type',
        help='The knowledge-type of the Document, e.g. FAQ, EXTRACTIVE_QA.',
        required=True)
    create_parser.add_argument(
        '--content-uri',
        help='The uri of the Document, e.g. gs://path/mydoc.csv, '
        'http://mypage.com/faq.html',
        required=True)

    get_parser = subparsers.add_parser(
        'get', help='Get a Document by its id and the Knowledge base id.')
    get_parser.add_argument(
        '--document-id', help='The id of the Document', required=True)

    delete_parser = subparsers.add_parser(
        'delete', help='Delete a Document by its id and the Knowledge base'
        'id.')
    delete_parser.add_argument(
        '--document-id',
        help='The id of the Document you want to delete',
        required=True)

    args = parser.parse_args()

    # Dispatch to the function implementing the selected sub-command.
    if args.command == 'list':
        list_documents(args.project_id, args.knowledge_base_id)
    elif args.command == 'create':
        create_document(args.project_id, args.knowledge_base_id,
                        args.display_name, args.mime_type, args.knowledge_type,
                        args.content_uri)
    elif args.command == 'get':
        get_document(args.project_id, args.knowledge_base_id, args.document_id)
    elif args.command == 'delete':
        delete_document(args.project_id, args.knowledge_base_id,
                        args.document_id)
| [
"noreply@github.com"
] | iamLoi.noreply@github.com |
460b589029a28f4fa3fa3f781280627857374c0b | c6f14a40b13121e8266882a38fa7ff3ff6c943a2 | /apps/ndvi_anomaly/utils.py | d2ed9a3b5d738907c70718714555adcf22309a69 | [
"Apache-2.0"
] | permissive | gijs/data_cube_ui | 443572c7b25734a13f576ea284687145bb3e72cf | 831f4d4f1fe44d7cb81caebf241e3d2add5d7b5d | refs/heads/master | 2021-01-17T11:56:21.657787 | 2017-01-20T23:53:08 | 2017-01-20T23:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | # Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from .models import Query
from data_cube_ui.models import Area, Satellite
from datetime import datetime
"""
Utility class designed to take repeated functional code and abstract out for reuse through
application.
"""
# Author: AHDS
# Creation date: 2016-06-23
# Modified by:
# Last modified date:
def create_query_from_post(user_id, post):
    """
    Takes post data from a request with a user id and creates a model.

    TODO: use form validation rather than doing it this way.

    Args:
        user_id (string): Id of the user requesting the creation of the query.
        post (HttpPost): A post that contains a variety of information regarding how to construct the
            query

    Returns:
        query_id (string): The ID of the query that has been created.
    """
    # Each scene selection is encoded as "<index>-<string>"; split into two
    # parallel lists that become the comma-joined time_start/time_end fields.
    scene_indices = []
    scene_strings = []
    for scene in post.getlist('scene_selection'):
        index_part, string_part = scene.split("-")[:2]
        scene_indices.append(index_part)
        scene_strings.append(string_part)

    query = Query(query_start=datetime.now(), query_end=datetime.now(), user_id=user_id,
                  latitude_max=post['latitude_max'], latitude_min=post['latitude_min'],
                  longitude_max=post['longitude_max'], longitude_min=post['longitude_min'],
                  time_start=",".join(scene_indices), time_end=",".join(scene_strings),
                  platform=post['platform'], baseline=",".join(post.getlist('baseline_selection')),
                  area_id=post['area_id'])
    # Optional fields fall back to defaults when absent or blank.
    query.title = "NDVI Anomaly Task" if 'title' not in post or post['title'] == '' else post['title']
    query.description = "None" if 'description' not in post or post['description'] == '' else post['description']
    query.product = Satellite.objects.get(satellite_id=query.platform).product_prefix + Area.objects.get(area_id=query.area_id).area_id
    query.query_id = query.generate_query_id()
    # Identical queries share the same generated id; only save new ones.
    if not Query.objects.filter(query_id=query.query_id).exists():
        query.save()
    return query.query_id
| [
"alfredo.h.delos_santos@ama-inc.com"
] | alfredo.h.delos_santos@ama-inc.com |
b0098d8416b34e015d6c88c4e7f600a0ab479460 | 98cd5ddf45a73aea64bbfac0c0104829d7231b81 | /S - Sound Jaws-Image/info.py | f063b8a755be8e9c8b7362e3492b7a13b012ff08 | [] | no_license | atheis4/ETC_Modes_Extra | 42508d523cfe632a3335e29f6e1e40af91df231b | d0ce221562105382a7a73cc6d280f4ad0eabf6f3 | refs/heads/master | 2022-04-04T11:15:07.335910 | 2020-01-03T20:27:32 | 2020-01-03T20:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | name = "S - Sound Jaws-Image"
description = "Oscilloscope teeth with background image cycle and tooth color cycle"
knob1 = "Clench"
knob2 = "Number of Teeth"
knob3 = "Tooth Shape"
knob4 = "Colorshift Speed"
released = "September 7 2017"
| [
"media@critterandguitari.com"
] | media@critterandguitari.com |
6281f5af7e13ed5ac591b3359234870130abb8e0 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/django/contrib/localflavor/no/__init__.py | 34417bd2b5f11a5b4b6bc08711c262ea5e04f4d8 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/django/contrib/localflavor/no/__init__.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
80cb84142bdc36f93b641768e87520357a096f0a | 01e82d70ee62824fcad3d2df57411c2ff620d6e0 | /data/imsitu_loader.py | ddd1505a8b00336a0b571a85c31dd6cd54b6752c | [
"MIT"
] | permissive | kyoungrok0517/verb-attributes | 3a54c99a096d4252116748dfb1188045f4a2dd70 | a04931e3b2ef5be859bdb4c0f123148b194c9d42 | refs/heads/master | 2020-03-21T05:28:10.026977 | 2018-03-20T16:36:25 | 2018-03-20T16:36:25 | 138,162,475 | 2 | 0 | null | 2018-06-21T11:38:47 | 2018-06-21T11:38:47 | null | UTF-8 | Python | false | false | 6,844 | py | """
Dataset and dataloader for imsitu experiments.
This allows us to:
1) Finetune on Imsitu
2) Finetune on a zero shot setting
"""
import spacy
import torch
import os
from config import IMSITU_TRAIN_LIST, IMSITU_VAL_LIST, IMSITU_TEST_LIST, IMSITU_IMGS
from torchvision.transforms import Scale, RandomCrop, CenterCrop, ToTensor, Normalize, Compose, RandomHorizontalFlip
from PIL import Image
from data.attribute_loader import Attributes
from collections import namedtuple
from torch.autograd import Variable
# Maps a dataset split name to the file listing that split's examples.
LISTS = {
    'train': IMSITU_TRAIN_LIST,
    'val': IMSITU_VAL_LIST,
    'test': IMSITU_TEST_LIST,
}
def _load_imsitu_file(mode):
    """Load the imSitu example list for one dataset split.

    :param mode: one of 'train', 'val', 'test'
    :return: list of (absolute image filename, integer label index) tuples
    :raises ValueError: if mode is not a known split
    """
    if mode not in LISTS:
        raise ValueError("Invalid mode {}, must be train val or test".format(mode))

    # (Previously this also built an unused ind -> label dict; removed.)
    dps = []
    with open(LISTS[mode], 'r') as f:
        for row in f.read().splitlines():
            fn_ext = row.split(' ')[0]
            # Filenames look like "<verb>ing_<id>.jpg"; the verb prefix names
            # the per-verb image subdirectory.
            label = fn_ext.split('_')[0]
            ind = int(row.split(' ')[1])
            dps.append((os.path.join(IMSITU_IMGS, label, fn_ext), ind))
    return dps
class ImSitu(torch.utils.data.Dataset):
    """imSitu image dataset restricted to a chosen verb vocabulary.

    The verb flags select which verb classes are in play (via Attributes);
    the image flags select which split list files are read. Only examples
    whose label belongs to the selected verb vocabulary are kept.
    """

    def __init__(self,
                 use_train_verbs=False,
                 use_val_verbs=False,
                 use_test_verbs=False,
                 use_train_images=False,
                 use_val_images=False,
                 use_test_images=False,
                 vector_type='glove',
                 word_type='lemma',
                 ):
        self.vector_type = vector_type
        self.word_type = word_type

        self.use_train_verbs = use_train_verbs
        self.use_val_verbs = use_val_verbs
        self.use_test_verbs = use_test_verbs
        if not (self.use_train_verbs or self.use_val_verbs or self.use_test_verbs):
            raise ValueError("No verbs selected!")

        self.use_train_images = use_train_images
        self.use_val_images = use_val_images
        self.use_test_images = use_test_images
        # Bug fix: this guard previously re-checked the *verb* flags, so a
        # dataset constructed with verbs but no images slipped through silently.
        if not (self.use_train_images or self.use_val_images or self.use_test_images):
            raise ValueError("No images selected!")

        self.attributes = Attributes(
            vector_type=vector_type,
            word_type=word_type,
            use_train=self.use_train_verbs, use_val=self.use_val_verbs,
            use_test=self.use_test_verbs, imsitu_only=True)

        # Keep only examples whose raw label is in the selected verb
        # vocabulary, remapped through the attributes' index permutation.
        self.examples = []
        for mode, to_use in zip(
            ['train', 'val', 'test'],
            [self.use_train_images, self.use_val_images, self.use_test_images],
        ):
            if to_use:
                self.examples += [(fn, self.attributes.ind_perm[ind])
                                  for fn, ind in _load_imsitu_file(mode)
                                  if ind in self.attributes.ind_perm]

        # Training-style augmentation unless these are the test verbs.
        # NOTE(review): val images therefore also get random crop/flip --
        # confirm this is intentional.
        self.transform = transform(is_train=not self.use_test_verbs)

    def __getitem__(self, index):
        """Return (transformed RGB image tensor, verb label) for one example."""
        fn, ind = self.examples[index]
        img = self.transform(Image.open(fn).convert('RGB'))
        return img, ind

    @classmethod
    def splits(cls, zeroshot=False, **kwargs):
        """
        Gets splits
        :param zeroshot: True if we're transferring to zeroshot classes
        :return: train, val, test datasets
        """
        if zeroshot:
            # Zero-shot: train/val share train+val images over disjoint verb
            # vocabularies; test uses unseen verbs with test images.
            train_cls = cls(use_train_verbs=True, use_train_images=True, use_val_images=True, **kwargs)
            val_cls = cls(use_val_verbs=True, use_train_images=True, use_val_images=True, **kwargs)
            test_cls = cls(use_test_verbs=True, use_test_images=True, **kwargs)
        else:
            # Standard supervised split: same verb vocabulary everywhere.
            train_cls = cls(use_train_verbs=True, use_train_images=True, **kwargs)
            val_cls = cls(use_train_verbs=True, use_val_images=True, **kwargs)
            test_cls = cls(use_train_verbs=True, use_test_images=True, **kwargs)
        return train_cls, val_cls, test_cls

    def __len__(self):
        return len(self.examples)
Batch = namedtuple('Batch', ['img', 'label'])
class CudaDataLoader(torch.utils.data.DataLoader):
    """
    Iterates through the data, but also loads everything as a (cuda) variable
    """

    def __init__(self, *args, volatile=False, **kwargs):
        # `volatile` is stored and applied to every Variable this loader yields.
        super(CudaDataLoader, self).__init__(*args, **kwargs)
        self.volatile = volatile

    def _load(self, item):
        """Wrap one (img, label) pair in Variables and move them to the GPU if one exists."""
        batch_img = Variable(item[0], volatile=self.volatile)
        batch_label = Variable(item[1], volatile=self.volatile)
        if torch.cuda.is_available():
            batch_img = batch_img.cuda()
            batch_label = batch_label.cuda()
        return Batch(batch_img, batch_label)

    def __iter__(self):
        # Lazily decorate the parent iterator with Variable/GPU wrapping.
        for item in super(CudaDataLoader, self).__iter__():
            yield self._load(item)

    @classmethod
    def splits(cls, train, val, test, batch_size, num_workers=0, **kwargs):
        """
        gets dataloaders given datasets
        :param train:
        :param val:
        :param test:
        :param batch_size:
        :param num_workers:
        :return:
        """
        def _eval_loader(dataset):
            # Evaluation loaders: 16x larger batches, fixed order, volatile Variables.
            return cls(
                dataset=dataset,
                batch_size=batch_size * 16,
                shuffle=False,
                num_workers=num_workers,
                collate_fn=collate_fn,
                volatile=True,
                **kwargs,
            )

        train_dl = cls(
            dataset=train,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            collate_fn=collate_fn,
            **kwargs,
        )
        return train_dl, _eval_loader(val), _eval_loader(test)
def transform(is_train=True, normalize=True):
    """
    Returns a transform object
    """
    # Resize so the short side is 256, crop to 224x224 (random for training,
    # centered for eval), flip only at train time, then tensorize.
    ops = [Scale(256)]
    if is_train:
        ops.append(RandomCrop(224))
        ops.append(RandomHorizontalFlip())
    else:
        ops.append(CenterCrop(224))
    ops.append(ToTensor())
    if normalize:
        # Canonical ImageNet per-channel mean/std.
        ops.append(Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]))
    return Compose(ops)
def collate_fn(data):
    """Merge a list of (image, label) pairs into a stacked image tensor and a LongTensor of labels."""
    images, targets = zip(*data)
    return torch.stack(images, 0), torch.LongTensor(targets)
# Smoke test: build the standard (non-zeroshot) splits and a training loader.
if __name__ == '__main__':
    train, val, test = ImSitu.splits()
    train_dl = CudaDataLoader(
        dataset=train,
        batch_size=32,
        shuffle=True,
        num_workers=0,
        collate_fn=collate_fn
    )
| [
"rowanz@cs.washington.edu"
] | rowanz@cs.washington.edu |
5d91735118f1452267c4e02054d07b1411cadc2e | 2624007528d2e37f2a2460c7a2d2964890deed16 | /synapse/rest/client/knock.py | 0152a0c66a509b24012d562c6d0f97a525001cbb | [
"Apache-2.0"
] | permissive | matrix-org/synapse-dinsic | a5386060fb6a9575dbec86547fd0943e46d63ac7 | 3da3ecc22d36f129eade97b679e1791176e3d9fa | refs/heads/dinsic | 2023-02-19T22:15:54.550679 | 2022-07-07T13:24:51 | 2022-07-07T13:24:51 | 206,570,942 | 8 | 7 | Apache-2.0 | 2023-02-08T02:50:31 | 2019-09-05T13:29:44 | Python | UTF-8 | Python | false | false | 3,759 | py | # Copyright 2020 Sorunome
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
from twisted.web.server import Request
from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.types import JsonDict, RoomAlias, RoomID
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
from ._base import client_patterns
logger = logging.getLogger(__name__)
class KnockRoomAliasServlet(RestServlet):
    """
    POST /knock/{roomIdOrAlias}
    """

    PATTERNS = client_patterns("/knock/(?P<room_identifier>[^/]*)")

    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self.txns = HttpTransactionCache(hs)
        self.room_member_handler = hs.get_room_member_handler()
        self.auth = hs.get_auth()

    async def on_POST(
        self,
        request: SynapseRequest,
        room_identifier: str,
        txn_id: Optional[str] = None,
    ) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request)

        body = parse_json_object_from_request(request)
        # Only a "reason" field from the request body is forwarded into the
        # knock event content.
        event_content = {"reason": body["reason"]} if "reason" in body else None

        if RoomID.is_valid(room_identifier):
            # Already a room ID; remote hosts may be supplied as query params.
            room_id = room_identifier
            # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
            args: Dict[bytes, List[bytes]] = request.args  # type: ignore
            remote_room_hosts = parse_strings_from_args(
                args, "server_name", required=False
            )
        elif RoomAlias.is_valid(room_identifier):
            # Resolve the alias to a room ID plus candidate remote servers.
            alias = RoomAlias.from_string(room_identifier)
            room_id_obj, remote_room_hosts = (
                await self.room_member_handler.lookup_room_alias(alias)
            )
            room_id = room_id_obj.to_string()
        else:
            raise SynapseError(
                400, "%s was not legal room ID or room alias" % (room_identifier,)
            )

        await self.room_member_handler.update_membership(
            requester=requester,
            target=requester.user,
            room_id=room_id,
            action=Membership.KNOCK,
            txn_id=txn_id,
            third_party_signed=None,
            remote_room_hosts=remote_room_hosts,
            content=event_content,
        )

        return 200, {"room_id": room_id}

    def on_PUT(
        self, request: Request, room_identifier: str, txn_id: str
    ) -> Awaitable[Tuple[int, JsonDict]]:
        # Idempotent retry path: replay/execute the POST for this txn_id.
        set_tag("txn_id", txn_id)
        return self.txns.fetch_or_execute_request(
            request, self.on_POST, request, room_identifier, txn_id
        )
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    # Wire the knock endpoint into the client REST resource tree.
    servlet = KnockRoomAliasServlet(hs)
    servlet.register(http_server)
| [
"noreply@github.com"
] | matrix-org.noreply@github.com |
8c3f4059018913519c05f57b490e763a80e25559 | bd2b8551aca9728d1dd37a6f2ac988f03e93b2bf | /120_SquareRemainders_(MakeFaster).py | 9f6fb23b41b0421fb2c8235d29a7060338585464 | [] | no_license | acganesh/euler | c7fc6bb0873df4474765598a2933baf413d094e7 | 1a870e9ecfaec770d162eec32dbaa327269ac5ce | refs/heads/master | 2020-06-25T13:07:59.976765 | 2019-10-15T00:19:41 | 2019-10-15T00:19:41 | 199,316,809 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | from datetime import datetime
# Start the wall-clock timer for the runtime report printed at the end.
s = datetime.now()
# cache[a][n] memoizes pow(a, n, a**2) for cache_pow; -1 marks "not yet computed".
cache = [[-1]*(2*a**2) for a in range(1000+1)]
def cache_pow(a, n, mod):
    """Return pow(a, n, mod), memoizing results in the module-level `cache` table.

    Fix: the original used a bare `except` that printed a message and then fell
    through with `val` unbound, raising NameError for any (a, n) outside the
    preallocated cache.  Out-of-range arguments now simply compute the power
    without caching it.
    """
    try:
        val = cache[a][n]
    except IndexError:
        # (a, n) falls outside the preallocated cache table.
        return pow(a, n, mod)
    if val == -1:
        val = pow(a, n, mod)
        cache[a][n] = val
    return val
def remainder(a, n):
    """Return ((a-1)**n + (a+1)**n) mod a**2 (Project Euler 120 square remainder)."""
    modulus = a * a
    return (pow(a - 1, n, modulus) + pow(a + 1, n, modulus)) % modulus
print remainder(7, 3)
def max_remainder(a):
    """Return the largest value of ((a-1)**n + (a+1)**n) mod a**2 over the
    exponents n = 1 .. a**2 - 8 (the loop bound used by the original search).

    Fix: dropped `max_exp`, which was tracked but never used or returned.
    """
    max_val = 0
    for exp in range(1, a ** 2 - 7):
        max_val = max(max_val, remainder(a, exp))
    return max_val
def main(limit):
    # Sum the maximum square remainders for every a in [3, limit] (Euler 120).
    val = 3
    total = 0
    while val <= limit:
        total += max_remainder(val)
        # Progress indicator for long runs.
        if val % 100 == 0: print 'val',val
        #print val
        val += 1
    return total
'''
for a in range(1, 20):
print a, max_remainder(a)
'''
# Print the answer for limit 200, then the elapsed wall-clock time.
print main(200)
print datetime.now() - s
| [
"acganesh@stanford.edu"
] | acganesh@stanford.edu |
34c460b320afaa0d7a0a6778bb245ac99c089f7a | 205fe9835ee9ae9dee72635c870bd836f911d331 | /src/cloudx/migrations/0002_auto_20170903_1445.py | 0c823947ecfad2dc0289f569f91dc98f706f9f97 | [] | no_license | msrshahrukh100/testxml | 8bc9b2f5e40dd3878499a988579a3e76beec6582 | 30fa9523fd8d507964b127a640949534515c5b2e | refs/heads/master | 2021-01-22T06:11:59.410452 | 2017-09-03T18:43:54 | 2017-09-03T18:43:54 | 102,286,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-03 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow `Data.managerid` to be blank/NULL."""

    dependencies = [
        ('cloudx', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='data',
            name='managerid',
            # Relax the field: optional in forms (blank) and NULL in the DB.
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| [
"msr.concordfly@gmail.com"
] | msr.concordfly@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.