text stringlengths 38 1.54M |
|---|
from collections import namedtuple
import json
# ScrapedData = namedtuple('ScrapedData', 'title code employment_2016 employment_2026 change_number change_percent anual_wage')
# s = ScrapedData(title='Management Occupations', code='11-0000', employment_2016='9,533.1', employment_2026='10,411.5',
# change_number='878.3', change_percent='9.4', anual_wage='100,790')
class Occupation(object):
    """One occupation row scraped from BLS employment-projection data.

    ``data`` is an indexable sequence whose positions are: title, code,
    employment_2016, employment_2026, change_number, change_percent,
    anual_wage.  (The misspelled 'anual_wage' key is kept so existing JSON
    consumers are unaffected.)
    """

    def __init__(self, data):
        # Positional unpacking mirrors the scraper's column order.
        self.title = data[0]
        self.code = data[1]
        self.employment_2016 = data[2]
        self.employment_2026 = data[3]
        self.change_number = data[4]
        self.change_percent = data[5]
        self.anual_wage = data[6]

    def jsonData(self):
        """Return the occupation as a JSON-serializable dict keyed by title.

        Bug fix: the 2026 employment figure was previously emitted under a
        second 'employment_2016' label, so consumers could not tell the two
        projection years apart.
        """
        json_data = {
            self.title: [
                {'code': self.code},
                {'employment': [
                    {'employment_2016': self.employment_2016},
                    {'employment_2026': self.employment_2026},
                ]},
                {'change_employment': [
                    {'change_number': self.change_number},
                    {'change_percentage': self.change_percent},
                ]},
                {'anual_wage': self.anual_wage},
            ]
        }
        return json_data
# in order for out class to be a json object
def jsonDefault(obj):
    """`default=` hook for json.dumps: serialize objects via their __dict__.

    Fix: the parameter was named `object`, shadowing the builtin; json.dumps
    calls this positionally, so the rename is safe for callers.
    """
    return obj.__dict__
# # class instance
# employee = Occupation(s)
# # write it in json file
# filename = "careers.json"
# f = open(filename, "w")
# jsonstuff = json.dumps(employee.jsonData(), indent=4, default=jsonDefault)
# f.write(jsonstuff) |
"""
Created on 12:08, May. 23rd, 2021
Author: fassial
Filename: test_inputs.py
"""
# local dep
import stimulus
__all__ = [
"test_poisson_input",
]
## define test func
# define test_poisson_input func
def test_poisson_input():
    """Smoke-test stimulus.inputs.poisson_input and print the stimulus shape."""
    # Generate a 100 ms Poisson stimulus; the second return value is unused.
    stim, _ = stimulus.inputs.poisson_input(duration=100)
    # Expected layout: (duration / dt, size)
    print(stim.shape)
|
#!/usr/bin/python3
"""Island_perimeter in python"""
def island_perimeter(grid):
    """That returns the perimeter of the island described in grid.

    grid: list of lists of ints, 1 = land, 0 = water.

    Bug fix: the previous version returned 2 * (widest row + number of
    non-empty rows), which is only correct for solid rectangular islands.
    Now every land cell contributes one unit of perimeter per side that does
    not touch another land cell, which is correct for any island shape.
    """
    perimeter = 0
    rows = len(grid)
    for i, row in enumerate(grid):
        for j, cell in enumerate(row):
            if cell != 1:
                continue
            # Up / down / left / right neighbors; off-grid counts as water.
            if i == 0 or grid[i - 1][j] == 0:
                perimeter += 1
            if i == rows - 1 or grid[i + 1][j] == 0:
                perimeter += 1
            if j == 0 or row[j - 1] == 0:
                perimeter += 1
            if j == len(row) - 1 or row[j + 1] == 0:
                perimeter += 1
    return perimeter
|
from multiprocessing import Process, cpu_count, freeze_support, Queue
import numpy as np
from math import ceil, floor
import time
def next(seed):
    """One step of Java's java.util.Random LCG; returns (next(31), new seed)."""
    seed = (seed * 0x5DEECE66D + 0xB) & ((1 << 48) - 1)
    # Top 31 bits of the 48-bit state, i.e. Java's next(31).
    retval = seed >> 17
    # Mirror Java's signed-int wraparound; kept for faithfulness even though a
    # 31-bit value can never have bit 31 set.
    if retval & (1 << 31):
        retval -= 1 << 32
    return retval, seed
def nextInt(n, seed):
    """Emulate `new java.util.Random(seed).nextInt(n)` on a 48-bit LCG.

    Returns a pseudo-random value in [0, n).
    """
    # Java's Random constructor scrambles the user seed with the multiplier.
    seed = (seed ^ 0x5deece66d) & ((1 << 48) - 1)
    retval, seed = next(seed)
    if not (n & (n - 1)):
        # n is a power of two: use the high bits directly, as Java does.
        return (n * retval) >> 31
    else:
        bits = retval
        val = bits % n
        # Java's modulo-bias rejection loop relies on 32-bit int overflow.
        # NOTE(review): with Python's unbounded ints this condition can never
        # be true, so the loop never reruns — results match Java only for
        # draws Java would not have rejected either.
        while (bits - val + n - 1) < 0:
            bits, seed = next(seed)
            val = bits % n
        return val
def javaInt64(val):
    """Wrap an arbitrary integer into Java's signed 64-bit range."""
    half = 1 << 63
    return ((val + half) % (half << 1)) - half
def javaInt32(val):
    """Wrap an arbitrary integer into Java's signed 32-bit range."""
    half = 1 << 31
    return ((val + half) % (half << 1)) - half
def itsASlime(cx, cz, worldseed):
    """Return True when chunk (cx, cz) is a slime chunk for `worldseed`.

    Mirrors Minecraft Java's slime-chunk test: a scrambled per-chunk seed
    feeds Random.nextInt(10), and slime chunks are those drawing 0.
    NOTE(review): in Python `^` binds looser than `+`, so `^ 987234911`
    applies to the entire sum — confirm this matches the intended Java
    grouping of the scramble expression.
    """
    seed= javaInt64(worldseed + javaInt32(cx * cx * 4987142) + javaInt32(cx * 5947611) + javaInt32(cz * cz) * 4392871 + javaInt32(cz * 389711) ^ 987234911)
    return not nextInt(10, seed)
def initialize(r, s, w, offset):
    """Build the initial s×s boolean grid of slime-chunk flags.

    Chunk x starts at -r, chunk z starts at `offset`; `w` is the world seed.
    """
    grid = np.zeros((s, s), dtype=bool)
    for row in range(s):
        for col in range(s):
            grid[row][col] = itsASlime(-r + col, row + offset, w)
    return grid
def goDown(a, nbr, s, x, z, w):
    """Slide the s-wide window `nbr` chunks in +z: drop rows, append new ones."""
    kept = a[nbr:]
    fresh = np.zeros((nbr, s), dtype=bool)
    for row in range(nbr):
        for col in range(s):
            fresh[row][col] = itsASlime(x + col, z + s + row, w)
    return np.concatenate((kept, fresh))
def goRight(a, nbr, s, x, z, w):
    """Slide the window `nbr` chunks in +x: shift each row left, append cells."""
    for row in range(s):
        for step in range(nbr):
            new_cell = itsASlime(x + s + step, z + row, w)
            a[row] = np.concatenate((a[row][1:], [new_cell]))
    return a
def checkMask(mask, layer):
    """True when the scanned layer matches the target mask exactly."""
    return np.array_equal(layer, mask)
def workers(mask, index, offset, seed, size, radius, cores, result):
    """Worker process: scan one chunk row for windows matching `mask`.

    The row is z = offset * cores + index; the size×size window sweeps x from
    -radius rightwards, and each (window x position, row z) hit is pushed onto
    the shared `result` queue.
    """
    # Anchor the sliding window at the left edge of this row.
    block = initialize(radius, size, seed, offset * cores + index)
    if checkMask(mask, block):
        result.put((0, offset * cores + index))
    for i in range(-radius, radius - 1):
        # Shift the window one chunk in +x and re-test against the mask.
        block = goRight(block, 1, size, i, offset * cores + index, seed)
        if checkMask(mask, block):
            result.put((i + 1, offset * cores + index))
def main(radius, seed, size, mask):
    """Scan a radius×radius chunk area of world `seed` for a size×size
    slime-chunk pattern equal to `mask`, one worker process per core per row.

    Prints the accumulated (window x, chunk z) hits at the end.

    Bug fixes: `assert size, radius > 0` asserted only `size` and used
    `radius > 0` as the message; the join loop ran `p.join()` for every pool
    entry, repeatedly joining the last-started process and never waiting on
    the others.
    """
    assert size > 0 and radius > 0
    result = []
    result_queue = Queue()
    cores = cpu_count()
    t = time.time()
    for offset in range(-floor(radius / cores), ceil(radius / cores)):
        # Fresh batch of one worker per core for this band of rows.
        processPool = []
        for i in range(cores):
            p = Process(target=workers, args=(mask, i, offset, seed, size, radius, cores, result_queue))
            p.daemon = True
            p.start()
            processPool.append(p)
        for proc in processPool:
            proc.join()
        # Sentinel lets us drain exactly the results produced so far.
        result_queue.put("DONE")
        while True:
            temp = result_queue.get()
            if temp == "DONE":
                break
            result.append(temp)
        if not offset % cores:
            # Coarse progress report plus per-band timing.
            print("{} %".format(round(offset / (2 * radius / cores) * 100 + 50, 2)))
            print(time.time() - t)
            t = time.time()
    print(result)
def start():
    # Entry point: configure the search and report total runtime.
    t = time.time()
    freeze_support()
    # Search parameters: window size (chunks), world seed, scan radius.
    size = 16
    seed = 2
    radius = 20000
    # All-False mask = "no slime chunk anywhere in the 16x16 window".
    mask = np.zeros((size, size), dtype=bool)
    main(radius, seed, size, mask)
    print(time.time() - t)
    print("The results are in chunks compared to 0 0, also you need to read it as chunkX,chunkZ")
if __name__ == '__main__':
    freeze_support()
    start()
|
from EMAIL import *
from fra import *
from time import gmtime, strftime
import sys
import logging
import time
FILENAME = "MaxaromaNA"
Subject = "MaxAroma_NewArrival"
def main():
    """Poll maxaroma.com every 15 minutes and email when the page changes."""
    url = "http://www.maxaroma.com/"
    original = getMaxAroma(url)
    sendemail("Started " + FILENAME, Subject)
    while True:
        # Timestamp for this iteration's log entry.
        current_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        try:
            current = getMaxAroma(url)
            if current == original:
                log = current_time + " same"
            else:
                # Page changed: email the old/new content and adopt the new
                # snapshot as the comparison baseline.
                log = current_time + "\n" + "OLD: \n" + original + "\nNEW: \n" + current
                sendemail(log, Subject)
                original = current
            writeToFile(log, FILENAME)
        except Exception:
            # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit, making the watcher impossible to stop cleanly.
            logging.exception('OPPSS' + current_time)
        time.sleep(15 * 60)
# Log uncaught errors for this watcher to its own file, then start polling.
logging.basicConfig(level=logging.DEBUG, filename=FILENAME + 'error.txt')
main()
|
import subprocess
def executeCommand():
    '''
    Read a command from a user input, validate it, and execute it

    NOTE(review): not implemented — the body is only this docstring, so the
    function currently does nothing and returns None.  When implementing,
    prefer subprocess.run([...], shell=False) with a tokenized argument list
    to avoid shell injection.
    '''
from astral.adapters.Net import TCPServer,errors
from base_adapter import ClientAdapter
import socket
import threading
import time
class server_adapter(TCPServer):
    """TCP server transport for a game server.

    Wraps the project's TCPServer: incoming messages are queued as
    (address, reply-callable, data) tuples that update() later drains into
    gameserver.handle_data.  The accept loop runs on a daemon thread.
    """
    def __init__(self,gameserver,host="127.0.0.1",port=1919):
        TCPServer.__init__(self)
        self.gameserver = gameserver
        self.host = host
        self.port = port
        # Pending (addr, send, data) tuples awaiting dispatch in update().
        self.messages = []
        self.addr = None
        # No-op sender until a client has been heard from.
        self.Send = lambda data:0
    def sendback(self,data):
        # Reply on whichever socket the base class currently targets.
        self.send_data(data)
    def send_to_socket(self,sock,data):
        # Select the target socket, then send.
        self.sending_socket = sock
        self.send_data(data)
    def remove_socket(self,sock):
        """Add a try except here"""
        # Tolerate the socket already having been removed by the base class.
        try:
            TCPServer.remove_socket(self,sock)
        except ValueError:
            import traceback
            traceback.print_exc()
    def input_func(self,sock,host,port,address):
        """This occurs for each socket we hear from right before handle_data is called"""
        # Remember who is talking so handle_data can attribute the message.
        self.addr = address
        self.Send = lambda data:self.send_to_socket(sock,data)
    def handle_data(self,data,addr=None,send=None):
        """This occors on incoming data, right after input_func is called, but only if data is clean"""
        if not addr:
            addr = self.addr
        if not send:
            send = self.Send
        # NOTE(review): this appends self.addr/self.Send rather than the
        # addr/send locals defaulted just above — confirm whether explicitly
        # passed arguments were meant to be honored here.
        self.messages.append((self.addr,self.Send,data))
    def client_disconnect_func(self,sock,host,port,address):
        """Client disconnected"""
        # Queue a synthetic "disconnected" message with a no-op reply hook.
        self.messages.append((address,lambda data:0,{"action":"disconnected"}))
    def update(self):
        #The threads should already be listening
        # Drain queued messages into the game server; iterate over a copy
        # because the list is mutated while looping.
        for a in self.messages[:]:
            self.gameserver.handle_data(*a)
            self.messages.remove(a)
    def _start(self):
        # Thread body: bind, serve until quit, then flag completion.
        self.ending = False
        try:
            self.connect(self.host,self.port)
        except:
            # Bind failed (port busy etc.): report upward and stop.
            print("can't host")
            self.handle_data({"action":"error","value":"hosting_error"})
            self.ending = True
            return
        self.serve_forever()
        self.quit()
        self.ending = True
    def start(self):
        # Serve on a daemon thread so the caller's main loop never blocks.
        t = threading.Thread(target=self._start)
        t.daemon = True
        t.start()
        self.t =t
    def close(self):
        # Shut everything down and wait for the serve thread to finish.
        try:
            self.quit()
        except:
            pass
        self.looping = False
        if self.sending_socket:
            self.sending_socket.close()
        if self.unconnected_socket:
            self.unconnected_socket.close()
        if getattr(self,"connected_sockets",None):
            for sock in self.connected_sockets:
                sock.close()
        #self.socketaddresses = {}
        # Busy-wait until _start() signals it has exited.
        while not self.ending:
            pass
from astral.adapters.Net import TCPClient,errors
class client_adapter(TCPClient,ClientAdapter):
    """TCP client transport for a game client; connects immediately on init."""
    def __init__(self,gameclient,host="127.0.0.1",port=1919):
        TCPClient.__init__(self)
        ClientAdapter.__init__(self,gameclient,host,port)
        self.connect_to_server()
        try:
            self.connect(host,port)
        except:
            # Connection refused/unreachable: report and stay disconnected.
            self.handle_disconnect()
            return
        self.handle_connect()
    def send_to_server(self,data):
        # Best-effort send; transmit errors are deliberately swallowed.
        try:
            self.send_data(data)
        except:
            return
    def listen(self):
        """Poll once for inbound data and dispatch it to the game client."""
        if not self.connect_state:
            return
        try:
            data = self.check_for_data()
        except errors.SocketError:
            self.handle_disconnect()
            return
        if data:
            self.handle_data(data)
        self.flush()
    def close(self):
        # Tear down the connection via the base class.
        self.quit()
# Transport registry: maps (role, protocol) to the adapter class to build.
adapter_hook = {("server","tcp"):server_adapter,("client","tcp"):client_adapter}
# importing dataset
# Bug fixes: `from sklearn.datasets import .... as fetch_olivetti_faces` was a
# syntax error; `np`/`plt` were used without being imported; plt.imshow used a
# nonexistent `amap=` keyword instead of `cmap=`.
from sklearn.datasets import fetch_olivetti_faces
import numpy as np
from matplotlib import pyplot as plt

faces = fetch_olivetti_faces()
img = faces.images
M = 400   # number of face images in the Olivetti set
N = 4096  # pixels per image (64 * 64)
# visage moyen = vm (mean face): sum each flattened image, then average.
vm = np.zeros(N)
for a in img:
    vm = vm + a.flatten()
vm1 = vm / M
vm2 = vm1.reshape(64, 64)
plt.imshow(vm2, cmap='gray')
plt.show()
|
# Build an n×n magic square with a Siamese-style placement method (n odd).
n=13
L=[ [ 0 for i in range(n) ] for j in range(n) ]
# Anchor position: k = last row, l = middle column.
k,l=n-1,int((n+1)/2-1)
if (n%2!=0):
    L[k][l]=1
for i in range (1,n**2):
    # Default move: continue along the (k+i, l+i) diagonal, wrapping mod n.
    if (L[(k+i)%n][(l+i)%n] == 0):
        L[(k+i)%n][(l+i)%n] = i+1
    else:
        # Collision: place at (k+i-2, l+i-1) instead and re-anchor there.
        L[(k+i-2)%n][(l+i-1)%n]=i+1
        k,l=(k+i-2)%n,(l+i-1)%n
for i in range(n):
    print (L[i],'\n')
from django.contrib import admin
from online_app.models import *
# Register your models here.
# Expose the package-index models in the Django admin site.
admin.site.register(Repo)
admin.site.register(Package)
#!/usr/bin/python3
import xmlrpc.client
import time
import sys
# Require the target host on the command line.
if len(sys.argv) == 1:
    print("USAGE: %s <server>" % sys.argv[0])
    # Bug fix: exit non-zero on a usage error (previously exited 0, which
    # made failed invocations look successful to shells/scripts).
    sys.exit(1)

# Round-trip one XML-RPC ping and report the latency.
s = xmlrpc.client.ServerProxy('http://%s:8000' % sys.argv[1])
pre = time.time()
response = s.ping()
post = time.time()
diff = post - pre
print(pre,response,post,diff)
# Print list of available methods
#print(s.system.listMethods())
#!/usr/bin/env python3
import yaml
from jinja2 import Template
from datetime import datetime
from kubernetes import client, config
def main():
    # Kick off a Kaniko build of the init-sidecar image from its git repo.
    kaniko("sidecar", "latest", "git://github.com/kbase/init-sidecar.git")
def kaniko(image, tag, repo):
    """Render the Kaniko pod template for (image, tag, repo) and create the
    resulting pod in the `next` namespace via the local kubeconfig."""
    # Unique pod name: image name plus a microsecond-resolution timestamp.
    stamp = datetime.now().strftime('%Y%m%d%H%M%S%f')
    template_vars = {
        "image_name": image + "-" + stamp,
        "image_tag": tag,
        "repo_name": repo,
    }
    # Render the pod manifest from the Jinja template on disk.
    with open('kaniko-template.j2') as template_file:
        manifest = Template(template_file.read()).render(template_vars)
    # Create the pod through the Core V1 API.
    config.load_kube_config()
    core_v1 = client.CoreV1Api()
    resp = core_v1.create_namespaced_pod(
        body=yaml.safe_load(manifest), namespace="next")
    print("Deployment created. status='%s'" % resp.metadata.name)
# Run the build only when executed directly.
if __name__ == '__main__':
    main()
|
import os
import csv
import time
import imaplib
import email
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, jsonify
app = Flask(__name__)
app.config.from_object(__name__)
# Default data directory; overridable via the QVTABLE_SETTINGS env file.
app.config.update(dict(DATADIR="data"))
app.config.from_envvar('QVTABLE_SETTINGS', silent=True)
# Absolute path to the CSV data directory served by the routes below.
datafullpath = os.path.join(app.root_path, app.config.get("DATADIR"))
def email_check():
    '''
    Check email if there are any new messages

    NOTE(review): deliberately disabled — the early `return` below skips the
    entire IMAP block, and the login credentials are placeholders.  Remove
    the return and supply real credentials to enable.
    '''
    return
    print("Checking email...")
    mail = imaplib.IMAP4_SSL("imap.gmail.com")
    (retcode, capabilities) = mail.login('example@gmail.com','pass')
    mail.select(readonly=True)
    # Fetch unseen messages, print sender/subject, then mark them seen.
    (retcode, messages) = mail.search(None, '(UNSEEN)')
    if retcode == 'OK':
        for num in messages[0].split():
            print('Processing')
            typ, data = mail.fetch(num,'(RFC822)')
            for response_part in data:
                if isinstance(response_part, tuple):
                    print(response_part[1])
                    # NOTE(review): response_part[1] is bytes; str() yields
                    # "b'...'" and will not parse as a real RFC822 message —
                    # message_from_bytes would be needed if re-enabled.
                    original = email.message_from_string(str(response_part[1]))
                    print (original['From'])
                    print (original['Subject'])
                    typ, data = mail.store(num,'+FLAGS','\\Seen')
@app.route('/')
def show_entries():
    """Serve the landing page with no file preselected."""
    return render_template('main.html')
@app.route('/<filename>')
def show_data(filename):
    """Serve the main page preloaded with `filename` when it exists in DATADIR."""
    # basename() strips any path components a client might smuggle in.
    safe_name = os.path.basename(filename)
    candidate = os.path.join(datafullpath, safe_name)
    if not os.path.isfile(candidate):
        return render_template('main.html')
    return render_template('main.html', filename=safe_name)
@app.route('/data/')
def get_data_list():
    """Return the data files as JSON [{name, date}, ...], newest first.

    Bug fix: the name/date entry is now built inside the `isfile` check, so
    directory entries no longer produce list items (and a non-file first
    entry can no longer hit an unset `filedatestr`).
    """
    datafiles = []
    for f in os.listdir(datafullpath):
        ffullpath = os.path.join(datafullpath, f)
        if os.path.isfile(ffullpath):
            # Modification time formatted as a sortable UTC string.
            filedatestr = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(os.path.getmtime(ffullpath)))
            datafiles.append({"name": f, "date": filedatestr})
    datafilessorted = sorted(datafiles, key=lambda k: k['date'], reverse=True)
    email_check()
    return jsonify(datafilessorted)
@app.route('/data/<filename>')
def get_data(filename):
    """Return the rows of a CSV file in DATADIR as a JSON array of arrays.

    Bug fix: a missing file now aborts with 404 instead of returning None,
    which Flask would surface as an opaque 500.  The file handle is managed
    by a `with` block so it is closed even if parsing fails.
    """
    datafilename = os.path.join(datafullpath, os.path.basename(filename))
    if not os.path.exists(datafilename):
        print("Couldn't find file: " + datafilename)
        abort(404)
    out = []
    with open(datafilename, 'r') as f:
        # Detect the delimiter/quoting from the first KB, then rewind.
        dialect = csv.Sniffer().sniff(f.read(1024))
        f.seek(0)
        for row in csv.reader(f, dialect):
            out.append(row)
    return jsonify(out)
|
import urllib2
from pyIEM import iemdb
import mx.DateTime
i = iemdb.iemdb()
coop = i['coop']
mesosite = i['mesosite']
stmeta = {}
def parse_lonlat( txt ):
    """Parse a 'DD MM SS DDD MM SS' field into decimal-degree (lon, lat)."""
    parts = txt.split()
    # Degrees + (minutes + seconds/60)/60; longitude is subtracted from the
    # whole-degree value (west-positive convention of the source file).
    lat = float(parts[0]) + ((float(parts[1]) + float(parts[2]) / 60.0) / 60.0)
    lon = float(parts[3]) - ((float(parts[4]) + float(parts[5]) / 60.0) / 60.0)
    return lon, lat
# Parse the fixed-width COOP.TXT station metadata into INSERT statements,
# keyed by state+station id (executed later, only for non-IA stations).
for line in open('COOP.TXT'):
    lon, lat = parse_lonlat( line[149:168] )
    elev = float( line[168:176] )
    name = line[99:129].strip()
    st = line[59:61]
    id = line[:6]
    # IEM id convention: 2-char state code + last 4 characters of the COOP id.
    iemid = "%s%s" % (st, id[2:])
    # NOTE(review): SQL is built by string interpolation — a station name
    # containing a quote will break it; acceptable only because the input is
    # a trusted local file.
    sql = """INSERT into stations(id, name, state, country, elevation, network, geom)
    VALUES ('%s', '%s', '%s', 'US', %s, '%sCLIMATE', 'SRID=4326;POINT(%s %s)')""" % (
    iemid, name, st, elev, st, lon, lat)
    stmeta["%s%s" % (st, id) ] = sql
# For every station, fetch its daily climate-normals page from NCDC, expand
# the monthly rows into per-day records, and insert them into `coop`.
for id in stmeta.keys():
    # Go checkout NCDC for data
    fp = "http://cdo.ncdc.noaa.gov/climatenormals/clim84/%s/%s.txt" % (id[:2], id)
    req = urllib2.Request(fp)
    try:
        lines = urllib2.urlopen(req).readlines()
    except:
        print 'Missing %s %s' % (id, fp)
        continue
    # Very short pages are error or empty responses.
    if len(lines) < 40:
        continue
    data = {}
    stationid = '%s%s' % (id[:2].lower(), id[4:])
    # Variable block order on the NCDC page; only low/high/precip are used.
    vars = ['low', 'high', 'blah','blah', 'blah', 'precip']
    pointer = -1
    try:
        for line in lines:
            # -99 is NCDC's missing-value sentinel; substitute zero.
            tokens = line.replace("-99", " 0").strip().split()
            if line[0] in ['-', ' ']:
                continue
            if tokens[0] == "JAN":
                # Each JAN row marks the start of the next variable block.
                pointer += 1
            ts = mx.DateTime.strptime("%s-01-2001" % (tokens[0],), '%B-%d-%Y')
            days = ((ts + mx.DateTime.RelativeDateTime(months=1)) - ts).days
            for v in range(int(days)):
                ts0 = ts + mx.DateTime.RelativeDateTime(days=v)
                if not data.has_key(ts0):
                    data[ts0] = {}
                val = tokens[v+1]
                data[ts0][ vars[pointer] ] = float(val)
        for ts in data.keys():
            # Precip is divided by 100 — presumably published in hundredths
            # of an inch (TODO confirm against the NCDC format notes).
            sql = "INSERT into ncdc_climate71 (station, valid, high, low, precip) VALUES ('%s', '2000-%s', %s, %s, %s)" % (stationid, ts.strftime("%m-%d"), data[ts]['high'], data[ts]['low'], data[ts]['precip'] / 100.0)
            coop.query(sql)
        print 'Worked %s %s' % (len(data.keys()), stationid,)
    except:
        print 'Fail %s' % (id,)
        continue
    # Iowa stations already exist in mesosite; insert metadata for the rest.
    if id[:2] != 'IA':
        try:
            mesosite.query( stmeta[id] )
        except:
            pass
|
#! /usr/bin/python
import sys
scaffoldFile = sys.argv[1]
outputFile = "%s_unique.fasta" %(scaffoldFile)
infileScaffolds = open(scaffoldFile, "r")
outfile = open(outputFile, "w")
fastaDict = {}
key = 0
fastaDict[key] = []
for line in infileScaffolds:
if ">" in line:
joinLine = "".join(fastaDict[key])
fastaDict[key] = joinLine
key += 1
fastaDict[key] = []
if ">" not in line:
stripLine = line.strip("\n")
fastaDict[key].append(stripLine)
joinLine = "".join(fastaDict[key])
fastaDict[key] = joinLine
key = 0
for item in sorted(set(fastaDict.values())):
outfile.write(">Iengl_Schafran43_scaffold%d\n" %(key))
outfile.write("%s" %(item))
outfile.write("\n")
key += 1
print "%d unique scaffolds out of %d total" %(len(set(fastaDict.values())), len(fastaDict))
outfile.close()
infileScaffolds.close()
|
# Generated by Django 3.0.3 on 2020-03-21 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the `desc` and `project_title`
    # CharFields (max 100 chars, default '') to the Project model.

    dependencies = [
        ('mywebsite', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='desc',
            field=models.CharField(default='', max_length=100),
        ),
        migrations.AddField(
            model_name='project',
            name='project_title',
            field=models.CharField(default='', max_length=100),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 18:01:34 2015
@author: Erin
"""
# An implementation of example 2 from MT-DREAM(ZS) original Matlab code. (see Laloy and Vrugt 2012)
# 200 dimensional Gaussian distribution
import numpy as np
import os
from pydream.parameters import FlatParam
from pydream.core import run_dream
from pydream.convergence import Gelman_Rubin
def Latin_hypercube(minn, maxn, N):
    """Draw N Latin-hypercube samples between per-dimension bounds minn/maxn."""
    dims = len(minn)
    jitter = np.random.rand(N, dims)
    samples = np.zeros((N, dims))
    for dim in range(dims):
        # One sample per stratum: permuted stratum indices, jittered inside.
        order = np.random.permutation(N)
        fractions = (order - jitter[:, dim]) / N
        samples[:, dim] = minn[dim] + fractions * (maxn[dim] - minn[dim])
    return samples
# Target: d-dimensional Gaussian with covariance C[i][j] = A[i][j] *
# sqrt((i+1)(j+1)), A having 1 on the diagonal and 0.5 elsewhere
# (Laloy & Vrugt 2012, example 2).
d = 200
A = .5 * np.identity(d) + .5 * np.ones((d,d))
C = np.zeros((d,d))
for i in range(d):
    for j in range(d):
        C[i][j] = A[i][j] * np.sqrt((i+1)*(j+1))
invC = np.linalg.inv(C)
mu = np.zeros(d)
# Normalization constant; set to 0 for large d where det(C) overflows.
if d > 150:
    log_F = 0
else:
    log_F = np.log(((2 * np.pi)**(-d/2))*np.linalg.det(C)**(- 1./2))
#Create initial samples matrix m that will be loaded in as DREAM history file
m = Latin_hypercube(np.linspace(-5, -5, num=d), np.linspace(15, 15, num=d), 1000)
np.save('ndim_gaussian_seed.npy', m)
def likelihood(param_vec):
    # Multivariate-Gaussian log-density (up to the log_F constant).
    logp = log_F - .5 * np.sum(param_vec*np.dot(invC, param_vec))
    return logp
# Three chains start from the first three history rows.
starts = [m[chain] for chain in range(3)]
params = FlatParam(test_value=mu)
if __name__ == '__main__':
    niterations = 150000
    # Run DREAM sampling. Documentation of DREAM options is in Dream.py.
    converged = False
    total_iterations = niterations
    nchains = 3
    sampled_params, log_ps = run_dream([params], likelihood, niterations=niterations, nchains=nchains, start=starts, start_random=False, save_history=True, adapt_gamma=False, gamma_levels=1, tempering=False, history_file='ndim_gaussian_seed.npy', multitry=5, parallel=False, model_name='ndim_gaussian')
    # Persist sampled parameters and log-likelihoods per chain.
    for chain in range(len(sampled_params)):
        np.save('ndimgauss_mtdreamzs_3chain_sampled_params_chain_'+str(chain)+'_'+str(total_iterations), sampled_params[chain])
        np.save('ndimgauss_mtdreamzs_3chain_logps_chain_'+str(chain)+'_'+str(total_iterations), log_ps[chain])
    os.remove('ndim_gaussian_seed.npy')
    # Check convergence and continue sampling if not converged
    GR = Gelman_Rubin(sampled_params)
    print('At iteration: ', total_iterations, ' GR = ', GR)
    np.savetxt('ndimgauss_mtdreamzs_3chain_GelmanRubin_iteration_' + str(total_iterations) + '.txt', GR)
    old_samples = sampled_params
    if np.any(GR > 1.2):
        # Resume each chain from its last sample until Gelman-Rubin < 1.2.
        starts = [sampled_params[chain][-1, :] for chain in range(nchains)]
        while not converged:
            total_iterations += niterations
            sampled_params, log_ps = run_dream([params], likelihood, niterations=niterations, nchains=nchains,
                                               start=starts, start_random=False, save_history=True, adapt_gamma=False,
                                               gamma_levels=1, tempering=False, multitry=5, parallel=False,
                                               model_name='ndim_gaussian', restart=True)
            for chain in range(len(sampled_params)):
                np.save('ndimgauss_mtdreamzs_3chain_sampled_params_chain_' + str(chain) + '_' + str(total_iterations),
                        sampled_params[chain])
                np.save('ndimgauss_mtdreamzs_3chain_logps_chain_' + str(chain) + '_' + str(total_iterations),
                        log_ps[chain])
            old_samples = [np.concatenate((old_samples[chain], sampled_params[chain])) for chain in range(nchains)]
            GR = Gelman_Rubin(old_samples)
            print('At iteration: ', total_iterations, ' GR = ', GR)
            np.savetxt('ndimgauss_mtdreamzs_5chain_GelmanRubin_iteration_' + str(total_iterations)+'.txt', GR)
            if np.all(GR < 1.2):
                converged = True
    try:
        # Plot output
        import seaborn as sns
        from matplotlib import pyplot as plt
        total_iterations = len(old_samples[0])
        # Bug fix: integer (floor) division — a float burn-in index raises
        # TypeError when used to slice a numpy array under Python 3.
        burnin = total_iterations // 2
        samples = np.concatenate((old_samples[0][burnin:, :], old_samples[1][burnin:, :], old_samples[2][burnin:, :]))
        ndims = len(old_samples[0][0])
        colors = sns.color_palette(n_colors=ndims)
        for dim in range(ndims):
            fig = plt.figure()
            sns.distplot(samples[:, dim], color=colors[dim])
            fig.savefig('PyDREAM_example_NDimGauss_dimension_' + str(dim))
    except ImportError:
        pass
else:
    # Imported (e.g. by pydream's worker processes): expose run kwargs only.
    run_kwargs = {'parameters':[params], 'likelihood':likelihood, 'niterations':150000, 'nchains':3, 'start':starts, 'start_random':False, 'save_history':True, 'adapt_gamma':False, 'gamma_levels':1, 'tempering':False, 'history_file':'ndim_gaussian_seed.npy', 'multitry':5, 'parallel':False, 'model_name':'ndim_gaussian'}
|
def count(valuelist):
    """Return the absolute differences between consecutive values."""
    return [abs(prev - cur) for prev, cur in zip(valuelist, valuelist[1:])]
# HackerRank "Funny String": a string is funny when |s[i]-s[i-1]| equals the
# same quantity computed on its reverse, position by position.
test_case = int(input())
while(test_case):
    input_list = list(input())
    reversed_input_list = reversed(input_list)
    # Work on character codes so count() can take numeric differences.
    input_list = [ord(x) for x in input_list]
    reversed_input_list = [ord(x) for x in reversed_input_list]
    input_list = count(input_list)
    reversed_input_list = count(reversed_input_list)
    if(input_list == reversed_input_list):
        print("Funny")
    else:
        print("Not Funny")
    test_case -= 1
import os
import random
import string
import setup_catalog
from google.api_core.client_options import ClientOptions
from google.cloud.retail_v2 import SearchServiceClient, SearchRequest
# Cloud Retail project/endpoint configuration for the search samples.
project_number = "1038874412926"
endpoint = "retail.googleapis.com"
# Products are tagged with this attribute so concurrent test runs stay isolated.
isolation_filter_key = "INTEGRATION_FILTER_KEY"
title_query = "Nest_Maxi"
visitor_id = "visitor"
# One random lowercase letter identifying this test run.
test_id = ''.join(random.sample(string.ascii_lowercase, 1))
# [START search_client]
default_catalog = "projects/{0}/locations/global/catalogs/default_catalog/branches/0".format(project_number)
default_search_placement = "projects/1038874412926/locations/global/catalogs/default_catalog/placements/default_search"
def get_search_service_client():
    # Build a SearchServiceClient pointed at the retail endpoint.
    client_options = ClientOptions(endpoint)
    return SearchServiceClient(client_options=client_options)
# [END search_client]
def build_isolation_filter(test__id: str):
    # Retail filter expression restricting results to this run's products.
    return 'attributes.{0}: ANY("{1}")'.format(isolation_filter_key, test__id)
# [START search_product_with_boost_spec]
def search_products_with_boost_spec(query: str, _condition: str, _boost_strength: float):
    """Search the default catalog, boosting products matching `_condition`
    with strength `_boost_strength`.

    Bug fix: the boost spec was previously assembled by mutating throwaway
    instances (`SearchRequest().BoostSpec()`, then setting attributes on a
    fresh `ConditionBoostSpec()` temporary and on the nested class itself),
    so neither the condition nor the strength ever reached the request.  The
    spec is now built as a proper ConditionBoostSpec attached to the
    request's BoostSpec, per the Retail API message structure.
    """
    condition_boost = SearchRequest.BoostSpec.ConditionBoostSpec(
        condition=_condition,
        boost=_boost_strength,
    )
    boost_spec = SearchRequest.BoostSpec(condition_boost_specs=[condition_boost])
    search_request = SearchRequest()
    search_request.placement = default_search_placement
    search_request.branch = default_catalog
    search_request.query = query
    search_request.filter = build_isolation_filter(test_id)
    search_request.visitor_id = visitor_id
    search_request.boost_spec = boost_spec
    print("---search request---")
    print(search_request)
    return get_search_service_client().search(search_request)
# [END search_product_with_boost_spec]
def search():
    # Ingest test products, run the boosted search, print hits, then clean up.
    setup_catalog.ingest_products(test_id)
    search_response = search_products_with_boost_spec(title_query, "(colorFamily: ANY(\"blue\"))", 0.5)
    print("BOOST SEARCH RESULTS")
    print(search_response.results)
    setup_catalog.delete_products()
# Executed on import: runs the whole ingest/search/cleanup cycle.
search()
|
from itertools import product
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
# Training samples on a 2-row grid of (x, y) positions; the y values look
# like dBm signal strengths (unconfirmed).
X = np.array([[0,0],[2,0],[4,0],[6,0],[8,0],[10,0],[12,0],[14,0],[16,0],[0,2],
[2,2],[4,2],[6,2],[8,2],[10,2],[12,2],[14,2],[16,2]])
y = np.array([-54,-60,-62,-64,-66,-68,-70,-72,-74,-60,-62,-64,-66,
-68,-70,-72,-74,-76])
# Input space
# 50-point grids (np.linspace default count) spanning the observed ranges.
x1 = np.linspace(X[:,0].min(), X[:,0].max()) #p
x2 = np.linspace(X[:,1].min(), X[:,1].max()) #q
x = (np.array([x1, x2])).T
print(x)
# Constant * RBF kernel with per-dimension length scales; fit the GP.
kernel = C(1.0, (1e-3, 1e3)) * RBF([5,5], (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=15)
gp.fit(X, y)
# Predict over the full 50x50 cartesian grid; MSE holds the posterior std.
x1x2 = np.array(list(product(x1, x2)))
y_pred, MSE = gp.predict(x1x2, return_std=True)
# Reshape back to 50x50 (matches the linspace sizes) for surface plotting.
X0p, X1p = x1x2[:,0].reshape(50,50), x1x2[:,1].reshape(50,50)
Zp = np.reshape(y_pred,(50,50))
# alternative way to generate equivalent X0p, X1p, Zp
# X0p, X1p = np.meshgrid(x1, x2)
# Zp = [gp.predict([(X0p[i, j], X1p[i, j]) for i in range(X0p.shape[0])]) for j in range(X0p.shape[1])]
# Zp = np.array(Zp).T
fig = plt.figure(figsize=(10,8))
#ax = fig.add_subplot(111)
#ax.pcolormesh(X0p, X1p, Zp)
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X0p, X1p, Zp, rstride=1, cstride=1, cmap='jet', linewidth=0, antialiased=False)
plt.show()
import aiohttp_cors
from app.api import profile, roadmaps, skills, spec, vacancies
def setup_routes(app):
    """Register the application's routes, all wrapped with permissive CORS."""
    cors = aiohttp_cors.setup(app, defaults={
        '*': aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers='*',
            allow_headers='*',
        ),
    })
    # (register-function, path, handler) route table.
    routes = (
        (app.router.add_get, '/spec', spec.handler),
        (app.router.add_get, '/vacancies', vacancies.handler),
        (app.router.add_post, '/skills', skills.handler),
        (app.router.add_get, '/profile/known', profile.known_handler),
        (app.router.add_get, '/profile/unknown', profile.unknown_handler),
        (app.router.add_get, '/profile/score', profile.score_handler),
        (app.router.add_post, '/profile/complete', profile.complete_handler),
        (app.router.add_get, '/profile/courses', profile.courses_handler),
        (app.router.add_get, '/roadmaps', roadmaps.handler),
    )
    for register, path, handler in routes:
        cors.add(register(path, handler))
|
import hashlib
import random
from string import ascii_uppercase, digits
from rest_framework.response import Response
from rest_framework import status
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from .serializers import TransactionModelSerializer
from .models import Transaction
from rest_framework.decorators import list_route
from rest_framework.permissions import IsAdminUser, IsAuthenticated, AllowAny
from .money_wave_utils import *
import json
from config.settings.keys import *
class TransactionModelViewSet(ModelViewSet):
    """DRF viewset for money-transfer Transactions plus Flutterwave/Moneywave
    helper endpoints (bank lists, account resolution, Rave payments)."""
    model = Transaction
    permission_classes = [AllowAny]
    serializer_class = TransactionModelSerializer
    def get_queryset(self):
        # Only the requesting user's transactions, most recently modified first.
        return Transaction.objects.filter(user=self.request.user).order_by('-modified_on')
    @list_route(methods=['post'])
    def get_banks(self, request):
        """
        get all banks for selected country
        :param request:
        :return: <Response Object>
        """
        country = self.request.data['country']
        # NOTE(review): this bare name resolves to a module-level get_banks
        # (presumably from `from .money_wave_utils import *`), not to this
        # method — confirm the helper exists, otherwise this raises.
        banks = get_banks(country)
        return Response({"banks": banks}, status=status.HTTP_200_OK)
    @list_route(methods=['post'])
    def resolve_account(self, request):
        """
        resolve an account
        :param request:
        :return:
        """
        bank_code = request.data['bank_code']
        account_number = request.data['account_number']
        # NOTE(review): the 'country' field is stored as `currency` — confirm
        # the downstream helper really expects a country code here.
        currency = request.data['country']
        # Same name-shadowing situation as get_banks above: resolves to the
        # module-level helper at call time.
        resolve = resolve_account(account_number, bank_code, currency)
        if not resolve:
            return Response(status=status.HTTP_404_NOT_FOUND)
        else:
            print(resolve)
            return Response({"account": resolve}, status=status.HTTP_200_OK)
    @list_route(methods=['post'])
    def ravepayment_request(self, request):
        # Build the Rave inline-payment payload plus its integrity hash.
        hashedPayload = ''
        payload = {
            "PBFPubKey": FLW_API_KEY,
            "amount": request.data['amount'],
            "payment_method": "both",
            "custom_description": "Kaimun",
            "custom_title": "Instant Money Transfers",
            "country": request.data['country'],
            "currency": request.data['currency'],
            "customer_email": request.user.email,
            "customer_firstname": request.user.first_name,
            "customer_lastname": request.user.last_name,
            # "customer_phone": request.data['phone'],
            # Random 5-char transaction reference.
            "txref": "KMN-" + ''.join(random.sample((ascii_uppercase+digits), 5))
        }
        # sort payload and concatenate into a single string
        sorted_payload = sorted(payload)
        # concatenate sorted_payload. The payload is rearranged and the values concatenated in the order of the sorted keys.
        hashed_payload = ''
        # NOTE(review): this loop concatenates the sorted KEYS, not the
        # values the comment above describes — verify against Rave's
        # checksum specification before relying on this hash.
        for value in sorted_payload:
            hashed_payload += value
        # SECURITY NOTE(review): the Flutterwave secret key is hardcoded
        # here — it should come from settings/environment like FLW_API_KEY.
        hashed_string = hashed_payload + "FLWSECK-b86e4802fc5eaa03db5e7f73fdc4514e-X"
        integrity_hash = hashlib.sha256(hashed_string.lower().encode()).hexdigest()
        return Response({'payload': payload, 'integrityHash': integrity_hash})
    @list_route(methods=['post'])
    def ravepay_deposit(self, request):
        # instance = self.get_object()
        # Verify the charge against Rave's sandbox requery endpoint, then
        # disburse the requested amount to the given account.
        url = "https://ravesandboxapi.flutterwave.com/flwv3-pug/getpaidx/api/xrequery"
        data = {
            "txref": request.data['txRef'],
            "SECKEY" : FLW_API_SECRET,
            "include_payment_entity": 1
        }
        # NOTE(review): `requests` is not imported in this file's visible
        # imports — presumably re-exported by money_wave_utils; confirm.
        response = requests.post(url, data=data).json()
        account_number = request.data['account_number']
        bank_code = request.data['bank_code']
        currency = request.data['currency']
        amount = float(request.data['amount'])
        narration = request.data['narration']
        sender = request.user.first_name + ' ' + request.user.last_name
        # confirm that the response for the transaction is successful
        if response['status'] == 'success':
            data = response['data']
            # '00' is the success charge code in the requery response.
            if data[0]['chargecode'] == '00':
                chargedamount = float(data[0]['chargedamount'])
                # NOTE(review): strict '>' rejects a charge exactly equal to
                # the requested amount — confirm '>=' was not intended.
                if chargedamount > amount:
                    make_transfer = disburse(account_number, bank_code, amount, narration, currency, sender)
                    if make_transfer:
                        return Response({'message': 'Successfully Sent Funds'}, status=status.HTTP_200_OK)
                    else:
                        return Response({'message': 'Unable to send funds'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                else:
                    return Response({'message': 'Unable to send funds'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            else:
                return Response({'message': 'Transaction was not successful'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        ##changing this to ok for simulation purposes, the moneywave api isn't bring the correct response
        else:
            return Response({'message': '4Unable to send funds'}, status=status.HTTP_200_OK)
|
import gtk
from System import SystemType
current_system = SystemType()
class StatusIcon:
    """GTK system-tray icon for WWU wifi auth with a login/logout popup menu."""
    def __init__(self, parent):
        self.parent = parent
        iconpath = "WWU.gif"
        self.statusicon = gtk.StatusIcon()
        self.statusicon.set_from_file(iconpath)
        self.statusicon.connect("button_press_event", self.click_event)
        # self.statusicon.connect("activate", self.click_event)
        self.statusicon.set_tooltip("WWU wifi")
        # Hidden top-level window kept around alongside the tray icon.
        self.window = gtk.Window()
        self.window.show_all()
        self.window.hide()
        self.parent.logged = False
    def click_event(self, widget, event):
        # Left click: build and pop up the Login/Logout + About + Quit menu.
        if event.button == 1:
            menu = gtk.Menu()
            # Show Logout when authenticated, Login otherwise.
            if self.parent.logged:
                logout = gtk.MenuItem("Logout")
            else:
                login = gtk.MenuItem("Login")
            about = gtk.MenuItem("About")
            quit = gtk.MenuItem("Quit")
            if self.parent.logged:
                logout.connect("activate", self.logout)
            else:
                login.connect("activate", self.login)
            about.connect("activate", self.show_about_dialog)
            # NOTE(review): comparing a SystemType instance to "linux" relies
            # on SystemType defining string equality — confirm.
            if current_system == "linux":
                quit.connect("activate", self.parent.quit)
            else:
                quit.connect("activate", gtk.main_quit)
            if self.parent.logged:
                menu.append(logout)
            else:
                menu.append(login)
            menu.append(about)
            menu.append(quit)
            menu.show_all()
            menu.popup(None, None, gtk.status_icon_position_menu, event.button, event.time, self.statusicon)
    def show_about_dialog(self, widget):
        # Modal About box; destroyed immediately after it is dismissed.
        about_dialog = gtk.AboutDialog()
        about_dialog.set_destroy_with_parent(True)
        about_dialog.set_name("about wwu-auth")
        about_dialog.set_version("0.1")
        about_dialog.set_authors(["Morgan Borman"])
        about_dialog.run()
        about_dialog.destroy()
    def set_visibility(self, visibility):
        # Show or hide the tray icon.
        self.statusicon.set_visible(visibility)
    def set_blinking(self, blinking):
        # Toggle the tray icon's blink state.
        self.statusicon.set_blinking(blinking)
    def logout(self, opt):
        # De-authenticate through the parent controller and update state.
        self.parent.wwu_de_auth()
        self.parent.logged = False
    def login(self, opt):
        # Authenticate through the parent controller and update state.
        self.parent.wwu_auth()
        self.parent.logged = True
|
import requests
import time,random
from bs4 import BeautifulSoup
from urllib import request
def getData(data):
    """Build a readable forecast message string from the first row of the CWB
    forecast table (time, temperature, condition, comfort, rain chance)."""
    string=""
    time,temp,pict,condi,confort,rain,msg=[],[],[],[],[],[],[]
    for data_ in data:# collect the time / temperature / condition / comfort / rain-chance cells
        time.append(data_.find('th',{'scope':'row'}).text)
        temp.append(data_.find_all('td')[0].text)
        condi.append(data_.find('img')['title'])
        confort.append(data_.find_all('td')[2].text)
        rain.append(data_.find_all('td')[3].text)
        # Pick a friendly tip keyed on the weather-condition keyword
        # (rain -> umbrella, sunny -> sunscreen, cloudy -> exercise).
        if "雨" in str(condi[0]): msg.append("記得帶雨傘唷!!")
        elif "晴" in str(condi[0]):msg.append("要記得塗防曬喔~~~~")
        elif "多雲" in str(condi[0]):msg.append("今天是個適合運動的日子")
        else :msg.append("每一天都是新的一天!")
        # Only the first (current) forecast row is needed.
        break
    #for i in range(len(time)):
    string+="時間:%s \n溫度:%s (℃) \n天氣狀況:%s \n舒適度:%s \n降雨機率:%s \n我想對你說:%s"%(time[0],temp[0],condi[0],confort[0],rain[0],msg[0])
    return string
def Country(text):
    """Return the formatted CWB forecast string for a Taiwanese city/county.

    text: city/county name in Chinese (several aliases accepted per entry).

    Raises ValueError when the name is not recognised.  (The original code
    left ``url`` unbound in that case and crashed with an obscure NameError.)
    """
    # Map forecast page filename -> accepted name aliases.
    dic = {
        "Taipei_City.htm": ["台北市", "臺北市", "台北", "臺北"],
        "New_Taipei_City.htm": ["新北市", "新北"],
        "Taoyuan_City.htm": ["桃園市", "桃園"],
        "Taichung_City.htm": ["臺中市", "台中市", "台中", "臺中"],
        "Tainan_City.htm": ["臺南市", "台南市", "台南", "臺南"],
        "Kaohsiung_City.htm": ["高雄市", "高雄"],
        "Keelung_City.htm": ["基隆市", "基隆"],
        "Hsinchu_City.htm": ["新竹市"],
        "Hsinchu_County.htm": ["新竹縣"],
        "Miaoli_County.htm": ["苗栗縣", "苗栗"],
        "Changhua_County.htm": ["彰化縣", "彰化"],
        "Nantou_County.htm": ["南投縣", "南投"],
        "Yunlin_County.htm": ["雲林縣", "雲林"],
        "Chiayi_City.htm": ["嘉義市"],
        "Chiayi_County.htm": ["嘉義縣"],
        "Pingtung_County.htm": ["屏東縣", "屏東"],
        "Yilan_County.htm": ["宜蘭縣", "宜蘭"],
        "Hualien_County.htm": ["花蓮縣", "花蓮"],
        "Taitung_County.htm": ["臺東縣", "台東縣", "台東", "臺東"],
        "Penghu_County.htm": ["澎湖縣", "澎湖"],
        "Kinmen_County.htm": ["金門縣", "金門"],
        "Lienchiang_County.htm": ["連江縣", "連江"],
    }
    url = None
    for page, names in dic.items():  # resolve the city/county page
        if text in names:
            url = "https://www.cwb.gov.tw/V7/forecast/taiwan/" + page
            break
    if url is None:
        raise ValueError("unknown city/county name: %r" % (text,))
    response = requests.get(url)
    html_doc = str(response.content, 'utf-8')
    soup = BeautifulSoup(html_doc, "html.parser")
    # Grab all forecast rows from the first forecast table.
    data = soup.find('div', {'class': 'BoxContent clearfix'}).find('table', {'class': 'FcstBoxTable01'}).find('tbody').find_all('tr')
    return getData(data)
def dataurl():
    """Fetch the CWB UV-index map image and return an open urllib response."""
    base = "https://www.cwb.gov.tw"
    page = requests.get("https://www.cwb.gov.tw/V7/forecast/UVI/UVI.htm")
    soup = BeautifulSoup(page.text, "html.parser")
    # The image path is embedded in the div's inline CSS: "...url(<path>)".
    style = soup.find('div', {'class': 'UVITWmap'})["style"]
    img_path = style.split("url(")[1][:-1]
    return request.urlopen(base + img_path)
def main():
    """Demo entry point: fetch the Taipei forecast (result is discarded)."""
    Country('台北市')
if __name__ == '__main__':
    # Manual smoke test: fetch (and discard) the Taipei forecast.
    main()
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Time : 2017-08-25 14:33
# Author : MrFiona
# File : summary_optparse.py
# Software: PyCharm Community Edition
"""
模块optparse使用类OptionParser来作为命令行选项的解析器;下面是该类的方法:
1、OptionParser(self, prog=None, usage=None, description=None, epilog=None, option_list=None,
option_class=<class optparse.Option>, version=None, conflict_handler='error', formatter=None, add_help_option=True)
构造函数__init__(),用于创建一个命令行选项解析器实例;其中,参数:
description:
usage : 描述当前脚本程序的用法字符串;显示该用法之前,格式"%prog"将被格式化成当前脚本程序的名称
prog : 默认为当前脚本程序的名称 os.path.basename(sys.argv[0])
description : 当前脚本程序的简单描述、摘要、大纲;它会被显示在命令行选项的帮助之前
epilog : 当前脚本程序的简单描述、摘要、大纲;它会被它会被显示在命令行选项的帮助之后
conflict_handler : 命令行选项冲突处理器;比如,当命令行选项重复时,该如何让处理;可选值:error、resolve
add_help_option : 是否自动生成帮助信息;True:是; False:否; 默认值是True
option_list : 当前脚本程序的命令行选项列表;这个选项列表在standard_options_list中选项添加之后,
但是在版本和帮助选项添加之前;可以手工创建该列表,该列表中的元素都使用函数make_option()生成
例如 : option_list=[make_option("-f","--file",action="store",type="string",dest="filename"), ...]
option_class : 在使用函数add_option()添加命令行选项到解析器时使用的类;默认为optparse.Option类
version : 打印版本的信息
formatter : 帮助信息格式;有两种格式:IndentedHelpFormatter和TitledHelpFormatter;
其中,参数prog在usage和version中使用格式字符串"%prog"代替os.path.basename(sys.argv[0])
2、OptionParser.add_option(self, *args, **kwargs)
该函数用于添加命令行选项;参数*args用于传递一个命令行选项的列表;**kwargs用于传递该命令行选项的属性;有几种用法:
1、parser.add_option(self, optparse.Option):直接添加一个命令行选项类的实例
2、parser.add_option(self, option_list):直接添加一个命令行选项列表;option_list=[make_option(), ...]
3、parser.add_option(*opt_str, ..., kwarg=val, ...)
常用的是第三种;这种用法的函数原型如下:
optparse.OptionParser.add_option(short_option[, long_option], action="store", type="store", dest=None, nargs=1,
default=None, help="help text", metavar="");其中,参数如下:
description:
short_option : 短选项字符串;例如,"-f"、"-X"
long_option : 长选项字符串;例如,"--file"、"--start";长选项和短选项共同构成可选参数*args或*opt_str
action : 行为字符串;它指示optparse当解析到一个命令行选项时该如何处理;可选值store、store_true、store_false、store_const、
append、append_const、count、callback、help、version;默认值是store,表示把命令行参数保存到options对象中
type : 当action的值为存储方式时,type表示存储的数据的类型;有string、int、long、float、complex、choice
dest : 当action的值为存储方式时,dest表示用于存储数据(当前命令行选项附加的参数值)的变量,它会成为函数parse_args()返回的options对象的属性,
通过"对象名.属性名"的方式引用;如果不指定dest参数的值,则使用当前命令行选项的长选项名作为dest参数的缺省值和options对象的属性,
来存储当前命令行选项的附加参数值;如果当前命令行选项没有指定长选项,则使用短选项名作为dest参数的缺省值和options对象的属性,
来存储当前命令行选项的附加参数值
nargs : 指定当前命令行选项应该需要接受的附加参数值的个数;默认值为1;多个参数值之间用空格隔开;当在命令行为该选项输入的附加参数值的个数多于
nargs指定的个数时,则值取用前面的nargs个;当在命令行上为该选项输入的附加参数值的个数少于nargs所指定的个数时,则会报错;
如果nargs>1,则python解释器会把这nargs个参数值组装成一个元组(tuple),然后把这个元组传递给当前程序使用
default : 当action的值为存储方式时,default用于指定dest表示的属性变量的缺省值,即,当前命令行选项附加的参数的缺省值
help : 当前命令行选项的帮助、说明信息
metavar:占位字符串;用于在输出帮助信息时,代替当前命令行选项的附加参数的值进行输出;例如:"-f FILE --file FILE";这个例子中,字符串"FILE"就是metavar的值
例如 : add_option("-f", "--file", action="store", type="string", dest="fileName", default="file.txt", help="save host info", metavar="FILE");
当调用parse_args()函数之后,会返回一个options对象,dest参数的值"fileName"将作为options对象的属性名使用,即:options.fileName;同时把当前命令行选项的
附加参数值保存在该属性中,即:options.fileName="file.txt"
3、(options,args) = optparse.OptionParser.parse_args(self, args=None, values=None)
该函数用于解析命令行参数;其中,参数:
description:
args : 用于传递需要被解析的命令行选项列表;默认是sys.argv[1:]
values : 用于传递命令行选项的附加参数值的对象;是optparse.Values类的对象;
返回值:是一个包含(options,args)对的tuple
args : 所有被处理的参数之后的剩余参数
4、optparse.OptionParser.has_option(self, opt_str):
该函数用于判断OptionParser实例是否有一个名为opt_str的命令行选项;返回值:True-有; False-无;
5、optparse.OptionParser.get_option(self, opt_str):
该函数用于获取命令行选项opt_str的实例;若没有该选项,则返回None;
6、optparse.OptionParser.remove_option(self, opt_str):
该函数用于移除命令行选项opt_str;若OptionParser对象中存在命令行选项opt_str,则移除,否则抛出ValueError异常;
若存在多个opt_str的选项,则所有相关的选项都变成无效;
7、optparse.OptionParser.destroy() : 该函数用于销毁OptionParser对象实例;
"""
import sys
import optparse
# Module-level demo parser.  NOTE: Main() below builds its own parser; this
# one only receives the --start/--stop options registered at module level.
parser = optparse.OptionParser(usage="usage: %prog [options] arg1 arg2 .....", version="1.0",
                               description="This is optparse example code")
def doStop(option, opt_str, value, parser):  # minimal callback signature, no extra args
    """optparse callback for --stop: dump the callback arguments and parser state."""
    dump = (("option:", option), ("opt_str", opt_str), ("value:", value))
    for label, item in dump:
        print(label, item)
    print("stopping the web server ......")
    print("largs:", parser.largs)
    print("rargs:", parser.rargs)
def doStart(option, opt_str, value, parser, *args, **kwargs):  # full callback signature
    """optparse callback for --start: dump arguments, extras and parser state."""
    dump = (("option:", option), ("opt_str", opt_str), ("value:", value),
            ("*args:", args), ("*kwargs:", kwargs))
    for label, item in dump:
        print(label, item)
    print("starting the web server ......")
    print("largs:", parser.largs)
    print("rargs:", parser.rargs)
# Register --start (3 args; extra positional/keyword data forwarded to the
# callback) and --stop on the module-level demo parser.
parser.add_option("--start", action="callback", callback=doStart, callback_args=("192.168.0.253", 3307),
                  callback_kwargs={"user": "user", "pswd": "pwd"}, nargs=3, default=None, metavar="START")
parser.add_option("--stop", action="callback", callback=doStop, default=None, metavar="STOP")
def doStart(option, opt_str, value, parser, *args, **kwargs):
    """optparse --start callback (duplicate definition).

    NOTE(review): this redefines doStart from earlier in the module with an
    identical body; the later definition is the one in effect for any
    subsequent registrations.  Consider deleting one copy.
    """
    print("option:", option)
    print("opt_str", opt_str)
    print("value:", value)
    print("*args:", args)
    print("*kwargs:", kwargs)
    print("starting the web server ......")
    print("largs:", parser.largs)
    print("rargs:", parser.rargs)
def doStop(option, opt_str, value, parser):
    """optparse --stop callback (duplicate definition).

    NOTE(review): identical to the earlier doStop; the later definition
    shadows it.  Consider deleting one copy.
    """
    print("option:", option)
    print("opt_str", opt_str)
    print("value:", value)
    print("stopping the web server ......")
    print("largs:", parser.largs)
    print("rargs:", parser.rargs)
def Main(argc, argv):
    """Demo driver: build an OptionParser exercising every action type,
    parse ``argv`` and dump the resulting options.

    argc: number of command-line arguments (len(argv)).
    argv: argument list, i.e. sys.argv[1:].  Passed to parse_args explicitly
          so the function no longer silently depends on sys.argv.

    Fixes vs. original: the last five output lines were Python 2 ``print``
    statements — a SyntaxError under Python 3, which the rest of this file
    uses — and the print_help()/sys.exit(1) after parser.error() were
    unreachable (parser.error prints usage and exits itself).
    """
    strUsage = "Usage: %prog [option] args"
    parser = optparse.OptionParser(usage=strUsage, description="this program is used for study")
    parser.add_option("-f", "--file", action="store", type="string", dest="fileName", help="configation file",
                      metavar="FILE")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=True)
    parser.add_option("-q", "--quit", action="store_false", dest="verbose", default=False)
    parser.add_option("-H", "--host", action="store", type="string", dest="strHost", nargs=3, default="127.0.0.1",
                      help="Remote Host IP(IP1 IP2 IP3)", metavar="IP")
    parser.add_option("-p", "--port", action="store", type="int", dest="iPort", nargs=3, default="3306",
                      help="Remote Host Port(Port1 Port2 Port3)", metavar="PORT")
    parser.add_option("-u", "--user", action="store", type="string", dest="strUserName", default="admin",
                      help="Your user name", metavar="UserName")
    parser.add_option("-P", "--password", action="store", type="string", dest="strPassword", default="admin",
                      help="Your password", metavar="Password")
    parser.add_option("-i", "--input", action="store", type="string", dest="strInput", default="input.txt",
                      help="as a file for input", metavar="FILE")
    parser.add_option("--start", action="callback", callback=doStart, callback_args=("192.168.0.253", 3307),
                      callback_kwargs={"user": "user", "pswd": "pwd"}, nargs=3, default=None, metavar="START")
    parser.add_option("--stop", action="callback", callback=doStop, default=None, metavar="STOP")
    parser.add_option("-a", action="store_const", dest="const_value", default='default_const_value',
                      const='store_const default value', help='Set a constant const value')
    parser.add_option("-c", action="store_true", dest="boolean_switch", default=True, help='Set a switch to True')
    parser.add_option("-d", action="store_false", dest="boolean_switch", default=False, help='Set a switch to False')
    parser.add_option("-e", action="append", dest="collection", default=[], help='Add repeated values to a list')
    parser.add_option("-W", action="append_const", dest="const_collection", const='value-1-to-append', default=[],
                      help='Add different values to list')
    parser.add_option("-D", action="append_const", dest="const_collection", const='value-2-to-append', default=[],
                      help='Add different values to list')
    if (argc < 1):
        # parser.error() prints the usage plus this message and exits(2);
        # nothing after it runs, so the old print_help()/sys.exit(1) are gone.
        parser.error("invalid argument for commond line;")
    # Parse and handle the command-line arguments.
    (options, largs) = parser.parse_args(argv)
    if (options.fileName):
        print("read this file ......")
    if (options.strHost):
        print("connect to remote host ......")
    print("---------options-----------")
    print(options)
    print("---------largs-----------")
    print(largs)
    print("---------fileName-----------,", options.fileName)
    print("---------strHost-----------,", options.strHost)
    print("---------iPort-----------,", options.iPort, type(options.iPort))
    print("---------largs-----------,", parser.largs)
    print("---------rargs-----------,", parser.rargs)
    print("---------values-----------,", parser.values)
    print('store_const:\t', options.const_value)
    print('boolean_switch:\t', options.boolean_switch)
    print('collection:\t', options.collection)
    print('const_collection:\t', options.const_collection)
    print('const_collection:\t', options.const_collection)
if __name__ == "__main__":
    # Strip the program name; Main receives only the real arguments.
    argv = sys.argv[1:]
    argc = len(argv)
    Main(argc, argv)
from __future__ import absolute_import
import re
import json
import requests
from apis.base import BaseAPI, APIException
# Flickr API: https://www.flickr.com/services/api/
class FlickrPhotoAPI(BaseAPI):
    """Iterator yielding medium-size Flickr image URLs.

    If ``user`` is set, photos come from flickr.photos.search for that user;
    otherwise from flickr.photos.getRecent.  Iteration state (``self.cur``,
    ``self.max_imgs``) is presumably initialised by BaseAPI — confirm there.
    """
    # Static-image URL template, filled from a photo item's
    # farm/server/id/secret fields; "_m" selects the medium size.
    url_format = "https://farm{farm}.staticflickr.com/{server}/{id}_{secret}_m.jpg"
    per_page = 10  # API page size

    def __init__(self, user, max_imgs):
        super(FlickrPhotoAPI, self).__init__(user, max_imgs)
        self.window_cur = 0  # index into the currently loaded page
        self.get_api_key()
        self.load()

    def get_api_key(self):
        """Scrape the public site key from flickr.com's HTML.

        NOTE(review): the key is captured from the bytes body, so
        self.api_key is a bytes object; requests encodes it as a query
        parameter — confirm the API accepts it.
        Raises APIException when the key cannot be found.
        """
        r = requests.get("https://flickr.com/photos/")
        if r.status_code == 200:
            m = re.search(b'root.YUI_config.flickr.api.site_key = "(.+?)";', r.content)
            if m:
                self.api_key = m.group(1)
                return
        raise APIException("Can't get API key from flickr")

    def load(self, page=1):
        """Fetch one result page into self.items/self.total.

        Raises StopIteration on a non-200 response (ends iteration when
        reached via __next__; surprising if it fires during __init__) and
        APIException when the API reports a failure.
        """
        r = requests.get("https://api.flickr.com/services/rest", params={
            "method": "flickr.photos.search" if self.user else "flickr.photos.getRecent",
            "format": "json",
            "user_id": self.user,
            "api_key": self.api_key,
            "per_page": self.per_page,
            "page": page
        })
        self.page = page
        if r.status_code != 200:
            self.max_imgs = 0
            raise StopIteration()
        else:
            # The endpoint returns JSONP ("jsonFlickrApi(...)"); strip the wrapper.
            content = r.content.replace(b"jsonFlickrApi(", b"").rstrip(b")")
            self.json = json.loads(content)
            if self.json['stat'] == 'ok':
                self.total = int(self.json['photos']['total'])
                self.items = self.json['photos']['photo']
                self.window_cur = 0  # restart at the top of the new page
            else:
                raise APIException(self.json['message'])

    def __next__(self):
        """Return the next image URL, loading further pages on demand."""
        if self.cur >= self.max_imgs or self.cur >= self.total:
            raise StopIteration()
        if self.window_cur >= len(self.items):
            self.load(self.page+1)
        item = self.items[self.window_cur]
        self.window_cur += 1
        self.cur += 1
        return self.url_format.format(**item)
from aiohttp import web
from docker import Client
from .launcher import Launcher
async def create_container_handler(request):
    """POST /new: launch a container and return its hostname as plain text."""
    config = request.app['launch_config']
    launcher = request.app['launcher']
    hostname = await launcher.launch(**config)
    # Allow any origin so the workshop frontend can call this endpoint.
    cors = {'Access-Control-Allow-Origin': '*'}
    return web.Response(text=hostname, headers=cors)
def create_app():
    """Build the aiohttp application with its configuration and routes."""
    application = web.Application()
    configure_app(application)
    add_routes(application)
    return application
def configure_app(app):
    """Attach the container launcher and the default launch settings."""
    app['launcher'] = Launcher(docker_client=Client(), max_workers=4)
    app['launch_config'] = {
        'image': 'pyepics-workshop-ioc',
        'network': 'pyepics-workshop',
        'tty': True,
    }
def add_routes(app):
    """Register HTTP routes: POST /new creates a container."""
    router = app.router
    router.add_post('/new', create_container_handler)
def main():
    """Entry point: build the app and serve it with aiohttp's runner."""
    web.run_app(create_app())
|
# Generated by Django 3.1.3 on 2020-11-08 16:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.3).

    Adds start/end timestamps to Game, win/loss counters to both profile
    models, and a guest_id to GuestProfile.  Generated migrations are kept
    byte-identical; only documentation is added here.
    """

    dependencies = [
        ('core', '0003_auto_20201105_1100'),
    ]

    operations = [
        migrations.AddField(
            model_name='game',
            name='end_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='game',
            name='start_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='guestprofile',
            name='guest_id',
            field=models.CharField(blank=True, max_length=64, null=True),
        ),
        migrations.AddField(
            model_name='guestprofile',
            name='lost',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='guestprofile',
            name='won',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='lost',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='won',
            field=models.IntegerField(default=0),
        ),
    ]
|
# -*- coding: utf-8 -*-
import itertools
import time
from flask import Flask, jsonify, request, render_template
import os
import io
import firebase_admin
from firebase_admin import db
from firebase_admin import credentials, firestore
from google.cloud import storage
from PIL import Image
import requests
from io import BytesIO
import urllib.request as req
from PIL import Image
import glob
import json
import base64
import threading
from datetime import datetime
# from sqlalchemy import create_engine
# from flask_mysqldb import MySQL
from products import Product
from users import User
from datetime import date
from ast import literal_eval
from decimal import Decimal
import pysftp
import sys
import uuid
app = Flask(__name__)
# application.config['MYSQL_HOST'] = 'aad4ceauedwkx7.ctvp1qfizfsm.us-east-2.rds.amazonaws.com'
# application.config['MYSQL_USER'] = 'root'
# application.config['MYSQL_PASSWORD'] = '27031984As'
# application.config['MYSQL_DB'] = 'ebdb'
# mysql = MySQL(application)
# SECURITY NOTE(review): the service-account key file is referenced by name
# from the working directory — make sure it is not committed to the repo.
cred = credentials.Certificate("dom-marino-ws-firebase-adminsdk-x049u-1128490a39.json")
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "dom-marino-ws-firebase-adminsdk-x049u-1128490a39.json"
firebase_admin.initialize_app(cred)
# firebase_admin.initialize_app(cred, {
#     'databaseURL': 'https://dom-marino-ws.firebaseio.com/',
#     'storageBucket': 'dom-marino-ws.appspot.com'
# })
# Category selected for ad-hoc maintenance scripts below (others commented out).
# category = 'non_alcoholic_beverages'
# category = 'alcoholic_beverages'
# category = 'beers'
# category = 'candy_pizzas'
# category = 'flapts'
category = 'gourmet_pizzas'
# category = 'pizza_edges'
# category = 'traditional_pizzas'
# category = 'wines'
document_id = ''
imageurl = ''
thumbnailurl = ''
client = storage.Client()
# https://console.cloud.google.com/storage/browser/[bucket-id]/
bucket = client.get_bucket('dom-marino-ws.appspot.com')
# Then do other things...
# blob = bucket.get_blob('categories/beers/beer_icon.png')
# # print(blob.download_as_string())
# blob.upload_from_file('pictures/products/')
# blob2 = bucket.blob('products/' + category + document_id)
# blob2.upload_from_filename(filename='teste.txt')
# imgurl ="https://i.pinimg.com/originals/68/7c/ec/687cec1f523e3ee2b666c38e055a4d6d.png"
# req.urlretrieve(imgurl, "soft_drinks.png")
# Firestore client and one CollectionReference per product category; each
# category lives under products/<category>/<category>.
db = firestore.client()
todo_ref = db.collection('todos')
categories_ref = db.collection('categories')
users_ref = db.collection('users')
orders_ref = db.collection('orders')
non_alcoholic_beverages_ref = db.collection('products').document('non_alcoholic_beverages').collection(
    'non_alcoholic_beverages')
alcoholic_beverages_ref = db.collection('products').document('alcoholic_beverages').collection('alcoholic_beverages')
beers_ref = db.collection('products').document('beers').collection('beers')
candy_pizzas_ref = db.collection('products').document('candy_pizzas').collection('candy_pizzas')
flapts_ref = db.collection('products').document('flapts').collection('flapts')
gourmet_pizzas_ref = db.collection('products').document('gourmet_pizzas').collection('gourmet_pizzas')
pizza_edges_ref = db.collection('products').document('pizza_edges').collection('pizza_edges')
traditional_pizzas_ref = db.collection('products').document('traditional_pizzas').collection('traditional_pizzas')
wines_ref = db.collection('products').document('wines').collection('wines')
promotions_ref = db.collection('products').document('promotions').collection('promotions')
two_flavored_pizzas_ref = db.collection('products').document('two_flavored_pizzas').collection('two_flavored_pizzas')
# NOTE(review): users_ref is assigned twice (also a few lines above) — harmless
# duplicate, but one of the assignments can be removed.
users_ref = db.collection('users')
working_hours_ref = db.collection('workinghours')
# get all the png files from the current folder
# for infile in glob.glob("*.png"):
# for infile in glob.glob("soft_drinks.png"):
# im = Image.open(infile)
# # don't save if thumbnail already exists
# if infile[0:2] != "T_":
# # prefix thumbnail file with T_
# im.save("T_" + infile, "PNG")
#
# data = {}
# with open('T_soft_drinks.png', mode='rb') as file:
# img = file.read()
# data['img'] = base64.encodebytes(img).decode("utf-8")
#
# print(json.dumps(data))
# Demo/sample data — not used by the snapshot listeners below.
accounts = [
    {'name': "Billy", 'balance': 450.0},
    {'name': "Kelly", 'balance': 250.0}
]
# In-memory caches, one per Firestore collection; each is rebuilt in full by
# its on_*_snapshot listener whenever the collection changes.
all_categories = []
all_non_alcoholic_beverages = []
all_alcoholic_beverages = []
all_beers = []
all_pizza_edges = []
all_flapts = []
all_candy_pizzas = []
all_gourmet_pizzas = []
all_traditional_pizzas = []
all_wines = []
all_promotions = []
all_two_flavored_pizzas = []
all_orders = []
all_users = []
# HTTP GET
# HTTP POST
# HTTP PUT
# HTTP DELETE
# HTTP PATCH
# @app.route('/heroes', methods=['POST'])
# def create_hero():
#     req = request.json
#     hero = SUPERHEROES.push(req)
#     return jsonify({'id': hero.key}), 201
# Create a callback on_snapshot function to capture changes
def on_snapshot(doc_snapshot, changes, read_time):
for doc in doc_snapshot:
print(u'Received document snapshot: {}'.format(doc.id))
def on_categories_snapshot(doc_snapshot, changes, read_time):
# print("entrou")
# print("on_categories_snapshot, closed=", cat_watch._closed)
global all_categories
all_categories = []
for doc in doc_snapshot:
category = doc.to_dict()
all_categories.append(category)
# print(category["description"])
def on_nab_snapshot(doc_snapshot, changes, read_time):
global all_non_alcoholic_beverages
all_non_alcoholic_beverages = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = non_alcoholic_beverages_ref.document(doc.id).collection('images').stream()
prices = non_alcoholic_beverages_ref.document(doc.id).collection('prices').stream()
price_broto_stream = non_alcoholic_beverages_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = non_alcoholic_beverages_ref.document(doc.id).collection('prices').document(
'inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_non_alcoholic_beverages.append(product)
def on_ab_snapshot(doc_snapshot, changes, read_time):
global all_alcoholic_beverages
all_alcoholic_beverages = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = alcoholic_beverages_ref.document(doc.id).collection('images').stream()
prices = alcoholic_beverages_ref.document(doc.id).collection('prices').stream()
price_broto_stream = alcoholic_beverages_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = alcoholic_beverages_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_alcoholic_beverages.append(product)
def on_beers_snapshot(doc_snapshot, changes, read_time):
global all_beers
all_beers = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = beers_ref.document(doc.id).collection('images').stream()
prices = beers_ref.document(doc.id).collection('prices').stream()
price_broto_stream = beers_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = beers_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_beers.append(product)
def on_candy_pizzas_snapshot(doc_snapshot, changes, read_time):
global all_candy_pizzas
all_candy_pizzas = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = candy_pizzas_ref.document(doc.id).collection('images').stream()
prices = candy_pizzas_ref.document(doc.id).collection('prices').stream()
price_broto_stream = candy_pizzas_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = candy_pizzas_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_candy_pizzas.append(product)
def on_flapts_snapshot(doc_snapshot, changes, read_time):
global all_flapts
all_flapts = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = flapts_ref.document(doc.id).collection('images').stream()
prices = flapts_ref.document(doc.id).collection('prices').stream()
price_broto_stream = flapts_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = flapts_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_flapts.append(product)
def on_pizza_edges_snapshot(doc_snapshot, changes, read_time):
global all_pizza_edges
all_pizza_edges = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = pizza_edges_ref.document(doc.id).collection('images').stream()
prices = pizza_edges_ref.document(doc.id).collection('prices').stream()
price_broto_stream = pizza_edges_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = pizza_edges_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_pizza_edges.append(product)
def on_traditional_pizzas_snapshot(doc_snapshot, changes, read_time):
global all_traditional_pizzas
all_traditional_pizzas = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = traditional_pizzas_ref.document(doc.id).collection('images').stream()
prices = traditional_pizzas_ref.document(doc.id).collection('prices').stream()
price_broto_stream = traditional_pizzas_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = traditional_pizzas_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_traditional_pizzas.append(product)
def on_gourmet_pizzas_snapshot(doc_snapshot, changes, read_time):
global all_gourmet_pizzas
all_gourmet_pizzas = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = gourmet_pizzas_ref.document(doc.id).collection('images').stream()
prices = gourmet_pizzas_ref.document(doc.id).collection('prices').stream()
price_broto_stream = gourmet_pizzas_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = gourmet_pizzas_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_gourmet_pizzas.append(product)
def on_wines_snapshot(doc_snapshot, changes, read_time):
global all_wines
all_wines = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = wines_ref.document(doc.id).collection('images').stream()
prices = wines_ref.document(doc.id).collection('prices').stream()
price_broto_stream = wines_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = wines_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_wines.append(product)
def on_promotions_snapshot(doc_snapshot, changes, read_time):
global all_promotions
all_promotions = []
for doc in doc_snapshot:
product = doc.to_dict()
# print(u'Document snapshot: {}'.format(doc.to_dict()))
# product = Product.from_dict(doc.to_dict())
images = promotions_ref.document(doc.id).collection('images').stream()
prices = promotions_ref.document(doc.id).collection('prices').stream()
price_broto_stream = promotions_ref.document(doc.id).collection('prices').document('broto').get()
price_inteira_stream = promotions_ref.document(doc.id).collection('prices').document('inteira').get()
price_broto = price_broto_stream.to_dict().get('price', '')
price_inteira = price_inteira_stream.to_dict().get('price', '')
if price_broto != None:
product.update({'price_broto': price_broto})
else:
product.update({'price_broto': None})
if price_inteira != None:
product.update({'price_inteira': price_inteira})
else:
product.update({'price_inteira': None})
for image in images:
product.update({'image': image.to_dict().get('url', '')})
# doc.collection('images').on_snapshot(on_nab_images_snapshot)
size_prices = {'prices': {}}
for size_id in prices:
size_prices['prices'][size_id.id] = size_id.to_dict()
product.update(size_prices)
all_promotions.append(product)
def on_two_flavored_pizzas_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: rebuild the ``all_two_flavored_pizzas`` cache.

    Mirrors the other product listeners: attaches the image URL, the
    per-size price documents, and flat broto/inteira prices (None when
    the size document or its ``price`` field is missing).
    """
    global all_two_flavored_pizzas
    all_two_flavored_pizzas = []
    for doc in doc_snapshot:
        product = doc.to_dict()
        images = two_flavored_pizzas_ref.document(doc.id).collection('images').stream()
        prices = two_flavored_pizzas_ref.document(doc.id).collection('prices').stream()
        # Bug fix: guard against a missing size document (to_dict() is
        # None -> AttributeError before) and drop the dead `!= None`
        # branches ( .get('price', '') could never return None).
        broto_doc = two_flavored_pizzas_ref.document(doc.id).collection('prices').document('broto').get().to_dict() or {}
        inteira_doc = two_flavored_pizzas_ref.document(doc.id).collection('prices').document('inteira').get().to_dict() or {}
        product.update({'price_broto': broto_doc.get('price')})
        product.update({'price_inteira': inteira_doc.get('price')})
        # Only the last image URL survives -- TODO confirm one image/doc.
        for image in images:
            product.update({'image': image.to_dict().get('url', '')})
        size_prices = {'prices': {}}
        for size_id in prices:
            size_prices['prices'][size_id.id] = size_id.to_dict()
        product.update(size_prices)
        all_two_flavored_pizzas.append(product)
def on_users_snapshot(doc_snapshot, changes, read_time):
    """Firestore listener: refresh the module-level ``all_users`` cache.

    Replaces the cache wholesale with the dict form of every user
    document in the snapshot; ``changes`` and ``read_time`` are part of
    the listener signature but unused.
    """
    global all_users
    all_users = [snapshot.to_dict() for snapshot in doc_snapshot]
# Watch the document
# Subscribe one real-time snapshot listener per collection; each callback
# rebuilds its module-level cache list (all_categories, all_beers, ...)
# whenever the collection changes. The watch handles are kept in module
# globals so monitor_watches() can re-subscribe closed ones.
cat_watch = categories_ref.on_snapshot(on_categories_snapshot)
nab_watch = non_alcoholic_beverages_ref.on_snapshot(on_nab_snapshot)
ab_watch = alcoholic_beverages_ref.on_snapshot(on_ab_snapshot)
beers_watch = beers_ref.on_snapshot(on_beers_snapshot)
candy_pizzas_watch = candy_pizzas_ref.on_snapshot(on_candy_pizzas_snapshot)
flapts_watch = flapts_ref.on_snapshot(on_flapts_snapshot)
pizza_edges_watch = pizza_edges_ref.on_snapshot(on_pizza_edges_snapshot)
traditional_pizzas_watch = traditional_pizzas_ref.on_snapshot(on_traditional_pizzas_snapshot)
gourmet_pizzas_watch = gourmet_pizzas_ref.on_snapshot(on_gourmet_pizzas_snapshot)
wines_watch = wines_ref.on_snapshot(on_wines_snapshot)
promotions_watch = promotions_ref.on_snapshot(on_promotions_snapshot)
two_flavored_pizzas_watch = two_flavored_pizzas_ref.on_snapshot(on_two_flavored_pizzas_snapshot)
users_watch = users_ref.on_snapshot(on_users_snapshot)
def monitor_watches():
    """Re-subscribe any Firestore snapshot listener that has closed.

    Reschedules itself every 30 seconds via threading.Timer. Relies on
    the private Watch attribute ``_closed`` -- NOTE(review): confirm it
    still exists in the installed google-cloud-firestore version.
    """
    # (global watch name, collection ref, callback) for every listener;
    # replaces thirteen identical copy-pasted if-blocks.
    watches = [
        ('cat_watch', categories_ref, on_categories_snapshot),
        ('nab_watch', non_alcoholic_beverages_ref, on_nab_snapshot),
        ('ab_watch', alcoholic_beverages_ref, on_ab_snapshot),
        ('beers_watch', beers_ref, on_beers_snapshot),
        ('candy_pizzas_watch', candy_pizzas_ref, on_candy_pizzas_snapshot),
        ('flapts_watch', flapts_ref, on_flapts_snapshot),
        ('pizza_edges_watch', pizza_edges_ref, on_pizza_edges_snapshot),
        ('traditional_pizzas_watch', traditional_pizzas_ref, on_traditional_pizzas_snapshot),
        ('gourmet_pizzas_watch', gourmet_pizzas_ref, on_gourmet_pizzas_snapshot),
        ('wines_watch', wines_ref, on_wines_snapshot),
        ('promotions_watch', promotions_ref, on_promotions_snapshot),
        ('two_flavored_pizzas_watch', two_flavored_pizzas_ref, on_two_flavored_pizzas_snapshot),
        ('users_watch', users_ref, on_users_snapshot),
    ]
    # Schedule the next check first, exactly as the original code did.
    threading.Timer(30.0, monitor_watches).start()
    for name, ref, callback in watches:
        # The handles live as module globals; rebind through globals().
        if globals()[name]._closed:
            globals()[name] = ref.on_snapshot(callback)
monitor_watches()
def setImageUrl(url):
    # Store the most recent image URL in the module-level `imageurl`.
    global imageurl
    imageurl = url
@app.route("/")
def home():
    """Serve the landing page template."""
    return render_template("index.html")
@app.route("/accounts", methods=["GET"])
def getAccounts():
    """Return the full in-memory ``accounts`` list as JSON."""
    return jsonify(accounts)
# @app.route("/img", methods=["GET"])
# def getImages():
# return json.dumps(data)
@app.route("/account/<id>", methods=["GET"])
def getAccount(id):
    """Return one account by its 1-based ``id`` from the in-memory list.

    Returns 404 for non-numeric or out-of-range ids, which previously
    escaped as unhandled ValueError/IndexError (HTTP 500).
    """
    try:
        index = int(id) - 1
        if index < 0:
            # Negative indices would silently wrap around the list.
            raise IndexError(id)
        return jsonify(accounts[index])
    except (ValueError, IndexError):
        return jsonify({"error": "account not found"}), 404
@app.route("/add", methods=['GET', 'POST'])
def create():
    """
    create() : Add document to Firestore collection with request body
    Ensure you pass a custom ID as part of json body in post request
    e.g. json={'id': '1', 'title': 'Write a blog post'}

    NOTE(review): currently inserts a hard-coded demo User; the
    commented lines show the request.json variants. The old unused
    ``data`` dict and ``id`` locals were removed.
    """
    try:
        user = User(uid=u'Tokyo', register_date=u'21/09/2019', main_address_id=u'main_address_id', image=u'image',
                    name=u'Tokyo', phone=None, email=u'Japan')
        todo_ref.add(user.to_dict())
        # todo_ref.document(id).set(request.json)
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route("/makeorder", methods=['POST'])
def makeorder():
    """Persist an order sent as JSON and recompute its total server-side.

    Orders are bucketed per day: orders/<date>/<date>/<order-id>, with
    the ordered items in a ``products_id`` sub-collection. Item prices
    are looked up in the in-memory caches (never trusted from the
    client) and the stored total is overwritten with the recomputed sum
    plus the delivery tax (unless delivery_address == "retirada").
    """
    # dd/mm/YY
    # today = datetime.now()
    # # today = today.strftime("%d-%m-%Y")
    # today = today.strftime("%Y-%m-%d %H:%M:%S")
    # Client-supplied timestamp 'YYYY-MM-DD HH:MM:SS'; today[:-9] strips
    # the time part, leaving the date used as the daily bucket id.
    today = request.get_json().get('date_time')
    startdata = {
        u'id': u'{0}'.format(today[:-9])
    }
    thisOrderRef = orders_ref.document(today[:-9])
    thisOrderRef.set(startdata)
    thisOrderRef = thisOrderRef.collection(today[:-9])
    order_ref_for_update = thisOrderRef
    # print("hoje é: {0}".format(today))
    try:
        coupon_id = request.get_json().get('coupon_id')
        delivery = request.get_json().get('delivery')
        payment_method = request.get_json().get('payment_method')
        payment_change = request.get_json().get('payment_change')
        delivery_address = request.get_json().get('delivery_address')
        total = request.get_json().get('total')
        userId = request.get_json().get('userId')
        id = thisOrderRef.document().id
        products_id = request.get_json().get('products_id')
        # print(products_id)
        # Order header; every field is stored as a string.
        data = {
            u'coupon_id': u'{}'.format(coupon_id),
            u'dateTime': u'{}'.format(today),
            u'id': u'{}'.format(id),
            u'delivery': u'{}'.format(delivery),
            u'payment_method': u'{}'.format(payment_method),
            u'payment_change': u'{}'.format(payment_change),
            u'delivery_address': u'{}'.format(delivery_address),
            u'total': u'{}'.format(total),
            u'userId': u'{}'.format(userId)
        }
        thisOrderRef.document(id).set(data)
        thisOrderRef = thisOrderRef.document(id).collection('products_id')
        #product.update({'price_broto': None})
        # product_dict = literal_eval(products_id)
        # products_id arrives as a Python-repr-style dict string; the
        # naive quote swap breaks if any value contains an apostrophe --
        # TODO send real JSON from the client instead.
        json_acceptable_string = products_id.replace("'", "\"")
        product_dict = json.loads(json_acceptable_string)
        # print(product_dict)
        total_paid = Decimal('0.00')
        for key, value in product_dict.items():
            product = value
            thisId = thisOrderRef.document().id
            paid_price = 0.00
            pizza_edge_price = 0.00
            pizza_edge_description = ""
            product_description = ""
            img_url = ""
            all_items = []
            # isTwoFlavoredPizza == 0 -> a regular single-flavour item.
            if product.get("isTwoFlavoredPizza") == 0:
                # Pick the in-memory cache matching the item's category.
                if product.get("product1_category") == "beers":
                    all_items.extend(all_beers)
                elif product.get("product1_category") == "alcoholic_beverages":
                    all_items.extend(all_alcoholic_beverages)
                elif product.get("product1_category") == "flapts":
                    all_items.extend(all_flapts)
                elif product.get("product1_category") == "non_alcoholic_beverages":
                    all_items.extend(all_non_alcoholic_beverages)
                elif product.get("product1_category") == "promotions":
                    all_items.extend(all_promotions)
                elif product.get("product1_category") == "wines":
                    all_items.extend(all_wines)
                elif product.get("product1_category") == "candy_pizzas":
                    all_items.extend(all_candy_pizzas)
                elif product.get("product1_category") == "gourmet_pizzas":
                    all_items.extend(all_gourmet_pizzas)
                elif product.get("product1_category") == "traditional_pizzas":
                    all_items.extend(all_traditional_pizzas)
                if "pizza" not in product.get("product1_category"):
                    # Non-pizza items carry a single flat price.
                    for item in all_items:
                        if item.get('id') == product.get("product_id"):
                            paid_price = item.get("price")
                            product_description = item.get('description')
                            img_url = item.get('image')
                else:
                    # Pizzas: price depends on size, plus an optional
                    # stuffed edge ("pizza_edge_id" is the string "null"
                    # when no edge was chosen).
                    if product.get("pizza_edge_id") != "null":
                        for pizza_edge in all_pizza_edges:
                            if pizza_edge.get('id') == product.get("pizza_edge_id"):
                                pizza_edge_description = pizza_edge.get("description")
                                if product.get("size") == "Broto":
                                    pizza_edge_price = pizza_edge.get("price_broto")
                                if product.get("size") == "Inteira":
                                    pizza_edge_price = pizza_edge.get("price_inteira")
                    for item in all_items:
                        if item.get('id') == product.get("product_id"):
                            product_description = item.get('description')
                            img_url = item.get('image')
                            if product.get("size") == "Broto":
                                paid_price = item.get("price_broto")
                            if product.get("size") == "Inteira":
                                paid_price = item.get("price_inteira")
                    new_price = Decimal(paid_price)+Decimal(pizza_edge_price)
                    paid_price = round(new_price, 2)
            else:
                # Two-flavoured pizza: charged at the dearer half + edge.
                product1_price = 0.00
                product2_price = 0.00
                if product.get("pizza_edge_id") != "null":
                    for pizza_edge in all_pizza_edges:
                        if pizza_edge.get('id') == product.get("pizza_edge_id"):
                            pizza_edge_description = pizza_edge.get("description")
                            if product.get("size") == "Broto":
                                pizza_edge_price = pizza_edge.get("price_broto")
                            if product.get("size") == "Inteira":
                                pizza_edge_price = pizza_edge.get("price_inteira")
                if product.get("product1_category") == "traditional_pizzas":
                    all_items.extend(all_traditional_pizzas)
                elif product.get("product1_category") == "gourmet_pizzas":
                    all_items.extend(all_gourmet_pizzas)
                elif product.get("product1_category") == "candy_pizzas":
                    all_items.extend(all_candy_pizzas)
                for product1 in all_items:
                    if product1.get('id') == product.get("product_id"):
                        product_description = product1.get('description')
                        img_url = "https://storage.googleapis.com/dom-marino-ws.appspot.com/categories/custom/two_flavored_pizza_image.png"
                        if product.get("size") == "Broto":
                            product1_price = product1.get("price_broto")
                        if product.get("size") == "Inteira":
                            product1_price = product1.get("price_inteira")
                # Second half: rebuild the candidate list for category 2.
                all_items = []
                if product.get("product2_category") == "traditional_pizzas":
                    all_items.extend(all_traditional_pizzas)
                elif product.get("product2_category") == "gourmet_pizzas":
                    all_items.extend(all_gourmet_pizzas)
                elif product.get("product2_category") == "candy_pizzas":
                    all_items.extend(all_candy_pizzas)
                for product2 in all_items:
                    if product2.get('id') == product.get("product2_id"):
                        product_description += " + "+product2.get('description')
                        if product.get("size") == "Broto":
                            product2_price = product2.get("price_broto")
                        if product.get("size") == "Inteira":
                            product2_price = product2.get("price_inteira")
                product1_decimal_price = Decimal(product1_price)
                product2_decimal_price = Decimal(product2_price)
                max_price = max(product1_decimal_price, product2_decimal_price)
                pizza_edge_decimal_price = Decimal(pizza_edge_price)
                max_price_decimal = Decimal(max_price)
                new_price = max_price_decimal+pizza_edge_decimal_price
                paid_price = new_price
            # One document per ordered item, again all-string fields.
            thisProduct = {
                u'category': u'{}'.format(product.get("category")),
                u'notes': u'{}'.format(product.get("notes")),
                u'id': u'{}'.format(thisId),
                u'paid_price': u'{}'.format(paid_price),
                u'pizza_edge_id': u'{}'.format(product.get("pizza_edge_id")),
                u'pizza_edge_description': u'{}'.format(pizza_edge_description),
                u'pizza_edge_paid_price': u'{}'.format(pizza_edge_price),
                u'product1_category': u'{}'.format(product.get("product1_category")),
                u'product2_category': u'{}'.format(product.get("product2_category")),
                u'product2_id': u'{}'.format(product.get("product2_id")),
                u'product_description': u'{}'.format(product_description),
                u'product_id': u'{}'.format(product.get("product_id")),
                u'product_image_url': u'{}'.format(img_url),
                u'quantity': u'{}'.format(product.get("quantity")),
                u'isTwoFlavoredPizza': u'{}'.format(product.get("isTwoFlavoredPizza")),
                u'size': u'{}'.format(product.get("size"))
            }
            total_paid += Decimal(paid_price)*Decimal(product.get("quantity"))
            thisOrderRef.document(thisId).set(thisProduct)
        # Delivery tax applies unless the customer picks the order up.
        delivery_tax_ref_snapshot = db.collection('delivery_tax').document('current_tax').get()
        tax = delivery_tax_ref_snapshot.to_dict()['value']
        if delivery_address.lower() != "retirada":
            total_paid += Decimal(tax)
        # Overwrite the client-sent total with the server-computed one.
        order_ref_for_update.document(id).update({u'total': str(round(total_paid, 2))})
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/get_working_hours', methods=['GET'])
def get_working_hours():
    """Return the working-hours document for the ``weekDay`` query arg."""
    day_snapshot = working_hours_ref.document(request.args.get('weekDay')).get()
    return jsonify(day_snapshot.to_dict()), 200
@app.route('/list_user_orders', methods=['GET'])
def list_user_orders():
    """Return every order placed by the user given in the ``id`` query arg.

    Orders live under orders/<date>/<date>/<order-id>; each order gets a
    ``products_id`` map of its item documents.
    """
    try:
        # Bug fix: the try previously covered only the final return, so
        # Firestore errors raised during the loops escaped as 500s.
        all_orders = []
        user_id = request.args.get('id')
        for doc in orders_ref.stream():
            day_orders = orders_ref.document(doc.id).collection(doc.id).where(u'userId', u'==', user_id).stream()
            for order in day_orders:
                this_order = order.to_dict()
                products_stream = orders_ref.document(doc.id).collection(doc.id).document(order.id).collection("products_id").stream()
                this_order.update({"products_id": {p.id: p.to_dict() for p in products_stream}})
                all_orders.append(this_order)
        return jsonify(all_orders), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_categories', methods=['GET'])
def list_categories():
    """Return all cached categories, or one selected by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_categories) == 0:
        time.sleep(1)
    try:
        cat_id = request.args.get('id')
        if cat_id:
            # Bug fix: the old code initialised the result to the builtin
            # `object` class, which jsonify cannot serialise on a miss.
            category = next((e for e in all_categories if e['id'] == cat_id), None)
            if category is None:
                return jsonify({"error": "category not found"}), 404
            return jsonify(category), 200
        else:
            return jsonify(all_categories), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/create_user', methods=['GET', 'POST'])
def create_user():
    """Create or overwrite the user document keyed by form field ``uid``.

    Reads the profile fields from the multipart form. When
    ``hasImageFile`` is "True", the uploaded image is pushed to the
    static web server over SFTP and its public URL replaces ``img_url``;
    an empty ``img_url`` without an upload falls back to a default avatar.
    """
    # user_id = users_ref.document().id
    # print("Posted file: {}".format(request.files['image_file']))
    # file = request.files['image_file']
    # files = {'file': file.read()}
    uid = request.form['uid']
    name = request.form['name']
    email = request.form['email']
    phone = request.form['phone']
    street = request.form['street']
    streetNumber = request.form['streetNumber']
    neighborhood = request.form['neighborhood']
    city = request.form['city']
    imgUrl = request.form['img_url']
    isRegisterComplete = request.form['isRegisterComplete']
    # print('entrou2', file=sys.stdout, flush=True)
    if request.form['hasImageFile'] == "True":
        image = request.files['image_file'].read()
        print('imagem não é nula', file=sys.stdout, flush=True)
        # print(u'Received document snapshot: {}'.format(doc.id))
        # session = ftplib.FTP_TLS('157.230.167.73', 'root', '27031984As')
        # # file = open('kitten.jpg', 'rb') # file to send
        # session.storbinary('STOR /var/www/powermemes.com/dommarino/{}.jpg'.format(uid), image) # send the file
        # image.close() # close file and FTP
        # session.quit()
        # SECURITY NOTE(review): credentials are hard-coded and
        # hostkeys = None disables host-key verification (MITM risk) --
        # move to config/secrets and pin the server's host key.
        cnopts = pysftp.CnOpts()
        cnopts.hostkeys = None
        with pysftp.Connection(host='157.230.167.73', username='root', password='27031984As', cnopts=cnopts) as sftp:
            print("Connection succesfully stablished ... ")
            # Switch to a remote directory
            # One directory per user, created on the first upload.
            if not sftp.isdir('/var/www/powermemes.com/htdocs/dommarino/userimg/{}'.format(uid)):
                sftp.mkdir('/var/www/powermemes.com/htdocs/dommarino/userimg/{}'.format(uid))
            sftp.cwd('/var/www/powermemes.com/htdocs/dommarino/userimg/{}'.format(uid))
            # Unique file name so a re-upload never clashes with the old image.
            img_id = str(uuid.uuid1())
            print('imge id={}'.format(img_id))
            f = sftp.open('/var/www/powermemes.com/htdocs/dommarino/userimg/{0}/{1}.png'.format(uid, img_id), 'wb')
            f.write(image)
            # sftp.put(image.file.name, '/var/www/powermemes.com/dommarino/{}.jpg'.format(uid))
            # print(products_id)
            imgUrl = "https://powermemes.com/dommarino/userimg/{0}/{1}.png".format(uid, img_id)
    elif imgUrl=="":
        # Default avatar when no image was uploaded or supplied.
        imgUrl="https://powermemes.com/dommarino/userimg/avatar.png"
    # All fields are stored as strings, matching the rest of the service.
    data = {
        u'uid': u'{}'.format(uid),
        u'name': u'{}'.format(name),
        u'email': u'{}'.format(email),
        u'phone': u'{}'.format(phone),
        u'street': u'{}'.format(street),
        u'streetNumber': u'{}'.format(streetNumber),
        u'neighborhood': u'{}'.format(neighborhood),
        u'city': u'{}'.format(city),
        u'image_url': u'{}'.format(imgUrl),
        u'isRegisterComplete': u'{}'.format(isRegisterComplete),
    }
    users_ref.document(uid).set(data)
    print(data)
    return jsonify({"success": True}), 200
    # print(image)
@app.route('/list_users', methods=['GET'])
def list_users():
    """Return every cached user, or one fetched live by the ``uid`` query arg."""
    try:
        user_id = request.args.get('uid')
        if user_id:
            user = users_ref.document(user_id).get().to_dict()
            if user is None:
                # Bug fix: to_dict() is None for a missing document; the
                # old code returned jsonify(None) with a 200.
                return jsonify({"error": "user not found"}), 404
            return jsonify(user), 200
        else:
            return jsonify(all_users), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_non_alcoholic_beverages', methods=['GET'])
def list_non_alcoholic_beverages():
    """Return all cached non-alcoholic beverages, or one by ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_non_alcoholic_beverages) == 0:
        time.sleep(1)
    try:
        nab_id = request.args.get('id')
        if nab_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            nab = next((e for e in all_non_alcoholic_beverages if e['id'] == nab_id), None)
            if nab is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(nab), 200
        else:
            return jsonify(all_non_alcoholic_beverages), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_alcoholic_beverages', methods=['GET'])
def list_alcoholic_beverages():
    """Return all cached alcoholic beverages, or one by ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_alcoholic_beverages) == 0:
        time.sleep(1)
    try:
        ab_id = request.args.get('id')
        if ab_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            ab = next((e for e in all_alcoholic_beverages if e['id'] == ab_id), None)
            if ab is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(ab), 200
        else:
            return jsonify(all_alcoholic_beverages), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_beers', methods=['GET'])
def list_beers():
    """Return all cached beers, or one selected by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_beers) == 0:
        time.sleep(1)
    try:
        beer_id = request.args.get('id')
        if beer_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            beer = next((e for e in all_beers if e['id'] == beer_id), None)
            if beer is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(beer), 200
        else:
            return jsonify(all_beers), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_candy_pizzas', methods=['GET'])
def list_candy_pizzas():
    """Return all cached candy pizzas, or one selected by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_candy_pizzas) == 0:
        time.sleep(1)
    try:
        candypizza_id = request.args.get('id')
        if candypizza_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            candypizza = next((e for e in all_candy_pizzas if e['id'] == candypizza_id), None)
            if candypizza is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(candypizza), 200
        else:
            return jsonify(all_candy_pizzas), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_flapts', methods=['GET'])
def list_flapts():
    """Return all cached flapts, or one selected by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_flapts) == 0:
        time.sleep(1)
    try:
        flapts_id = request.args.get('id')
        if flapts_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            flapt = next((e for e in all_flapts if e['id'] == flapts_id), None)
            if flapt is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(flapt), 200
        else:
            return jsonify(all_flapts), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_pizza_edges', methods=['GET'])
def list_pizza_edges():
    """Return all cached pizza edges, or one selected by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_pizza_edges) == 0:
        time.sleep(1)
    try:
        pizza_edge_id = request.args.get('id')
        if pizza_edge_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            pizza_edge = next((e for e in all_pizza_edges if e['id'] == pizza_edge_id), None)
            if pizza_edge is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(pizza_edge), 200
        else:
            return jsonify(all_pizza_edges), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_products', methods=['GET'])
def list_products():
    """Return the products of one category, or one product by ``id``.

    Query args: ``category_id`` selects the in-memory cache to serve
    from; ``id`` optionally selects a single product within it. An
    unknown category yields an empty list, as before.
    """
    try:
        product_id = request.args.get('id')
        category_id = request.args.get('category_id')
        # category_id -> name of its module-level snapshot cache; this
        # table replaces an 11-branch copy-pasted if/elif chain.
        caches = {
            "beers": "all_beers",
            "alcoholic_beverages": "all_alcoholic_beverages",
            "flapts": "all_flapts",
            "non_alcoholic_beverages": "all_non_alcoholic_beverages",
            "promotions": "all_promotions",
            "wines": "all_wines",
            "candy_pizzas": "all_candy_pizzas",
            "gourmet_pizzas": "all_gourmet_pizzas",
            "traditional_pizzas": "all_traditional_pizzas",
            "pizza_edges": "all_pizza_edges",
            "two_flavored_pizzas": "all_two_flavored_pizzas",
        }
        all_items = []
        cache_name = caches.get(category_id)
        if cache_name is not None:
            # Wait for the snapshot listener to populate the cache once.
            while len(globals()[cache_name]) == 0:
                time.sleep(1)
            all_items.extend(globals()[cache_name])
        if product_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            product = next((e for e in all_items if e['id'] == product_id), None)
            if product is None:
                return jsonify({"error": "product not found"}), 404
            return jsonify(product), 200
        else:
            return jsonify(all_items), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_traditional_pizzas', methods=['GET'])
def list_traditional_pizzas():
    """Return all cached traditional pizzas, or one by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_traditional_pizzas) == 0:
        time.sleep(1)
    try:
        trad_pizza_id = request.args.get('id')
        if trad_pizza_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            trad_pizza = next((e for e in all_traditional_pizzas if e['id'] == trad_pizza_id), None)
            if trad_pizza is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(trad_pizza), 200
        else:
            return jsonify(all_traditional_pizzas), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_gourmet_pizzas', methods=['GET'])
def list_gourmet_pizzas():
    """Return all cached gourmet pizzas, or one by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_gourmet_pizzas) == 0:
        time.sleep(1)
    try:
        gourmet_pizza_id = request.args.get('id')
        if gourmet_pizza_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            gourmet_pizza = next((e for e in all_gourmet_pizzas if e['id'] == gourmet_pizza_id), None)
            if gourmet_pizza is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(gourmet_pizza), 200
        else:
            return jsonify(all_gourmet_pizzas), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_wines', methods=['GET'])
def list_wines():
    """Return all cached wines, or one selected by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_wines) == 0:
        time.sleep(1)
    try:
        wine_id = request.args.get('id')
        if wine_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            wine = next((e for e in all_wines if e['id'] == wine_id), None)
            if wine is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(wine), 200
        else:
            return jsonify(all_wines), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_promotions', methods=['GET'])
def list_promotions():
    """Return all cached promotions, or one selected by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_promotions) == 0:
        time.sleep(1)
    try:
        promotion_id = request.args.get('id')
        if promotion_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            promotion = next((e for e in all_promotions if e['id'] == promotion_id), None)
            if promotion is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(promotion), 200
        else:
            return jsonify(all_promotions), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/list_two_flavored_pizzas', methods=['GET'])
def list_two_flavored_pizzas():
    """Return all cached two-flavored pizzas, or one by the ``id`` query arg."""
    # Wait for the snapshot listener to populate the cache at least once.
    while len(all_two_flavored_pizzas) == 0:
        time.sleep(1)
    try:
        two_flavored_pizza_id = request.args.get('id')
        if two_flavored_pizza_id:
            # Bug fix: old code jsonified the `object` builtin on a miss.
            two_flavored_pizza = next((e for e in all_two_flavored_pizzas if e['id'] == two_flavored_pizza_id), None)
            if two_flavored_pizza is None:
                return jsonify({"error": "item not found"}), 404
            return jsonify(two_flavored_pizza), 200
        else:
            return jsonify(all_two_flavored_pizzas), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/user', methods=['GET'])
def retrieve_user():
    """Return one user document plus the ids of their orders.

    The ``orders_id`` sub-collection is flattened into a
    {order_id: {"id": order_id}} map on the returned user dict.
    """
    try:
        id = request.args.get('id')
        user = users_ref.document(id).get().to_dict()
        if user is None:
            # Bug fix: a missing user previously fell through to a
            # TypeError on user['orders_id'] and returned an error string.
            return jsonify({"error": "user not found"}), 404
        orders_id_snapshot = users_ref.document(id).collection("orders_id").stream()
        user['orders_id'] = {order.id: {"id": order.id} for order in orders_id_snapshot}
        return jsonify(user), 200
    except Exception as e:
        print('error')
        return f"An Error Occured: {e}"
@app.route('/update', methods=['POST', 'PUT'])
def update():
    """
    update() : Update document in Firestore collection with request body
    Ensure you pass a custom ID as part of json body in post request
    e.g. json={'id': '1', 'title': 'Write a blog post today'}
    """
    try:
        # The entire JSON body (including 'id') is merged onto the document.
        id = request.json['id']
        todo_ref.document(id).update(request.json)
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route('/delete', methods=['GET', 'DELETE'])
def delete():
    """
    delete() : Delete a document from Firestore collection
    """
    try:
        # Check for ID in URL query
        # NOTE(review): success is reported even when the id does not
        # exist -- confirm that is the desired contract.
        todo_id = request.args.get('id')
        todo_ref.document(todo_id).delete()
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An Error Occured: {e}"
@app.route("/account", methods=['GET', 'POST'])
def addAccount():
    """Placeholder endpoint: always returns the second hard-coded account.

    The dead commented-out MySQL-insert experiment was removed;
    persistence for new accounts is not implemented yet.
    """
    # TODO: actually create an account from the request payload.
    return jsonify(accounts[1])
# Bind to the platform-provided port (PaaS hosts set PORT); 8080 locally.
port = int(os.environ.get('PORT', 8080))
if __name__ == '__main__':
    # application.run(debug=True)#, host='0.0.0.0',port=5000)
    # threaded=True lets Flask serve requests while the Firestore
    # snapshot listener threads run in the background.
    app.run(threaded=True, host='0.0.0.0', port=port)
|
__author__ = 'Li Bai'
"""the available data are loaded from nordhavn3_april.csv, and the explanatory variables are weather forecasts
('temperature', 'humidity', 'DNI (Direct normal irradiance)', 'windspeed') and
the output is heat load. Considering the time-frequency domain analysis, 'Day sin',
'Day cos', 'Week sin', 'Week cos' are added as input features for feature selection
with PACF analysis, it can be seen that all the weather related variables are 3-lagged correlated. therefore,
all the above variables are considered at time t, t-1 and t-2; However, for the heat load for day-ahead forecast,
only the heat load before t-24 can be known ahead of time, thus only heat load at t-24 and t-25 are used as inputs
The feature selection results in this dataset are ['heat-lag-0','heat-lag-24', 'heat-lag-25', 'temperature-lag-0', 'temperature-lag-1',
'temperature-lag-2', 'humidity-lag-2', 'windspeed-lag-2','DNI-lag-2', 'Day cos-lag-2']; such results are used for
artificial intelligence methods (scikit-learn and TensorFlow packages) and online learning methods"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from sklearn.metrics import mean_absolute_error, mean_squared_error
import tensorflow as tf
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
from helpers import add_day_week_features, data_gene, feature_selection, LAG_DICT, SHIFT_HEAT, LAG_DICT1, SHIFT_HEAT1
# Load the raw Nordhavn terminal-3 dataset (hourly heat meter reading plus
# weather-forecast columns); the first CSV column is the datetime index.
df1_ww = pd.read_csv('D:\\OneDrive\\OneDrive - Danmarks Tekniske '
                     'Universitet\\energydataDTU\\venv\\data_gene'
                     '\\nornhavn3_april'
                     '.csv', sep=',', index_col=0)
df1_ww.index = pd.to_datetime(df1_ww.index)
# Wind-speed magnitude from the two wind-vector components.
df1_ww['windspeed'] = np.sqrt(df1_ww['windx'].to_numpy() ** 2
                              + df1_ww['windy'].to_numpy() ** 2)
# BUG FIX: removed a dead `df_ww_copy = df1_ww.copy()` that was immediately
# overwritten by the empty frame below (it only wasted a full-table copy).
df_ww_copy = pd.DataFrame(columns=['heat', 'temperature', 'humidity',
                                   'DNI', 'windspeed'], index=df1_ww.index)
df_ww_copy['heat'] = df1_ww['Counter [MWh]']
df_ww_copy['temperature'] = df1_ww['temperature']
df_ww_copy['DNI'] = df1_ww['solarflux']
df_ww_copy['windspeed'] = df1_ww['windspeed']
df_ww_copy['humidity'] = df1_ww['humidity']
# plot PACF or ACF and plot FFT spectrum
# plot_acf_or_pacf(df_ww_copy)
# fft_analy(df_ww_copy)
# # heat load comes from space heating!
# fall = df_ww_copy[(df_ww_copy.index >= '2018-1-21 00:00:00')
#                   & (df_ww_copy.index < '2020-07-05 00:00:00')]
# Add Day/Week sin+cos time features, then build the lagged design matrix.
df = add_day_week_features(df_ww_copy)
df1_new = data_gene(LAG_DICT, SHIFT_HEAT, df)
# Trim partial days so the series starts at 00:00 and ends at 23:00.
index_start = 24 - df1_new.index[0].hour
index_end = 1 + df1_new.index[-1].hour
df1_new = df1_new.iloc[index_start:-index_end, :]
df1_new_copy = df1_new.copy()
# '2018-01-21 00:00:00' ~ '2020-07-05 23:00:00'
# Keep only the three heating seasons (late September – May).
start0 = datetime.datetime(2018, 1, 22, 0, 0, 0)
end0 = datetime.datetime(2018, 5, 31, 23, 0, 0)
start1 = datetime.datetime(2018, 9, 24, 0, 0, 0)
end1 = datetime.datetime(2019, 5, 31, 23, 0, 0)
start2 = datetime.datetime(2019, 9, 24, 0, 0, 0)
end2 = datetime.datetime(2020, 5, 31, 23, 0, 0)
date_gene0 = pd.date_range(start=start0, end=end0, freq='H').tolist()
date_gene1 = pd.date_range(start=start1, end=end1, freq='H').tolist()
date_gene2 = pd.date_range(start=start2, end=end2, freq='H').tolist()
dates = date_gene0 + date_gene1 + date_gene2
# 3:1 train/test split, rounded down to whole days (24-hour blocks).
df1_new = df1_new.loc[dates, :]
N_total = len(df1_new)
N_train = int(int(N_total * 0.75 / 24) * 24)
train_df = df1_new[0:N_train]
# Z-score normalisation using training statistics only.
train_df_copy = train_df.copy()
train_df_mean = train_df_copy.mean()
train_df_std = train_df_copy.std()
train_df_copy = (train_df_copy - train_df_mean) / train_df_std
y_train = train_df_copy['heat-lag-0']
train_df_copy.pop('heat-lag-0')
X_train = train_df_copy.copy()
feature_set_lasso, feature_set_xtree, feature_set_info = feature_selection(df1_new, X_train, y_train, alpha=0.05,
                                                                           n_estimators=20)
# ========================feature selection results========================
columns = ['heat-lag-0', 'heat-lag-24', 'heat-lag-25', 'temperature-lag-0', 'temperature-lag-1',
           'temperature-lag-2', 'humidity-lag-2', 'windspeed-lag-2', 'DNI-lag-2', 'Day cos-lag-2']
# ===========the selected columns are saved to "nordhavn_terminal3_selected.csv"================
# df1_out=df1_new[columns]
# df1_out.to_csv("nordhavn_terminal3_selected.csv")
#
|
import sys
from collections import OrderedDict
import pandas as pd
import numpy as np
import operator as op
import tensorflow as tf
from .common import constructNetwork
from .common import constructNetworkWithoutDropout
from .common import convertDateColsToInt
from .common import arrayToText
from .common import constructCleverHansModel
from .common import loadConfig
def main(test_path, model_path):
    """Evaluate a saved TF1 binary classifier on a CSV of test records.

    Reads training-time parameters from ``model_path + '.param'`` (feature
    mean/std and the one-hot expansion columns for each categorical
    feature), rebuilds the network, restores ``model_path + '.ckpt'``,
    and prints a 2x2 confusion matrix plus overall accuracy.
    """
    loadConfig('./config')
    test_data = pd.read_csv(test_path)
    print(("test_drivers data size %d\n" % test_data.shape[0]))
    print("Raw data loaded successfully.....\n")
    # Intepret params: the .param file holds 'X_mean:'/'X_std:' sections
    # followed by "column: v1,,,v2,,," one-hot expansion specs.
    param_path = model_path + '.param'
    param_file = open(param_path)
    lines = param_file.readlines()
    X_mean_train = []
    X_std_train = []
    expandMap = OrderedDict()
    i = 0
    while i < len(lines):
        line = lines[i].strip()
        if line == '':
            i += 1
            continue
        if line == 'X_mean:':
            # The line after the header holds the comma-separated means.
            i += 1
            line = lines[i].strip()
            X_mean_train = [float(x.strip()) for x in line.split(',') if x.strip()]
            i += 1
            continue
        if line == 'X_std:':
            i += 1
            line = lines[i].strip()
            X_std_train = [float(x.strip()) for x in line.split(',') if x.strip()]
            i += 1
            continue
        # Any other line is a categorical-column expansion spec.
        tokens = line.split(':')
        k = tokens[0].strip()
        if len(tokens) == 1:
            expandMap[k] = []
            i += 1
            continue
        v = tokens[1].strip()
        if v == '':
            expandMap[k] = []
        else:
            # ',,,' is the separator because dummy-column names contain ','.
            expandMap[k] = [x.strip() for x in v.split(',,,') if x.strip()]
        i += 1
    param_file.close()
    Y_LABEL = 'Default'
    KEYS = [i for i in list(test_data.keys()) if i != Y_LABEL]
    TEST_SIZE = test_data.shape[0]
    N_POSITIVE = test_data[Y_LABEL].sum()
    N_INPUT = test_data.shape[1] - 1
    N_CLASSES = 2
    print("Variables loaded successfully...\n")
    print(("Number of predictors \t%s" % (N_INPUT)))
    print(("Number of classes \t%s" % (N_CLASSES)))
    print(("TESTING_SIZE \t%s" % (TEST_SIZE)))
    print(("Number of positive instances \t%s" % (N_POSITIVE)))
    print("\n")
    print("Metrics displayed:\tPrecision\n")
    date_cols = ['OrDate', 'FirstPayment']
    test_data = convertDateColsToInt(test_data, date_cols)
    print("Start expanding the test data: ")
    # Columns with any NaN are pulled aside and re-attached unexpanded.
    nan_cols = test_data[test_data.columns[test_data.isnull().any()]]
    test_data.drop(nan_cols.columns, axis=1, inplace=True)
    cat = test_data[list(expandMap.keys())]
    print(("Expand cat data " + str(cat.columns.values) + "\n"))
    num = test_data.drop(cat.columns, axis=1)
    data = pd.DataFrame()
    for i in cat.columns:
        if len(expandMap[i]) == 0:
            continue
        # One-hot encode against the training-time dummy columns so the
        # test matrix has exactly the columns the network was trained on;
        # dummies unseen in training are dropped, missing ones stay 0.
        tmp = pd.DataFrame(0, index=np.arange(test_data.shape[0]), columns=expandMap[i])
        tmp1 = pd.get_dummies(cat[i], prefix=str(i), drop_first=True)
        for col in tmp1.columns:
            if col in tmp.columns:
                tmp[col] = tmp1[col]
        data = pd.concat([data, tmp], axis=1)
    test_data = pd.concat([num, data, nan_cols], axis=1).reset_index(drop=True)
    print("Expand categorical features.\n")
    print("After expanding: \n")
    ori_KEYS = KEYS
    N_INPUT = test_data.shape[1] - 1
    KEYS = [i for i in list(test_data.keys()) if i != Y_LABEL]
    print(("Number of predictors \t%s" % (N_INPUT)))
    print(KEYS)
    X_test = test_data[KEYS].get_values()
    y_test = test_data[Y_LABEL].get_values()
    # Normalise with the *training* statistics loaded from the .param file.
    X_test = (X_test - X_mean_train) / X_std_train
    #------------------------------------------------------------------------------
    # Neural net construction
    # Tf placeholders
    X = tf.placeholder(tf.float32, [None, N_INPUT])
    y = tf.placeholder(tf.int64, [None])
    dropout_keep_prob = tf.placeholder(tf.float32)
    pred, layerList = constructNetwork(X, dropout_keep_prob, N_INPUT, N_CLASSES)
    # Loss and optimizer
    logits = pred
    cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y))  # softmax loss
    correct_prediction = tf.equal(tf.argmax(pred, 1), y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    confusion = tf.confusion_matrix(y, tf.argmax(pred, 1), 2)
    print("Net built successfully...\n")
    print("Starting training...\n")
    #------------------------------------------------------------------------------
    # Training
    # Launch session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, model_path + '.ckpt')
    print("Testing...\n")
    #------------------------------------------------------------------------------
    # Testing
    #test_acc = sess.run(accuracy, feed_dict={X: X_test, y: y_test, dropout_keep_prob:1.})
    #
    #test_conf = sess.run(confusion, feed_dict={X: X_test, y: y_test, dropout_keep_prob:1.})
    # Evaluate in 100 batches to bound memory, accumulating one confusion matrix.
    test_conf = np.zeros((2, 2))
    indices = np.arange(0, X_test.shape[0])
    for batch_indices in np.array_split(indices, 100):
        batch_xs = X_test[batch_indices, :]
        batch_ys = y_test[batch_indices]
        test_conf += sess.run(confusion, feed_dict={X: batch_xs, y: batch_ys, dropout_keep_prob: 1.})
    accuracy = (test_conf[0][0] + test_conf[1][1]) / float(np.sum(test_conf))
    print(("Testing accuracy: %.3f" % accuracy))
    print(test_conf)
    sess.close()
    print("Session closed!")
if __name__ == '__main__':
    # Usage: python <script> <test_csv_path> <model_path_prefix>
    main(test_path=sys.argv[1], model_path=sys.argv[2])
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
class Utils():
    """Image-processing helpers that normalise scanned signature images to a
    fixed 150x550 binary canvas (threshold -> deskew -> crop -> resize/pad)."""

    def __init__(self):
        pass

    def get_otsu_threshold(self, image):
        # Binarise a BGR image: grayscale, invert (ink becomes white),
        # then Otsu's automatic threshold.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.bitwise_not(gray)
        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        return thresh

    def align_image(self, thresh):
        """Deskew a binary image by fitting a robust line through the ink pixels."""
        #thresh = get_otsu_threshold(image)
        shape = thresh.shape
        # Pad 500 black pixels on each side so rotation cannot clip the ink.
        zeros = np.zeros((thresh.shape[0], 500))
        thresh = np.hstack([zeros, thresh, zeros])
        shape = thresh.shape
        zeros = np.zeros((500, thresh.shape[1]))
        thresh = np.vstack([zeros, thresh, zeros])
        #show(thresh)
        # (x, y) coordinates of every non-zero (ink) pixel.
        coords = np.column_stack(np.where(thresh.T > 0))
        #print(coords.shape)
        rows, cols = thresh.shape[:2]
        # Robust (Welsch) line fit; (vx, vy) is the unit direction vector.
        [vx, vy, x, y] = cv2.fitLine(coords, cv2.DIST_WELSCH, 0, 0.01, 0.1)
        lefty = int((-x * vy / vx) + y)
        righty = int(((cols - x) * vy / vx) + y)
        #cv2.line(thresh,(cols-1,righty),(0,lefty),(255,255,255),10)
        # NOTE(review): converts the slope vy/vx straight to "degrees" using
        # 3.14 for pi; exact only for small angles (tan t ~ t). arctan would
        # be precise — confirm whether this approximation is intended.
        angle = (vy / vx) * 180 / 3.14
        (h, w) = thresh.shape
        center = (w // 2, h // 2)
        M = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated = cv2.warpAffine(thresh, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
        #rotated = imutils.rotate(thresh, -angle)
        return rotated

    def crop_signature_fast(self, image):
        """Crop to the first/last rows and columns whose pixel mass exceeds a threshold."""
        h, w = image.shape
        xmin = 0
        xmax = w - 1
        ymin = 0
        ymax = h - 1
        # NOTE(review): column sums are compared against image.shape[1] (the
        # width) and row sums against image.shape[0] (the height) — i.e. the
        # threshold uses the *other* axis's length; confirm the intended axis.
        for i in range(w):
            if np.sum(image[:, i]) > image.shape[1] * 0.85:
                #print(np.sum(image[:,i]))
                xmin = i
                break
        for i in range(w - 1, 0, -1):
            if np.sum(image[:, i]) > image.shape[1] * 0.85:
                #print(np.sum(image[:,i]))
                xmax = i
                break
        for i in range(h - 1, 0, -1):
            if np.sum(image[i]) > image.shape[0] * 0.85:
                #print(np.sum(image[i]))
                ymax = i
                break
        for i in range(h):
            if np.sum(image[i]) > image.shape[0] * 0.85:
                #print(np.sum(image[i]))
                ymin = i
                break
        crop_sig = image[ymin:ymax, xmin:xmax]
        return crop_sig

    def pad(self, img):
        # Centre the image on a fixed 150x550 black canvas, leaving a 5-pixel
        # margin along whichever dimension resize() scaled to its maximum.
        new_img = np.zeros((150, 550))
        if img.shape[0] == 140:
            k1 = int((550 - img.shape[1]) / 2)
            k2 = int((550 - img.shape[1]) / 2 + img.shape[1] % 2)
            new_img[5:-5, k1:-k2] = img
        else:
            k1 = int((150 - img.shape[0]) / 2)
            k2 = int((150 - img.shape[0]) / 2 + img.shape[0] % 2)
            new_img[k1:-k2, 5:-5] = img
        return new_img

    def resize(self, img):
        """Resize preserving aspect ratio to fit within 140x540, then pad to 150x550."""
        p1 = img.shape[0] / 140
        p2 = img.shape[1] / 540
        if p1 > p2:
            # Height is the limiting dimension.
            p2 = int(img.shape[1] / p1)
            p1 = 140
        else:
            # Width is the limiting dimension.
            p1 = int(img.shape[0] / p2)
            p2 = 540
        resized = cv2.resize(img, (p2, p1), interpolation=cv2.INTER_AREA)
        resized = self.pad(resized)
        return resized

    def process(self, img):
        """Full normalisation pipeline: threshold -> deskew -> crop -> resize/pad."""
        img = self.get_otsu_threshold(img)
        img = self.align_image(img)
        img = self.crop_signature_fast(img)
        img = self.resize(img)
        return img

    def show_images_sidebyside(self, im1, im2, cmap='gray'):
        # Debug helper: render two images side by side in one figure.
        fig, ax = plt.subplots(1, 2)
        fig.set_figheight(10)
        fig.set_figwidth(10)
        ax[0].imshow(im1, cmap=cmap);
        ax[1].imshow(im2, cmap=cmap);
        plt.show()
|
from django import template
register = template.Library()
from video.models import Video
@register.inclusion_tag('video/tags/lattest_videos.html', takes_context=True)
def lattest_videos(context):
    """Inclusion tag exposing six public videos to its template.

    NOTE(review): order_by('created') is ascending, so this selects the
    six *oldest* public videos despite the tag's name — confirm whether
    '-created' was intended.
    """
    selected = Video.objects.filter(galleries__is_public=True).order_by('created')[:6]
    context['lattest_videos'] = selected
    return context
|
import logging
from datetime import datetime
import requests
from common import db_session
from configuration import API_KEY
from .models import TrainActivity
BASE_V2_URL = 'http://realtime.mbta.com/developer/api/v2'
logger = logging.getLogger(__name__)
def format_mbta_request_url(api_key: str):
    """Build the MBTA v2 predictions-by-routes URL for the Green-B line (JSON)."""
    return f'{BASE_V2_URL}/predictionsbyroutes?api_key={api_key}&routes=Green-b&format=json'
def get_and_insert_current_predictions_by_routes() -> (int, int):
    """Queries the MBTA and upserts an observation row for each datapoint

    Returns: (number of new rows inserted, number of rows upserted)
    """
    list_of_train_activities = get_current_predictions_by_routes()
    with db_session(autoflush=False, echo=True) as session:
        with session.no_autoflush:
            for activity in list_of_train_activities:
                # merge() inserts or updates keyed on the primary key (upsert).
                session.merge(activity)
            # session.new holds only objects pending INSERT, so everything
            # not counted there was an update of an existing row.
            new_records_count = len(session.new)
            updated_records_count = len(list_of_train_activities) - new_records_count
    return new_records_count, updated_records_count
def get_current_predictions_by_routes(api_key=API_KEY) -> [TrainActivity]:
    """Queries the MBTA api and returns a list with an activity data point for each vehicle"""
    # r = requests.get('http://realtime.mbta.com/developer/api/v2/predictionsbyroute?api_key=wX9NwuHnZU2ToO7GmGR9uw&route=Green-B&direction=1&format=json')
    request_url = format_mbta_request_url(api_key)
    r = requests.get(request_url)
    # NOTE(review): local name `json` shadows the common module name; it holds
    # the first route object of the first mode in the response payload.
    json = r.json()['mode'][0]['route'][0]
    # top level data for each datapoint
    route_id = json.get('route_id')
    route_name = json.get('route_name')
    # Direction index 1 is assumed to be the eastbound direction — TODO confirm.
    eastbound = json.get('direction')[1]
    direction_id = eastbound.get('direction_id')
    direction_name = eastbound.get('direction_name')
    trips = eastbound.get('trip')
    list_of_train_activities: [TrainActivity] = []
    for trip in trips:
        # Flatten each vehicle dict, enriching it with route/direction/trip
        # metadata so it maps 1:1 onto TrainActivity columns.
        trip_data = trip.get('vehicle')
        trip_data['route_id'] = route_id
        trip_data['route_name'] = route_name
        trip_data['direction_id'] = direction_id
        trip_data['direction_name'] = direction_name
        trip_data.update({'trip_id': trip.get('trip_id')})
        trip_data.update({'trip_name': trip.get('trip_name')})
        trip_data.update({'trip_headsign': trip.get('trip_headsign')})
        # vehicle_timestamp is a Unix epoch string; store a datetime too.
        trip_data.update({'timestamp': datetime.fromtimestamp(int(trip_data.get('vehicle_timestamp')))})
        list_of_train_activities.append(TrainActivity(**trip_data))
    return list_of_train_activities
def get_observations_since(high_water_timestamp=0) -> ([TrainActivity], int):
    """Return TrainActivity rows strictly newer than *high_water_timestamp*.

    Returns:
        (observations, new_high_water_mark) — the fetched rows (detached
        from the session) and the largest vehicle_timestamp among them,
        or (None, None) when no newer rows exist.
    """
    with db_session() as session:
        observations = session.query(TrainActivity).filter(TrainActivity.vehicle_timestamp > high_water_timestamp)
        all_obs = observations.all()
        if not all_obs:
            return None, None
        # BUG FIX: previously iterated the query object again here, which
        # re-executed the SELECT; reuse the rows already fetched instead.
        new_high_water_mark = max(obs.vehicle_timestamp for obs in all_obs)
        # Detach the rows so they remain usable after the session closes.
        session.expunge_all()
        return all_obs, new_high_water_mark
|
# coding: utf-8
"""
The sum of the squares of the first ten natural numbers is,
1^(2) + 2^(2) + ... + 10^(2) = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^(2) = 55^(2) = 3025
Hence the difference between the sum of the squares of the first ten natural
numbers and the square of the sum is 3025 − 385 = 2640.
Find the difference between the sum of the squares of the first one hundred
natural numbers and the square of the sum.
From http://projecteuler.net/index.php?section=problems&id=6
"""
def problem006(min, max):
    """Return the difference between the square of the sum and the sum of the
    squares of the integers min..max inclusive (Project Euler problem 6).

    NOTE: the parameter names shadow the builtins `min`/`max`; they are kept
    unchanged for backward compatibility with existing callers.
    """
    # Generator expression instead of materialising a throwaway list.
    sum_of_squares = sum(i * i for i in range(min, max + 1))
    square_of_sums = sum(range(min, max + 1)) ** 2
    return square_of_sums - sum_of_squares
if __name__ == '__main__':
    assert problem006(1, 10) == 2640
    # FIX: use the print() function form so the script runs on both
    # Python 2 and Python 3 (the bare `print` statement is Python-2-only).
    print(problem006(1, 100))
|
# -*- coding:utf-8 -*-
import os
def getPlus(a, b):
    """Multiply two arbitrarily large numbers digit-by-digit (schoolbook
    long multiplication) and return the product as a decimal string.

    a, b may be ints or decimal strings. Debug output is printed as in the
    original. FIX: converted from Python 2 to Python 3 — `print` statements
    became print() calls and integer division uses `//` (plain `/` would
    yield floats in Python 3 and break the carry arithmetic).
    """
    k1 = len(str(a))
    s1 = str(a)
    k2 = len(str(b))
    s2 = str(b)
    print(k1, type(s1), s1, " |--| ", k2, type(s2), s2)
    # p[i] accumulates the (un-carried) digit sum for 10**i.
    p = list()
    k = 0
    for item_b in s2[::-1]:
        index = k
        for item_a in s1[::-1]:
            num = int(item_a) * int(item_b)
            if len(p) == index:
                p.append(num)
                index += 1
                continue
            p[index] += num
            index += 1
        k += 1
    print(len(p), p)
    # Propagate carries left-to-right; the most significant slot may keep
    # several digits, which is still correct after the final join.
    for x in range(len(p)):
        if x == len(p) - 1:
            p[x] = str(p[x])
            continue
        if p[x] // 10 == 0:
            p[x] = str(p[x])
            print(x, type(p[x]), p[x])
            continue
        elif p[x] // 10 != 0:
            m = p[x] // 10
            p[x + 1] += m
            p[x] = str(p[x] % 10)
    res = "".join(p[::-1])
    print(len(res), res)
    return res
if __name__ == "__main__":
t = list([1, 2, 3])
print max(t), min(t)
print t
print type(20 % 10), 20 % 10
res = getPlus(str(999999999999999999999999), str(9996646168496898169999999))
print "res:", type(res), res
|
from selenium import webdriver
from selenium.common.exceptions import ElementClickInterceptedException, NoSuchElementException
from selenium.webdriver.common.keys import Keys
import time
from password import *
driver = webdriver.Chrome("C:\Chromedriver\chromedriver")
URL = "https://tinder.com/"
driver.get(URL)
driver.find_element_by_xpath("/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div/div/header/div/div[2]/div[2]/a").click()
main_page = driver.current_window_handle
print(main_page)
print(driver.title)
time.sleep(1)
driver.find_element_by_xpath("//button[@aria-label='Zaloguj się przez Facebooka']").click()
time.sleep(5)
# changing the handles to access login page
for handle in driver.window_handles:
if handle != main_page:
login_page = handle
# change the control to signin page
driver.switch_to.window(login_page)
driver.find_element_by_xpath("//button[@title='Akceptuj wszystkie']").click()
time.sleep(1)
driver.find_element_by_xpath("//input[@type='text']").send_keys(FACEBOOK_EMAIL)
driver.find_element_by_xpath("//input[@type='password']").send_keys(FACEBOOK_PASSWORD, Keys.ENTER)
driver.switch_to.window(main_page)
time.sleep(6)
driver.find_element_by_xpath("//button[@data-testid='allow']").click()
time.sleep(1)
driver.find_element_by_xpath("//button[@data-testid='allow']").click()
page = driver.find_element_by_tag_name('body')
for _ in range(100):
page.send_keys(Keys.ARROW_RIGHT)
time.sleep(3)
try:
driver.find_element_by_xpath("//button[@title='Wróć do Tindera']").click()
except NoSuchElementException:
try:
driver.find_element_by_xpath("/html/body/div[2]/div/div/div[2]/button[2]").click()
except NoSuchElementException:
continue
|
"""Helper set of functions to read in and parse multiple types of input sources."""
import sys
import os
import datetime
from bs4 import BeautifulSoup
from shapely.geometry.polygon import Polygon
def read(ftype, inDir, inSuffix, startTime, endTime):
    """Dispatch to the parser that matches the user-specified file type.

    Parameters
    ----------
    ftype : string
        The file type to process: segmotion, probsevere, or ryan
    inDir : string
        The input directory
    inSuffix : string
        The lowest subdirectory of the input directory
    startTime : string
        The earliest time to process
    endTime : string
        The latest time to process

    Returns
    -------
    list
        [stormCells, totNumCells, numTrackTimes, dates] - List containing dicts of all
        storm cells, the number of cells, the number of files, and all valid dates.
        An unrecognised ftype yields None (matching the original behaviour).
    """
    parsers = {
        'ryan': readRyan,
        'segmotion': readSegmotion,
        'probsevere': readProbSevere,
    }
    parser = parsers.get(ftype)
    if parser is not None:
        return parser(inDir, inSuffix, startTime, endTime)
def readRyan(inDir, inSuffix, startTime, endTime):
    """
    Parses post-processed segmotion files (.data) from Ryan's original code

    Parameters
    ----------
    inDir : string
        The input directory
    inSuffix : string
        The lowest subdirectory of the input directory
    startTime : string
        The earliest time to process
    endTime : string
        The latest time to process

    Returns
    -------
    list
        [stormCells, totNumCells, numTrackTimes, dates] - List containing dicts of all
        storm cells, the number of cells, the number of files, and all valid dates
    """
    numTrackTimes = 0
    totNumCells = 0
    stormCells = {}
    dates = []
    # Read in Ryan files
    for root, dirs, files in os.walk(inDir):
        # Only process leaf directories whose name matches the requested suffix.
        if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
        for trackFile in files:
            if trackFile.endswith('.data'):
                # Skip hidden files
                if trackFile.startswith('._'): continue
                # Check if file falls in date range (timestamp is encoded in the name)
                try:
                    fileDate = datetime.datetime.strptime(str(trackFile).split('_')[0], '%Y-%m-%d-%H%M%S')
                except ValueError:
                    print('File ' + str(trackFile) + ' has an invalid name. Expected format YYYY-MM-DD-hhmmss_...')
                    continue
                if not startTime <= fileDate < endTime:
                    continue
                if fileDate.date() not in dates: dates.append(fileDate.date())
                # Open file
                f = open(root + '/' + trackFile)
                lines = f.readlines()
                f.close()
                # Skip probSevere files (flag on line 28 of the fixed-layout file)
                if int(lines[28].split()[0]) == 1:
                    print('\nWARNING: Unable to process storm objects from probSevere in Ryan format. Use "-t probsevere" instead.')
                    print(str(trackFile) + ' will be skipped.\n')
                    continue
                print(trackFile)
                numTrackTimes += 1
                # Get Individual cell metadata — one record every 5 lines from line 32.
                cells = lines[32::5]
                numCells = len(cells)
                for cell in cells:
                    cell = cell.split()
                    # Cells get a globally unique sequential id across all files.
                    cellID = totNumCells
                    stormCells[cellID] = {'time': fileDate, 'lat': float(cell[0]), 'lon': float(cell[1]), 'latr': float(cell[3]),
                                          'lonr': float(cell[4]), 'orientation': float(cell[8]), 'track': str(cell[9]) + '_' + str(fileDate.date()), 'old_track': str(cell[9])}
                    totNumCells += 1
    return [stormCells, totNumCells, numTrackTimes, dates]
def readSegmotion(inDir, inSuffix, startTime, endTime):
    """
    Parses raw segmotion .xml files

    Parameters
    ----------
    inDir : string
        The input directory
    inSuffix : string
        The lowest subdirectory of the input directory
    startTime : string
        The earliest time to process
    endTime : string
        The latest time to process

    Returns
    -------
    list
        [stormCells, totNumCells, numTrackTimes, dates] - List containing dicts of all
        storm cells, the number of cells, the number of files, and all valid dates
    """
    numTrackTimes = 0
    totNumCells = 0
    stormCells = {}
    dates = []
    # Read in Segmotion files
    for root, dirs, files in os.walk(inDir):
        # Only process leaf directories whose name matches the requested suffix.
        if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
        for trackFile in files:
            if trackFile.endswith('.xml'):
                # Skip hidden files
                if trackFile.startswith('._'): continue
                # Check if file falls in date range (timestamp encoded in the name)
                try:
                    fileDate = datetime.datetime.strptime(str(trackFile).split('.')[0], '%Y%m%d-%H%M%S')
                except ValueError:
                    print('File ' + str(trackFile) + ' has an invalid name. Expected format YYYYMMDD-hhmmss.xml...')
                    continue
                if not startTime <= fileDate < endTime:
                    continue
                if fileDate.date() not in dates: dates.append(fileDate.date())
                # Open file and collect all <datacolumn> elements.
                f = open(root + '/' + trackFile)
                lines = BeautifulSoup(f, 'html.parser').find_all('datacolumn')
                f.close()
                print(trackFile)
                numTrackTimes += 1
                numCells = len(lines[2].find_all('item'))
                for i in range(0, numCells):
                    # Fixed column positions in the segmotion XML layout:
                    # 4=latr, 5=lat, 6=lonr, 7=lon, 12=orientation, 13=track id.
                    # Values are pulled from the item tag's first attribute.
                    time = fileDate
                    latr = float(str(lines[4].find_all('item')[i]).split('"')[1])
                    lat = float(str(lines[5].find_all('item')[i]).split('"')[1])
                    lonr = float(str(lines[6].find_all('item')[i]).split('"')[1])
                    lon = float(str(lines[7].find_all('item')[i]).split('"')[1])
                    orientation = float(str(lines[12].find_all('item')[i]).split('"')[1])
                    track = str(lines[13].find_all('item')[i]).split('"')[1]
                    # Cells get a globally unique sequential id across all files.
                    cellID = totNumCells
                    stormCells[cellID] = {'time': time, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon,
                                          'orientation': orientation, 'track': track + '_' + str(fileDate.date()), 'old_track': track}
                    totNumCells += 1
    return [stormCells, totNumCells, numTrackTimes, dates]
def readProbSevere(inDir, inSuffix, startTime, endTime):
    """
    Parses probSevere .ascii files

    Parameters
    ----------
    inDir : string
        The input directory
    inSuffix : string
        The lowest subdirectory of the input directory
    startTime : string
        The earliest time to process
    endTime : string
        The latest time to process

    Returns
    -------
    list
        [stormCells, totNumCells, numTrackTimes, dates] - List containing dicts of all
        storm cells, the number of cells, the number of files, and all valid dates
    """
    numTrackTimes = 0
    totNumCells = 0
    stormCells = {}
    dates = []
    # Read in ProbSevere files
    for root, dirs, files in os.walk(inDir):
        # Only process leaf directories whose name matches the requested suffix.
        if inSuffix != '' and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue
        for trackFile in files:
            if trackFile.endswith('.ascii'):
                # Skip hidden files
                if trackFile.startswith('._'): continue
                # Check if file falls in date range (date + time in name fields 3 and 4)
                try:
                    date = str(trackFile).split('.')[0].split('_')[3]
                    time = str(trackFile).split('.')[0].split('_')[4]
                    fileDate = datetime.datetime.strptime(date + '_' + time, '%Y%m%d_%H%M%S')
                except ValueError:
                    print('File ' + str(trackFile) + ' has an invalid name. Expected format SSEC_AWIPS_PROBSEVERE_YYYYMMDD_hhmmss.ascii...')
                    continue
                if not startTime <= fileDate < endTime:
                    continue
                if fileDate.date() not in dates: dates.append(fileDate.date())
                # Open file
                f = open(root + '/' + trackFile)
                lines = f.readlines()
                f.close()
                print(trackFile)
                numTrackTimes += 1
                for line in lines:
                    # Skip the header line; data lines are ':'-separated with
                    # field 7 holding alternating lat,lon polygon vertices and
                    # field 8 the track id.
                    if line.startswith('Valid:'): continue
                    data = str(line).split(':')
                    lats = list(map(float, data[7].split(',')[0::2]))
                    lons = list(map(float, data[7].split(',')[1::2]))
                    track = data[8]
                    # Half-extent "radii" of the polygon's bounding box.
                    latr = (max(lats) - min(lats)) / 2.
                    lonr = abs(max(lons) - min(lons)) / 2.
                    # Calculate centroid of the storm polygon.
                    points = []
                    for i in range(0, len(lats)):
                        points.append((lons[i], lats[i]))
                    poly = Polygon(points)
                    lon = poly.centroid.x
                    lat = poly.centroid.y
                    # Cells get a globally unique sequential id across all files.
                    cellID = totNumCells
                    stormCells[cellID] = {'time': fileDate, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon,
                                          'orientation': 'NaN', 'track': track + '_' + str(fileDate.date()), 'old_track': track}
                    totNumCells += 1
    return [stormCells, totNumCells, numTrackTimes, dates]
|
from django.conf.urls import url
from django.urls import include, path
from fichaArticulo import views as ficha_views
from . import views
# URL routes for the article-sheet (fichaArticulo) app.
urlpatterns = [
    # NOTE(review): the next two patterns both match the empty path ''; the
    # first (namespaced include) always wins, so the second is effectively
    # unreachable — confirm whether it can be removed.
    path('', include(([path('', ficha_views.fichaArticulo, name='fichaArticulo')], 'fichaArticulo'), namespace='ficha')),
    path(r'', ficha_views.fichaArticulo, name='fichaArticulo'),
    url(r'^lista/$', views.lista, name='lista'),
    url(r'^reserva/$', views.reserva, name='reserva'),
    # Edit views for each editable attribute of an article.
    url(r'^editar/$', views.editar, name='editar'),
    url(r'^editarFoto/$', views.editarFoto, name='editarFoto'),
    url(r'^editarDes/$', views.editarDes, name='editarDes'),
    url(r'^editarRes/$', views.editarRes, name='editarRes'),
    url(r'^editarEst/$', views.editarEst, name='editarEst'),
    # Accept/cancel endpoints for the pending edits above.
    url(r'^aceptarnombre/$', views.aceptarNombre, name='aceptarNombre'),
    url(r'^cancelarnombre/$', views.cancelarNombre, name='cancelarNombre'),
    url(r'^aceptardes/$', views.aceptarDes, name='aceptarDes'),
    url(r'^aceptarest/$', views.aceptarEst, name='aceptarEst'),
    url(r'^aceptarfoto/$', views.aceptarFoto, name='aceptarFoto'),
    url(r'^aceptarres/$', views.aceptarRes, name='aceptarRes')
]
|
#
# MIT License
#
# Copyright (c) 2018 Matteo Poggi m.poggi@unibo.it
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
def get_disp(x):
    """Disparity head: 2-channel sigmoid conv, scaled into the [0, 0.3) range."""
    return 0.3 * conv(x, 2, 3, 1, tf.nn.sigmoid)
def conv(x, num_out_layers, kernel_size, stride, activation_fn=tf.nn.elu):
    """2D convolution with explicit symmetric zero padding (emulates SAME
    while using a VALID conv, so padding is identical for every stride)."""
    pad_amount = np.floor((kernel_size - 1) / 2).astype(np.int32)
    paddings = [[0, 0], [pad_amount, pad_amount], [pad_amount, pad_amount], [0, 0]]
    padded = tf.pad(x, paddings)
    return slim.conv2d(padded, num_out_layers, kernel_size, stride, 'VALID',
                       activation_fn=activation_fn)
def conv_block(x, num_out_layers, kernel_size):
    """Two stacked convs; the second uses stride 2 to halve the resolution."""
    features = conv(x, num_out_layers, kernel_size, 1)
    return conv(features, num_out_layers, kernel_size, 2)
def maxpool(x, kernel_size):
    """Max-pool with explicit symmetric zero padding, mirroring conv()."""
    pad_amount = np.floor((kernel_size - 1) / 2).astype(np.int32)
    paddings = [[0, 0], [pad_amount, pad_amount], [pad_amount, pad_amount], [0, 0]]
    return slim.max_pool2d(tf.pad(x, paddings), kernel_size)
def resconv(x, num_layers, stride):
    # Bottleneck residual unit: 1x1 -> 3x3 (carries the stride) -> 1x1 that
    # expands to 4*num_layers channels, joined by a shortcut and an ELU.
    # NOTE(review): `tf.shape(x)[3] != num_layers` compares a Tensor with an
    # int, which in TF1 graph mode does not evaluate the channel count — so
    # do_proj is effectively always truthy and the projection shortcut is
    # always taken. Confirm against the reference monodepth implementation.
    do_proj = tf.shape(x)[3] != num_layers or stride == 2
    shortcut = []
    conv1 = conv(x, num_layers, 1, 1)
    conv2 = conv(conv1, num_layers, 3, stride)
    # No activation on the last conv: the ELU is applied after the addition.
    conv3 = conv(conv2, 4 * num_layers, 1, 1, None)
    if do_proj:
        # 1x1 projection so the shortcut matches conv3's channels and stride.
        shortcut = conv(x, 4 * num_layers, 1, stride, None)
    else:
        shortcut = x
    return tf.nn.elu(conv3 + shortcut)
def resblock(x, num_layers, num_blocks):
    """Stack of resconv units; only the final unit downsamples (stride 2)."""
    out = x
    for _ in range(num_blocks - 1):
        out = resconv(out, num_layers, 1)
    return resconv(out, num_layers, 2)
def upconv(x, num_out_layers, kernel_size, scale):
    """Nearest-neighbour upsample by `scale`, then a stride-1 convolution."""
    upsampled = upsample_nn(x, scale)
    return conv(upsampled, num_out_layers, kernel_size, 1)
def deconv(x, num_out_layers, kernel_size, scale):
    # Transposed conv after a 1-pixel zero pad; the output is then cropped
    # with [3:-1, 3:-1] to remove the border artefacts the padding creates.
    # NOTE(review): the fixed crop offsets appear to assume scale == 2 —
    # confirm behaviour for other scales.
    p_x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
    convs = slim.conv2d_transpose(p_x, num_out_layers, kernel_size, scale, 'SAME')
    return convs[:, 3:-1, 3:-1, :]
def upsample_nn(x, ratio):
    """Nearest-neighbour upsample of an NHWC tensor by an integer ratio."""
    shape = tf.shape(x)
    new_size = [shape[1] * ratio, shape[2] * ratio]
    return tf.image.resize_nearest_neighbor(x, new_size)
|
# encoding: UTF-8
import main
from dal import base_dal
from test_main.constants import *
def test_delete_performance_report():
    # Smoke test: delete the performance report for the fixture YEAR/QUARTER
    # (constants imported from test_main.constants).
    base_dal.delete_performance_report(YEAR, QUARTER)
if __name__ == '__main__':
    # Allow running this test standalone with logging configured.
    main.setup_logging()
    test_delete_performance_report()
|
def exercicio4():
    """Read four bimester grades from stdin and print the final average."""
    print("Programa de calculo de média final")
    # BUG FIX: grades were parsed with int(), which crashes on decimal
    # input such as "7.5"; float() accepts both integer and decimal grades
    # (backward compatible — whole numbers still parse).
    media1 = float(input("Digite a média do primeiro bimestre: "))
    media2 = float(input("Digite a média do segundo bimestre: "))
    media3 = float(input("Digite a média do terceiro bimestre: "))
    media4 = float(input("Digite a média do quarto bimestre: "))

    def media_dos_bimestres():
        # Arithmetic mean of the four grades (closure over media1..media4).
        totalPontos = media1 + media2 + media3 + media4
        mediaFinal = totalPontos / 4
        return mediaFinal

    print(f'A média final é {media_dos_bimestres()}')

exercicio4()
# Generated by Django 3.1.7 on 2021-03-07 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional header image to Post and
    constrain Post.measurement to a fixed set of unit choices."""

    dependencies = [
        ('gamma', '0006_merge_20210304_0032'),
    ]

    operations = [
        # New optional image field; files are stored under MEDIA_ROOT/images/.
        migrations.AddField(
            model_name='post',
            name='header_image',
            field=models.ImageField(blank=True, null=True, upload_to='images/'),
        ),
        # Restrict measurement to the listed unit choices.
        migrations.AlterField(
            model_name='post',
            name='measurement',
            field=models.TextField(choices=[('N/A', 'N/A'), ('km', 'km'), ('m', 'm')]),
        ),
    ]
|
import csv
import subprocess
# stdout = subprocess.PIPE, stderr = subprocess.PIPE
# Run the parametric 2-D Abaqus model without the GUI; blocks until the
# script has finished and written force_output.csv.
# NOTE(review): shell=True combined with a list of args is Windows-specific
# behaviour — confirm this is intended and the arguments are trusted.
subprocess.run(
    ["abaqus", "cae", "noGUI=./abaqusScript/autoParametric2DnoGUI.py"], shell=True)
print("*********************")
# Echo every force value from the solver's CSV output as a float.
with open('force_output.csv', 'r') as file:
    reader = csv.reader(file)
    for row in reader:
        for col in row:
            print(float(col))
|
# Generated by Django 2.0 on 2018-01-24 07:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: create the Coin model holding
    per-cryptocurrency GitHub activity metrics and derived scores."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Coin',
            fields=[
                # Natural primary key (ticker-style coin identifier).
                ('coin_id', models.CharField(max_length=10, primary_key=True, serialize=False)),
                ('coin_name', models.CharField(max_length=50)),
                ('github_link', models.TextField()),
                ('official_site_link', models.TextField()),
                ('total_contributors', models.IntegerField()),
                ('active_contributors', models.IntegerField()),
                # Derived *_score fields default to 0 until computed.
                ('read_me_score', models.IntegerField(default=0)),
                ('issues_score', models.IntegerField(default=0)),
                ('issues_open', models.IntegerField()),
                ('issues_closed', models.IntegerField()),
                ('pr_score', models.IntegerField(default=0)),
                ('pr_open', models.IntegerField()),
                ('pr_closed', models.IntegerField()),
                ('pr_merged', models.IntegerField()),
            ],
        ),
    ]
|
# coding: utf-8
from flask import Flask, render_template
from flask import request
import funpy.app as funpy
app = Flask(__name__)  # create the Flask application instance
@app.route('/weather', methods=['GET', 'POST'])
def add_numbers():
    """Render the index page with weather info for the query-string coords.

    Expects ?lat=..&lng=.. parameters (either may be absent, yielding None).
    """
    latitude = request.args.get('lat')
    longitude = request.args.get('lng')
    weather = funpy.api(latitude, longitude)
    return render_template('index.html', message=weather)
@app.route("/") #アプリケーション/indexにアクセスが合った場合
def index():
num = funpy.api(35.681167,139.767052)
return render_template('index.html',message = num) #/indexにアクセスが来たらtemplates内のindex.htmlが開きます
# This is where the server side hands data across to the client side.
if __name__ == "__main__":
# webサーバー立ち上げ
app.run()
|
import os
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
bucket = os.getenv("INFLUX_BUCKET")
host = os.getenv("INFLUX_HOST")
port = os.getenv("INFLUX_PORT")
org = os.getenv("INFLUX_ORG")
token = os.getenv("INFLUX_TOKEN")
class HiveData(object):
    """Thin wrapper around an InfluxDB 2.x client for beehive telemetry."""

    def __init__(self, host: str, port: int, bucket: str, token: str, org: str):
        # Build the HTTP endpoint and keep both write and query APIs open
        # for the lifetime of this object.
        url = f"http://{host}:{port}"
        self._bucket = bucket
        self._client = InfluxDBClient(url=url, token=token, org=org)
        self._write_api = self._client.write_api(write_options=SYNCHRONOUS)
        self._query_api = self._client.query_api()

    async def write_point(self, mac: str, weight: float, temp_in: float, temp_out: float) -> None:
        # Store one "hivedata" measurement tagged by the sensor board's MAC.
        # NOTE(review): declared async but performs a synchronous (blocking)
        # write, which will stall the event loop — confirm this is acceptable.
        p = (
            Point("hivedata")
            .tag("board", mac)
            .field("weight", weight)
            .field("temperature_inside", temp_in)
            .field("temperature_outside", temp_out)
        )
        self._write_api.write(bucket=self._bucket, record=p)
|
from django.contrib import admin
from .models import Evento
from .models import Professores
from .models import Alunos, Cursos
# Expose the app's models in the Django admin site (same order as before).
for _model in (Evento, Professores, Alunos, Cursos):
    admin.site.register(_model)
|
"""
Tests Deploy CLI
"""
from subprocess import CalledProcessError, PIPE
from unittest import TestCase
from mock import patch, call
from samcli.lib.samlib.cloudformation_command import execute_command, find_executable
class TestExecuteCommand(TestCase):
    """Tests for execute_command(): CLI argument forwarding and exit-code
    propagation when the subprocess fails."""
    def setUp(self):
        # extra CLI arguments that must be forwarded to the subcommand verbatim
        self.args = ("--arg1", "value1", "different args", "more")
    @patch("subprocess.check_call")
    @patch("samcli.lib.samlib.cloudformation_command.find_executable")
    def test_must_add_template_file(self, find_executable_mock, check_call_mock):
        # the resolved executable is used and --template-file is appended last
        find_executable_mock.return_value = "mycmd"
        check_call_mock.return_value = True
        execute_command("command", self.args, "/path/to/template")
        check_call_mock.assert_called_with(["mycmd", "cloudformation", "command"] +
                                           ["--arg1", "value1", "different args", "more",
                                            "--template-file", "/path/to/template"])
    @patch("sys.exit")
    @patch("subprocess.check_call")
    @patch("samcli.lib.samlib.cloudformation_command.find_executable")
    def test_command_must_exit_with_status_code(self, find_executable_mock, check_call_mock, exit_mock):
        # a CalledProcessError from the subprocess must exit with its return code
        find_executable_mock.return_value = "mycmd"
        check_call_mock.side_effect = CalledProcessError(2, "Error")
        exit_mock.return_value = True
        execute_command("command", self.args, None)
        exit_mock.assert_called_with(2)
class TestFindExecutable(TestCase):
    """Tests for find_executable(): platform-specific discovery of the AWS CLI.

    Fix: unittest's ``assertEquals`` is a deprecated alias of ``assertEqual``
    (removed in Python 3.12) -- replaced throughout.
    """
    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_use_raw_name(self, platform_system_mock, popen_mock):
        # on non-Windows platforms only the bare executable name is probed
        platform_system_mock.return_value = "Linux"
        execname = "foo"
        find_executable(execname)
        self.assertEqual(popen_mock.mock_calls, [
            call([execname], stdout=PIPE, stderr=PIPE)
        ])
    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_use_name_with_cmd_extension_on_windows(self, platform_system_mock, popen_mock):
        # on Windows the .cmd variant is tried first
        platform_system_mock.return_value = "windows"
        execname = "foo"
        expected = "foo.cmd"
        result = find_executable(execname)
        self.assertEqual(result, expected)
        self.assertEqual(popen_mock.mock_calls, [
            call(["foo.cmd"], stdout=PIPE, stderr=PIPE)
        ])
    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_use_name_with_exe_extension_on_windows(self, platform_system_mock, popen_mock):
        platform_system_mock.return_value = "windows"
        execname = "foo"
        expected = "foo.exe"
        popen_mock.side_effect = [OSError, "success"]  # fail on .cmd extension
        result = find_executable(execname)
        self.assertEqual(result, expected)
        self.assertEqual(popen_mock.mock_calls, [
            call(["foo.cmd"], stdout=PIPE, stderr=PIPE),
            call(["foo.exe"], stdout=PIPE, stderr=PIPE)
        ])
    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_use_name_with_no_extension_on_windows(self, platform_system_mock, popen_mock):
        platform_system_mock.return_value = "windows"
        execname = "foo"
        expected = "foo"
        popen_mock.side_effect = [OSError, OSError, "success"]  # fail on .cmd and .exe extension
        result = find_executable(execname)
        self.assertEqual(result, expected)
        self.assertEqual(popen_mock.mock_calls, [
            call(["foo.cmd"], stdout=PIPE, stderr=PIPE),
            call(["foo.exe"], stdout=PIPE, stderr=PIPE),
            call(["foo"], stdout=PIPE, stderr=PIPE),
        ])
    @patch("subprocess.Popen")
    @patch("platform.system")
    def test_must_raise_error_if_executable_not_found(self, platform_system_mock, popen_mock):
        # when every candidate name fails, an OSError listing them is raised
        platform_system_mock.return_value = "windows"
        execname = "foo"
        popen_mock.side_effect = [OSError, OSError, OSError, "success"]  # fail on all executable names
        with self.assertRaises(OSError) as ctx:
            find_executable(execname)
        expected = "Unable to find AWS CLI installation under following names: {}".format(["foo.cmd", "foo.exe", "foo"])
        self.assertEqual(expected, str(ctx.exception))
        self.assertEqual(popen_mock.mock_calls, [
            call(["foo.cmd"], stdout=PIPE, stderr=PIPE),
            call(["foo.exe"], stdout=PIPE, stderr=PIPE),
            call(["foo"], stdout=PIPE, stderr=PIPE),
        ])
|
from Calculator import BasicArithmeticOperation0_1 as BAO
# import BasicArithmeticOperation0_1 as BAO
import numpy as np
import time
import matplotlib.pyplot as plt
def trapezium_area(top, base, height):
    """Return the trapezium area (top + base) * height / 2.

    Computed via the BAO calculator helpers; assumes BAO.add/BAO.multi are
    plain addition/multiplication -- TODO confirm.
    """
    area = BAO.multi(1 / 2, (BAO.multi(BAO.add(top, base), height)))
    return area
def integration_simp(equ, start, end, n):
    """Approximate the integral of ``equ`` over [start, end] using composite
    Simpson's rule with ``n`` sub-intervals (``n`` should be even).

    ``equ`` is a Python expression in the variable ``X`` (e.g. "X*X") and is
    evaluated with eval(), so it must come from a trusted source.

    Bug fix: the previous version returned ``area / (3 * n)``, omitting the
    interval width; composite Simpson's rule is ``(h / 3) * area`` with
    ``h = (end - start) / n``. Fake progress-bar and debug prints removed.
    """
    h = (end - start) / n
    # linspace avoids the float-accumulation issues of arange and yields
    # exactly n + 1 samples, including both endpoints.
    x_values = np.linspace(start, end, n + 1)
    y_values = np.array([eval(equ.replace("X", str(x))) for x in x_values])
    # Simpson weights: 1 at both ends, 4 at odd interior points, 2 at even ones.
    area = (y_values[0] + y_values[-1]
            + 2 * y_values[2:-2:2].sum()
            + 4 * y_values[1:-1:2].sum())
    return area * h / 3
def integration_simp_check(equ, start, end, step):
    """Reference implementation of composite Simpson's rule over [start, end]
    with spacing ``step`` ((end - start) / step should be an even integer).

    ``equ`` is an expression in ``X`` evaluated with eval() -- trusted input only.

    Bug fixes versus the previous version:
    * the sample grid now includes the right endpoint (np.arange excludes it);
    * panels advance by two points, so each interior sample is used once per
      panel instead of the overlapping (double-counting) windows used before.
    Fake progress-bar prints removed.
    """
    # step/2 padding lets the endpoint survive float rounding in arange.
    x_values = np.arange(start, end + step / 2, step)
    y_values = np.array([eval(equ.replace("X", str(x))) for x in x_values])
    area = 0.0
    # one Simpson panel per pair of sub-intervals: (y0 + 4*y1 + y2) * h / 3
    for i in range(0, y_values.size - 2, 2):
        area += (y_values[i] + 4 * y_values[i + 1] + y_values[i + 2]) * step / 3
    return area
def integration_trape(equ, start, end, step):
    """Approximate the integral of ``equ`` over [start, end] with the
    trapezium rule using sample spacing ``step``.

    ``equ`` is an expression in ``X`` evaluated with eval() -- trusted input only.

    Bug fix: the sample grid now includes the right endpoint; the old
    ``np.arange(start, end, step)`` stopped one interval short, so the last
    slice was never summed. Fake progress-bar prints removed.
    """
    # step/2 padding lets the endpoint survive float rounding in arange.
    x_values = np.arange(start, end + step / 2, step)
    y_values = np.array([eval(equ.replace("X", str(x))) for x in x_values])
    area = 0
    for i in range(y_values.size - 1):
        # consecutive samples are the parallel sides, step is the height
        area += trapezium_area(y_values[i], y_values[i + 1], step)
    return area
# a = np.array([0, 1, 2, 3, 4])
# a_squared = a*a
# print(a_squared)
# a_2 = a_squared+2
# print(a_2)
# a_2_4 = a_2*4
# print(a_2_4)
#
# result = a_2[0] + a_2_4[1] + a_2[2] + a_2[1] + a_2_4[2] + a_2[3] + a_2[2] + a_2_4[3] + a_2[4]
# print(result/3)
# start_time = time.time()
# print("check\n" + str(integration_simp_check("X*X+2", 0, 5, 0.5)))
# end_time = time.time()
# print(end_time - start_time)
# Quick timing demo: Simpson's rule vs. the trapezium rule on X**2.
start_time = time.time()
print("simpton\n" + str(integration_simp("X*X", 1, 4, 10)))
end_time = time.time()
print(end_time - start_time)
start_time = time.time()
print(integration_trape("X*X", 0, 5, 0.001))
end_time = time.time()
print(end_time - start_time)
#
# x = np.zeros(10000) # x軸の値
# y1 = np.zeros(x.size) # y軸の値
#
# for i in range(x.size-2):
# y1[i] = integration_simp("X*X+2", 0, 5, 5/(i+1))
# # figureを生成する
# fig = plt.figure()
#
# # axをfigureに設定する
# ax = fig.add_subplot(1, 1, 1)
#
# # axesにplot
# ax.plot(x, y1, "-", linewidth=1)
#
# # 表示する
# plt.show() |
from django.db import models
from django.utils.text import slugify
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Blog(models.Model):
    """A blog post, slugged from its title, owned by a category and an author."""

    title = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(max_length=100, unique=True)
    content = models.TextField()
    # Bug fix: pass the callable (timezone.now), not its result.
    # timezone.now() would be evaluated once at import time, stamping every
    # post with the server start time instead of its creation time.
    date_posted = models.DateTimeField(default=timezone.now)
    category = models.ForeignKey('blog.Category', on_delete=models.CASCADE)
    author = models.ForeignKey(User, on_delete=models.CASCADE)

    def save(self, *args, **kwargs):
        # keep the slug in sync with the title on every save
        self.slug = slugify(self.title)
        super(Blog, self).save(*args, **kwargs)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('view_post', kwargs={'slug': self.slug})
class Category(models.Model):
    """A blog category; its slug is regenerated from the title on save."""

    title = models.CharField(max_length=100, db_index=True)
    slug = models.SlugField(max_length=100, db_index=True)

    def save(self, *args, **kwargs):
        # derive the slug from the current title before persisting
        self.slug = slugify(self.title)
        super().save(*args, **kwargs)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('home')
import os
import json
def store_string_xml(xml_results, field_values, field_name):
    '''Store each value of *field_values* in *xml_results* under a key made of
    *field_name* plus a zero-padded two-digit index (e.g. FOM00, FOM01, ...).'''
    for field_number, field_value in enumerate(field_values):
        # zero-padded index keeps keys sortable and matches the XML templates
        xml_results[field_name + str(field_number).zfill(2)] = field_value
def write_ispyb_xml(filename, full_command_line, write_directory, xml_results):
    '''Write items in the _xml_results into an XML file to be stored in
    ISPyB.

    Renders the ispyb.xml template under $FAST_EP_ROOT/lib/templates with
    values from *xml_results* and writes it to *filename*; returns early
    (after printing a message) when any template file is missing.

    Fix: file handles are now closed deterministically via ``with`` instead
    of the open(...).read()/write() pattern, which leaked handles on
    interpreters without reference counting.
    '''
    import datetime

    xml_template = os.path.join(os.environ['FAST_EP_ROOT'],
                                'lib', 'templates', 'ispyb.xml')
    phs_stat_fom_template = os.path.join(
        os.environ['FAST_EP_ROOT'], 'lib', 'templates',
        'phasing_statistics_fom.xml')
    phs_stat_mapcc_template = os.path.join(
        os.environ['FAST_EP_ROOT'], 'lib', 'templates',
        'phasing_statistics_mapcc.xml')

    # bail out with a diagnostic if any template is missing
    for template in (xml_template, phs_stat_fom_template,
                     phs_stat_mapcc_template):
        if not os.path.exists(template):
            print('XML template not found: %s' % template)
            return

    # get phasing statistics from xml_results
    (all_phs_stat_fom, all_phs_stat_mapcc) = get_phasing_statistics(
        phs_stat_fom_template, phs_stat_mapcc_template, xml_results)

    time_stamp = '%4d-%02d-%02d %02d:%02d:%02d' % tuple(
        datetime.datetime.now().timetuple()[:6])

    with open(xml_template, 'r') as template_file:
        template_text = template_file.read()
    with open(filename, 'w') as xml_out:
        xml_out.write(template_text.format(
            commandline = full_command_line,
            results_directory = write_directory,
            spacegroup_id = xml_results['SPACEGROUP'],
            solvent_content = xml_results['SOLVENTCONTENT'],
            enantiomorph = xml_results['ENANTIOMORPH'],
            lowres = xml_results['LOWRES'],
            highres = xml_results['HIGHRES'],
            shelxc_spacegroup = xml_results['SHELXC_SPACEGROUP_ID'],
            substructure_method = xml_results['SUBSTRUCTURE_METHOD'],
            phasing_statistics_fom = all_phs_stat_fom,
            phasing_statistics_mapcc = all_phs_stat_mapcc,
            time_stamp = time_stamp
        ))
def get_phasing_statistics(fom_template, cc_template, xml_results):
    """Render per-resolution-bin FOM and map-CC XML fragments.

    Bin values are read from *xml_results* under keys suffixed with a
    zero-padded bin index (e.g. RESOLUTION_LOW00). Returns the concatenated
    (fom_xml, mapcc_xml) strings.
    """
    total_bins = 1
    all_phs_stat_fom = ""
    all_phs_stat_mapcc = ""
    # find number of bins - use RESOLUTION_LOW as the field to check for this
    # (probing starts at index 01; bin 00 is assumed present below)
    done = False
    while not done:
        bin_number_name = str(total_bins).zfill(2)
        try:
            resolution_low = xml_results['RESOLUTION_LOW' + bin_number_name]
        except KeyError:
            done = True
            continue
        total_bins += 1
    for bin_number in range(total_bins):
        bin_number_name = str(bin_number).zfill(2)
        resolution_low = float(xml_results['RESOLUTION_LOW' + bin_number_name])
        resolution_high = float(xml_results['RESOLUTION_HIGH' + bin_number_name])
        fom = float(xml_results['FOM' + bin_number_name])
        mapcc = float(xml_results['MAPCC' + bin_number_name])
        nreflections = int(xml_results['NREFLECTIONS' + bin_number_name])
        # NOTE(review): float()/int() never return None, so these checks can
        # never fire -- they look like leftovers from a str-based version.
        if resolution_low == None or resolution_high == None or \
                fom == None or mapcc == None or nreflections == None:
            raise RuntimeError("One of the fields is empty.")
        all_phs_stat_fom += open(fom_template,'r').read().format(
            bin_number = bin_number + 1,
            number_bins = total_bins,
            bin_low_res = resolution_low,
            bin_high_res = resolution_high,
            bin_fom = fom,
            num_refl = nreflections)
        all_phs_stat_mapcc += open(cc_template,'r').read().format(
            bin_number = bin_number + 1,
            number_bins = total_bins,
            bin_low_res = resolution_low,
            bin_high_res = resolution_high,
            bin_map_cc = mapcc,
            num_refl = nreflections)
    return (all_phs_stat_fom, all_phs_stat_mapcc)
def xmlfile2json(filename):
    '''Parse an ISpyB XML file into a JSON formatted string'''
    from lxml import etree
    tree = etree.parse(filename)
    # convert the element tree to nested dicts/lists, then pretty-print
    xml_dict = __node2json(tree.getroot())
    return json.dumps(xml_dict, indent=4, separators=(',', ':'))
def __node2json(node):
    """Recursively convert an XML element into plain Python data.

    Leaf elements become their text (possibly None); elements with children
    become a dict keyed by child tag, with repeated tags collected in a list.
    An element with children but an empty resulting dict maps to None.
    """
    if len(node):  # element has child elements
        node_dict = {}
        for child in node:
            if child.tag in node_dict:
                # repeated tag: promote the existing entry to a list
                if isinstance(node_dict[child.tag], list):
                    node_dict[child.tag].append(__node2json(child))
                else:
                    node_dict[child.tag] = [node_dict[child.tag], __node2json(child)]
            else:
                node_dict[child.tag] = __node2json(child)
        if node_dict:
            return node_dict
        return None
    else:
        # leaf node: return its text content (may be None)
        return node.text
|
from rest_framework.exceptions import APIException
from rest_framework import status
class ConflictError(APIException):
    """409 raised when a request clashes with existing state.

    NOTE(review): unlike the sibling exceptions below, no default_code is
    set, so DRF's inherited default applies -- confirm whether 'conflict'
    was intended.
    """
    status_code = status.HTTP_409_CONFLICT
    default_detail = 'Conflict'
class InternalServiceError(APIException):
    """500 raised for unexpected server-side failures."""
    status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    default_detail = 'Internal server error, try again later.'
    default_code = 'internal_server_error'
class ServiceUnavailableError(APIException):
    """503 raised when a dependent service is temporarily unreachable."""
    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_detail = 'Service temporarily unavailable, try again later.'
    default_code = 'service_unavailable'
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import cityscapesscripts.helpers.labels as CSLabels
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmengine.fileio import dump
from mmengine.utils import (Timer, mkdir_or_exist, track_parallel_progress,
track_progress)
def collect_files(img_dir, gt_dir):
    """Pair each leftImg8bit image under *img_dir* with its gtFine instance-id
    and label-id maps under *gt_dir*; returns a list of 3-tuples."""
    suffix = 'leftImg8bit.png'
    pairs = []
    for img_path in glob.glob(osp.join(img_dir, '**/*.png')):
        assert img_path.endswith(suffix), img_path
        # shared relative stem: strip the img_dir prefix and the suffix
        stem = img_path[len(img_dir):-len(suffix)]
        pairs.append((img_path,
                      gt_dir + stem + 'gtFine_instanceIds.png',
                      gt_dir + stem + 'gtFine_labelIds.png'))
    assert len(pairs), f'No images found in {img_dir}'
    print(f'Loaded {len(pairs)} images from {img_dir}')
    return pairs
def collect_annotations(files, nproc=1):
    """Run load_img_info over all file triples, in parallel when nproc > 1."""
    print('Loading annotation images')
    if nproc > 1:
        images = track_parallel_progress(load_img_info, files, nproc=nproc)
    else:
        images = track_progress(load_img_info, files)
    return images
def load_img_info(files):
    """Build a COCO-style image-info dict (with instance annotations) for one
    (img_file, inst_file, segm_file) triple produced by collect_files."""
    img_file, inst_file, segm_file = files
    inst_img = mmcv.imread(inst_file, 'unchanged')
    # ids < 24 are stuff labels (filtering them first is about 5% faster)
    unique_inst_ids = np.unique(inst_img[inst_img >= 24])
    anno_info = []
    for inst_id in unique_inst_ids:
        # For non-crowd annotations, inst_id // 1000 is the label_id
        # Crowd annotations have <1000 instance ids
        label_id = inst_id // 1000 if inst_id >= 1000 else inst_id
        label = CSLabels.id2label[label_id]
        if not label.hasInstances or label.ignoreInEval:
            continue
        category_id = label.id
        iscrowd = int(inst_id < 1000)
        # Fortran order is what pycocotools' RLE encoder expects
        mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F')
        mask_rle = maskUtils.encode(mask[:, :, None])[0]
        area = maskUtils.area(mask_rle)
        # convert to COCO style XYWH format
        bbox = maskUtils.toBbox(mask_rle)
        # for json encoding
        mask_rle['counts'] = mask_rle['counts'].decode()
        anno = dict(
            iscrowd=iscrowd,
            category_id=category_id,
            bbox=bbox.tolist(),
            area=area.tolist(),
            segmentation=mask_rle)
        anno_info.append(anno)
    # Cityscapes nests images one directory deep (per city/video)
    video_name = osp.basename(osp.dirname(img_file))
    img_info = dict(
        # remove img_prefix for filename
        file_name=osp.join(video_name, osp.basename(img_file)),
        height=inst_img.shape[0],
        width=inst_img.shape[1],
        anno_info=anno_info,
        segm_file=osp.join(video_name, osp.basename(segm_file)))
    return img_info
def cvt_annotations(image_infos, out_json_name):
    """Assemble COCO-format images/annotations/categories from per-image infos
    and dump the result to *out_json_name*; also returns the dict."""
    out_json = dict()
    img_id = 0
    ann_id = 0
    out_json['images'] = []
    out_json['categories'] = []
    out_json['annotations'] = []
    for image_info in image_infos:
        image_info['id'] = img_id
        anno_infos = image_info.pop('anno_info')
        out_json['images'].append(image_info)
        for anno_info in anno_infos:
            anno_info['image_id'] = img_id
            anno_info['id'] = ann_id
            out_json['annotations'].append(anno_info)
            ann_id += 1
        img_id += 1
    for label in CSLabels.labels:
        if label.hasInstances and not label.ignoreInEval:
            cat = dict(id=label.id, name=label.name)
            out_json['categories'].append(cat)
    # splits without instance annotations ship without the key entirely
    if len(out_json['annotations']) == 0:
        out_json.pop('annotations')
    dump(out_json, out_json_name)
    return out_json
def parse_args():
    """Build and evaluate the command-line interface for the converter."""
    arg_parser = argparse.ArgumentParser(
        description='Convert Cityscapes annotations to COCO format')
    arg_parser.add_argument('cityscapes_path', help='cityscapes data path')
    arg_parser.add_argument('--img-dir', default='leftImg8bit', type=str)
    arg_parser.add_argument('--gt-dir', default='gtFine', type=str)
    arg_parser.add_argument('-o', '--out-dir', help='output path')
    arg_parser.add_argument(
        '--nproc', default=1, type=int, help='number of process')
    return arg_parser.parse_args()
def main():
    """Convert the train/val/test splits of a Cityscapes tree to COCO JSON
    files in the chosen output directory (defaults to the dataset root)."""
    args = parse_args()
    cityscapes_path = args.cityscapes_path
    out_dir = args.out_dir if args.out_dir else cityscapes_path
    mkdir_or_exist(out_dir)
    img_dir = osp.join(cityscapes_path, args.img_dir)
    gt_dir = osp.join(cityscapes_path, args.gt_dir)
    set_name = dict(
        train='instancesonly_filtered_gtFine_train.json',
        val='instancesonly_filtered_gtFine_val.json',
        test='instancesonly_filtered_gtFine_test.json')
    for split, json_name in set_name.items():
        print(f'Converting {split} into {json_name}')
        with Timer(print_tmpl='It took {}s to convert Cityscapes annotation'):
            files = collect_files(
                osp.join(img_dir, split), osp.join(gt_dir, split))
            image_infos = collect_annotations(files, nproc=args.nproc)
            cvt_annotations(image_infos, osp.join(out_dir, json_name))
if __name__ == '__main__':
main()
|
import os
import pathlib
from unittest.mock import Mock
import cv2
import numpy as np
from liveprint.lp import Projector, WhiteBackground
from liveprint.pose import PosesFactory, Poses, Keypoint, TorsoKeyPoints
from liveprint.utils import Apng
class FakePosesFactory(PosesFactory):
    """Test double: always yields a FakePoses regardless of the input image."""
    def poses(self, image):
        return FakePoses()
class FakePoses(Poses):
    """Test double exposing a single hard-coded torso keypoint set."""
    def torso_keypoints(self, threshold=0.15):
        # threshold is ignored; the fake keypoints always pass
        return iter([FakeTorsoKeypoints()])
class FakeKeypoint(Keypoint):
    """Concrete keypoint with a fixed position and confidence score."""
    def __init__(self, number, x, y, score):
        self.number = number  # keypoint index
        self.x = x
        self.y = y
        self.score = score
    def threshold(self, thresh):
        # True when the detection confidence reaches the given threshold
        return self.score >= thresh
    def coords(self):
        # pixel coordinates, truncated to ints
        return int(self.x), int(self.y)
class FakeTorsoKeypoints(TorsoKeyPoints):
    """Torso keypoint set with fixed, test-friendly pixel coordinates.

    Every keypoint is created with a confidence score of 0.6.
    """
    def left_shoulder(self) -> "Keypoint":
        return self._left_shoulder
    def right_shoulder(self) -> "Keypoint":
        return self._right_shoulder
    def left_hip(self) -> "Keypoint":
        return self._left_hip
    def right_hip(self) -> "Keypoint":
        return self._right_hip
    def __init__(
        self,
        left_shoulder=(740, 161,),
        right_shoulder=(875, 150,),
        left_hip=(759, 308,),
        right_hip=(862, 311,),
    ):
        # numbers 5/6/11/12 presumably follow COCO keypoint indexing -- TODO confirm
        self._left_shoulder = FakeKeypoint(5, *left_shoulder, 0.6)
        self._right_shoulder = FakeKeypoint(6, *right_shoulder, 0.6)
        self._left_hip = FakeKeypoint(11, *left_hip, 0.6)
        self._right_hip = FakeKeypoint(12, *right_hip, 0.6)
class FakeProjectableRegion:
    """Test double that ignores the webcam frame and produces a solid white
    image of the configured output resolution."""

    def __init__(self, output_height=768, output_width=1024):
        # (H, W, 3) shape of every frame returned by of()
        self._output_resolution = (output_height, output_width, 3)

    def of(self, webcam_img):
        # all-white uint8 image, independent of the input frame
        return np.full(self._output_resolution, 255, dtype=np.uint8)
def test_projector():
    """Render on a white background must match the stored reference image."""
    path = os.path.join(
        pathlib.Path(__file__).parent.absolute(), "..", "resources", "test_image_1.png"
    )
    projectable_region_dims = [768, 1024]
    output_image = Projector(
        WhiteBackground([*projectable_region_dims, 3]),
        FakePosesFactory(),
        FakeProjectableRegion(*projectable_region_dims),
        Apng([cv2.imread(path, cv2.IMREAD_UNCHANGED)]),
    ).project(Mock())
    # golden-file comparison against a pre-rendered reference
    expected_image = cv2.imread(
        os.path.join(
            pathlib.Path(__file__).parent.absolute(),
            "..",
            "resources",
            "test_output_1.png",
        ),
        cv2.IMREAD_UNCHANGED,
    )
    np.testing.assert_almost_equal(output_image, expected_image)
class TransparentBackground:
    """Background stub contributing no layers (fully transparent)."""
    def layers(self):
        return []
def test_projector_transparent_background():
    """Render on a transparent background must match its reference image."""
    path = os.path.join(
        pathlib.Path(__file__).parent.absolute(), "..", "resources", "test_image_1.png"
    )
    projectable_region_dims = [768, 1024]
    output_image = Projector(
        TransparentBackground(),
        FakePosesFactory(),
        FakeProjectableRegion(*projectable_region_dims),
        Apng([cv2.imread(path, cv2.IMREAD_UNCHANGED)]),
    ).project(None)
    # golden-file comparison against a pre-rendered reference
    expected_image = cv2.imread(
        os.path.join(
            pathlib.Path(__file__).parent.absolute(),
            "..",
            "resources",
            "test_output_2.png",
        ),
        cv2.IMREAD_UNCHANGED,
    )
    np.testing.assert_almost_equal(output_image, expected_image)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 02:40:45 2020
@author: amk170930
"""
import airsim
import numpy as np
import setup_path
import os
from datetime import datetime
import time
class frequencyTest:
    """Benchmarks for the update frequency of AirSim car state and sensor APIs.

    NOTE(review): the statements directly below run at class-definition
    (import) time and immediately connect to a running AirSim simulator;
    the objects they create are shared class-level attributes.
    """
    # connect to the AirSim simulator
    client = airsim.CarClient()
    client.confirmConnection()
    client.enableApiControl(True)
    car_controls = airsim.CarControls()
    start = time.time()
    prevTime = start
    car_state = client.getCarState()
    def carStateFreq(self):
        """Drive reverse/brake cycles and print the getCarState() polling rate."""
        #Test variables
        revTime = 2 #seconds
        brakeTime = 1 #seconds
        tot = 0
        for idx in range(10):
            #Go reverse
            self.car_controls.throttle = -0.5
            self.car_controls.is_manual_gear = True;
            self.car_controls.manual_gear = -1
            self.car_controls.steering = 0
            self.client.setCarControls(self.car_controls)
            print("Go reverse")
            time.sleep(revTime) # let car drive a bit
            self.car_controls.is_manual_gear = False; # change back gear to auto
            self.car_controls.manual_gear = 0
            # apply brakes
            self.car_controls.brake = 1
            self.client.setCarControls(self.car_controls)
            print("Apply brakes")
            time.sleep(brakeTime) # let car drive a bit
            self.car_controls.brake = 0 #remove brake
            #Time calculations
            currentTime = time.time()
            self.car_state = self.client.getCarState()
            # subtract the deliberate sleeps so only API latency remains
            diff = float((currentTime - self.prevTime - revTime - brakeTime)*1000)#miliseconds
            self.prevTime = currentTime
            freq = 1000/diff #Hertz
            tot = tot + freq
            print("Difference: %f Frequency: %f" % (diff,freq))
        print("\nAverage frequency: %f"% (tot/10.0))
    def Freq():
        """Measure polling frequency for IMU/barometer/magnetometer/GPS/distance sensors.

        NOTE(review): defined without ``self`` and invoked as
        ``frequencyTest.Freq()`` -- this only works because Python 3 treats
        it as a plain function; consider @staticmethod.
        """
        client = airsim.CarClient()
        VehicleClient = airsim.VehicleClient()
        sensor_state = VehicleClient.getImuData()
        car_controls = airsim.CarControls()
        testCases = 10
        revTime = 0#seconds
        time1 = time.time()
        for sensor in range(5):
            idx = 0
            tot = 0
            if sensor == 0:
                print("\n\n\nIMU Data:")
            elif sensor ==1:
                print("\n\n\nBarometer Data:")
            elif sensor == 2:
                print("\n\n\nMagnetometer Data:")
            elif sensor == 3:
                print("\n\n\nGps Data:")
            elif sensor == 4:
                print("\n\n\nDistance Sensor Data:")
            #prevTime = datetime.now().timestamp()
            prevTime = sensor_state.time_stamp/1000000000
            while idx <=testCases:
                #Go reverse
                car_controls.throttle = -0.5
                car_controls.is_manual_gear = True;
                car_controls.manual_gear = -1
                car_controls.steering = 0
                client.setCarControls(car_controls)
                #print("Go reverse")
                time.sleep(revTime) # let car drive a bit
                car_controls.is_manual_gear = False; # change back gear to auto
                car_controls.manual_gear = 0
                if sensor == 0:
                    sensor_state = VehicleClient.getImuData()
                elif sensor ==1:
                    sensor_state = VehicleClient.getBarometerData()
                elif sensor == 2:
                    sensor_state = VehicleClient.getMagnetometerData()
                elif sensor == 3:
                    sensor_state = VehicleClient.getGpsData()
                elif sensor == 4:
                    sensor_state = VehicleClient.getDistanceSensorData()
                #Time calculations
                #currentTime = datetime.now().timestamp()
                #car_state = client.getCarState()
                currentTime = sensor_state.time_stamp/1000000000 #convert nanoseconds to seconds
                diff = (((currentTime - prevTime)-revTime)*1000)#miliseconds
                prevTime = currentTime
                if diff !=0:
                    freq = 1000/diff #Hertz
                    tot = tot + freq
                else:
                    #print("0 difference encountered")
                    continue
                #print("Difference (In miliseconds): %f Frequency (Hz): %f" % (diff,freq))
                idx = idx + 1
            time2 = time.time()
            print("\nAverage frequency: %f"% (float(idx)/(time2-time1)))
#frequencyTest.carStateFreq()
# run the sensor-frequency benchmark at import time
frequencyTest.Freq()
|
#!/usr/bin/env python3
# NOTE: NEEDS SYNCHRONIZATION FOR MULTITHREADING
import random
import string
from enum import Enum, auto
from abc import ABC,abstractmethod
from typing import (
Dict,
List,
Optional,
Tuple
)
def _random_id() -> str:
    """Return a random lowercase room id of GameGateway.NUM_ROOM_LETTERS length."""
    # renamed from 'ascii', which shadowed the ascii() builtin
    letters = string.ascii_lowercase
    return "".join(random.choices(letters, k=GameGateway.NUM_ROOM_LETTERS))
class JoinReturnCodes(Enum):
    """Result of attempting to join a room."""
    SUCCESS = auto()
    NAME_IN_USE = auto()
    ROOM_NOT_FOUND = auto()
class GetReturnCodes(Enum):
    """Result of a lookup by room/name (not referenced in this module's visible code)."""
    SUCCESS = auto()
    ROOM_NOT_FOUND = auto()
    NAME_NOT_FOUND = auto()
class StartReturnCodes(Enum):
    """Result of attempting to start a game in a room."""
    SUCCESS = auto()
    TOO_FEW_PLAYERS = auto()
    ALREADY_STARTED = auto()
    ROOM_NOT_FOUND = auto()
class InteractReturnCodes(Enum):
    """Result of reading room state or submitting player data."""
    SUCCESS = auto()
    INVALID_DATA = auto()
    WRONG_STATE = auto()
    ROOM_NOT_FOUND = auto()
    PLAYER_NOT_FOUND = auto()
class Player:
    """Placeholder for per-player state; currently carries no data."""
    def __init__(self):
        pass
class Room(ABC):
    """Abstract base for a game room holding players keyed by unique name."""

    def __init__(self):
        # player name -> Player; names must be unique within a room
        self.players: Dict[str, Player] = {}

    def add_player(self, name) -> bool:
        """Add a player; return False when the name is already taken."""
        if name in self.players:  # idiomatic membership test (was `.keys()`)
            return False
        self.players[name] = Player()
        return True

    @abstractmethod
    def start(self) -> StartReturnCodes:
        """Begin the game; see StartReturnCodes for possible outcomes."""

    @abstractmethod
    def get_room_state(self, player) -> Tuple[InteractReturnCodes, str, str]:
        """Return (code, str, str); the string payloads are subclass-defined."""

    @abstractmethod
    def submit_data(self, player, data) -> InteractReturnCodes:
        """Apply *data* from *player* to the game state."""
class GameGateway:
    """Registry of active rooms; routes joins, starts, and data submissions."""

    NUM_ROOM_LETTERS = 4  # length of generated room ids

    def __init__(self):
        # room id -> Room
        self.rooms: Dict[str, Room] = {}

    def room_start(self, room) -> StartReturnCodes:
        """Start the game in *room* if the room exists."""
        if room not in self.rooms:
            return StartReturnCodes.ROOM_NOT_FOUND
        return self.rooms[room].start()

    def new_game(self, room_class) -> str:
        """Create a room of *room_class* and return its generated id.

        NOTE(review): a random id may collide with an existing room and
        silently replace it -- confirm whether retry-on-collision is needed.
        """
        room = _random_id()
        self.rooms[room] = room_class()
        return room

    def join_room(self, room, name) -> JoinReturnCodes:
        """Add *name* to *room*; EAFP -- a missing room raises KeyError."""
        try:
            success = self.rooms[room].add_player(name)
            if success:
                return JoinReturnCodes.SUCCESS
            else:
                return JoinReturnCodes.NAME_IN_USE
        except KeyError:  # was a bare `except:`, which also hid unrelated bugs
            return JoinReturnCodes.ROOM_NOT_FOUND

    def get_room_state(self, room, name=None) -> Tuple[InteractReturnCodes, str, str]:
        """Fetch room state; *name* of None requests the room-wide view."""
        if room in self.rooms:
            if name is None or name in self.rooms[room].players:
                return self.rooms[room].get_room_state(name)
            else:
                return (InteractReturnCodes.PLAYER_NOT_FOUND, '', '')
        return (InteractReturnCodes.ROOM_NOT_FOUND, '', '')

    def submit_data(self, room, name, data) -> InteractReturnCodes:
        """Forward *data* from player *name* to the room's game logic."""
        if room in self.rooms:
            if name in self.rooms[room].players:
                return self.rooms[room].submit_data(name, data)
            else:
                return InteractReturnCodes.PLAYER_NOT_FOUND
        return InteractReturnCodes.ROOM_NOT_FOUND
|
class Solution:
    def romanToInt(self, s: str) -> int:
        """Convert a Roman numeral string to its integer value."""
        values = {
            'I': 1,
            'V': 5,
            'X': 10,
            'L': 50,
            'C': 100,
            'D': 500,
            'M': 1000
        }
        total = 0
        prev = 0
        for ch in s:
            cur = values[ch]
            if prev < cur:
                # A smaller numeral precedes a larger one (e.g. IV): the
                # smaller value was already added once, so subtract it twice.
                total += cur - 2 * prev
            else:
                total += cur
            prev = cur
        return total
|
import numpy as np
from sklearn.linear_model import LogisticRegression
# Training data: study hours (feature) and pass/fail outcome (label, 1 = approved).
hours = np.array([0.5, 0.75, 1, 1.25, 1.5, 1.75, 1.75, 2, 2.25, 2.5, 2.75, 3, 3.25, 3.5, 4, 4.25, 4.5, 8, 4.75, 5, 5.5]).reshape(-1, 1)
approved = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1])
# Fit a logistic regression classifier with default hyperparameters.
lr = LogisticRegression()
lr.fit(hours, approved)
# Predict labels and class probabilities for unseen study times.
new_hours = np.array([1, 5.22, 4, 3.4, 6, 0]).reshape(-1, 1)
prediction = lr.predict(new_hours)
prob_predictions = lr.predict_proba(new_hours)
# Print floats with 3 decimal places.
np.set_printoptions(3)
print('Prediction data:')
print('New Hours: {}'.format(new_hours.reshape(1,-1)))
print('Approved or not: {}'.format(prediction))
print('Probability: {}'.format(prob_predictions[:,1]))
|
__author__ = 'dang'
from datetime import datetime
import json
import redis
import configinfo
class PubSubModel:
    '''
    A backend for real-time events reporting, every time the receivers send an alert
    it will be save into redis database, redis will publish the events to
    event stream consumed by the browser
    '''
    # shared class-level client, lazily created by connect_db()
    client = None
    def __init__(self):
        self.connect_db()
    def connect_db(self):
        '''
        initialize the connection to the redis database
        '''
        if self.client is None:
            self.client = redis.StrictRedis(host=configinfo.host, port=configinfo.port)
    def get_room(self, sensor_id):
        # room previously stored for this sensor (bytes from redis, or None)
        return self.client.get(sensor_id)
    def set_room(self, sensor_id, room):
        self.client.set(sensor_id, room)
    def publish_event(self, event):
        '''Publish one sensor reading on the configured event stream.

        Expects *event* to carry 'sensor_id', 'rf' and 'C' keys; 'C' is
        presumably a temperature in degrees Celsius -- TODO confirm.
        '''
        now = datetime.now().replace(microsecond=0).time()
        # data = {
        #     "sensor_id":event['sensor_id'],
        #     "co2_level":event['co2_level'],
        #     "timestamp":now.__format__("%Y-%m-%d %H:%M:%S"),
        #     "room": self.get_room(event['sensor_id']),
        #     "state":event['state']
        # }
        # magic threshold: readings above 23 are flagged -- confirm unit/limit
        state = "OK"
        if event['C'] > 23:
            state = "notOK"
        data = {
            "sensor_id": event['sensor_id'],
            "room": self.get_room(event['sensor_id']),
            "rf": event['rf'],
            "temperature": event['C'],
            "state": state
        }
        self.client.publish(configinfo.event_stream_name, json.dumps(data))
    def get_publisher(self):
        # NOTE(review): pubsub() returns a *subscriber* handle; confirm naming
        return self.client.pubsub()
|
from rest_framework import serializers
from assets.models import *
class HostGroupSerializer(serializers.ModelSerializer):
    """Serializes HostGroup records for the assets API."""
    class Meta:
        model = HostGroup
        fields = ('id', 'name', 'createTime',)
class HostSerializer(serializers.ModelSerializer):
    """Serializes Host records; hostGroup is exposed as the related group's pk."""
    # writable primary-key reference to the owning HostGroup
    hostGroup = serializers.PrimaryKeyRelatedField(many=False, queryset=HostGroup.objects.all())
    class Meta:
        model = Host
        fields = (
            'id', 'hostGroup', 'serialNumber', 'ip', 'ipEth1', 'kernel', 'os', 'osArch', 'osRelease', 'saltId',
            'saltStatus', 'createTime', 'updateTime')
|
#-*- coding: UTF-8 -*-
import re
from flask import render_template, request, redirect, url_for, json, abort, session
from xichuangzhu import app
import config
from xichuangzhu.models.author_model import Author
from xichuangzhu.models.work_model import Work
from xichuangzhu.models.collection_model import Collection
from xichuangzhu.models.dynasty_model import Dynasty
from xichuangzhu.models.quote_model import Quote
from xichuangzhu.utils import content_clean, check_admin
# page all authors
#--------------------------------------------------
# view (public)
@app.route('/author')
def authors():
    """List all authors grouped by dynasty, each with a random quote."""
    dynasties = Dynasty.get_dynasties()
    for d in dynasties:
        d['authors'] = Author.get_authors_by_dynasty(d['DynastyID'])
        for a in d['authors']:
            quote = Quote.get_quote_by_random(a['AuthorID'])
            # fall back to an empty string when the author has no quotes
            a['Quote'] = quote['Quote'] if quote else ""
            a['QuotesNum'] = Quote.get_quotes_num_by_author(a['AuthorID'])
    hot_authors = Author.get_hot_authors(8)
    for a in hot_authors:
        quote = Quote.get_quote_by_random(a['AuthorID'])
        a['Quote'] = quote['Quote'] if quote else ""
    return render_template('authors.html', dynasties=dynasties, hot_authors=hot_authors)
# page single author
#--------------------------------------------------
# view (public)
@app.route('/author/<author_abbr>')
def single_author(author_abbr):
    """Show one author's page: random quote, collections, works, per-type counts."""
    author = Author.get_author_by_abbr(author_abbr)
    if not author:
        abort(404)
    quote = Quote.get_quote_by_random(author['AuthorID'])
    quotes_num = Quote.get_quotes_num_by_author(author['AuthorID'])
    collections = Collection.get_collections_by_author(author['AuthorID'])
    works = Work.get_works_by_author(author['AuthorID'])
    for work in works:
        work['Content'] = content_clean(work['Content'])
    # count num of different type work.
    # return like this - works_num['shi'] = {'type_name': '诗', 'num': 0}.
    work_types = Work.get_types()
    works_num = {}
    for wt in work_types:
        works_num[wt['WorkType']] = {'type_name': wt['TypeName'], 'num': 0}
    for work in works:
        work_type = work['Type']
        works_num[work_type]['num'] += 1
    return render_template('single_author.html', author=author, quote=quote, quotes_num=quotes_num, collections=collections, works=works, works_num=works_num)
# page add author
#--------------------------------------------------
# view (admin)
@app.route('/author/add', methods=['GET', 'POST'])
def add_author():
    """Admin only: render the add-author form (GET) or create the author (POST)."""
    check_admin()
    if request.method == 'GET':
        dynasties = Dynasty.get_dynasties()
        return render_template('add_author.html', dynasties=dynasties)
    elif request.method == 'POST':
        author = request.form['author']
        abbr = request.form['abbr']
        introduction = request.form['introduction']
        birthYear = request.form['birthYear']
        deathYear = request.form['deathYear']
        dynastyID = int(request.form['dynastyID'])
        Author.add_author(author, abbr, introduction, birthYear, deathYear, dynastyID)
        return redirect(url_for('single_author', author_abbr=abbr))
# page edit author
#--------------------------------------------------
# view (admin)
@app.route('/author/edit/<int:authorID>', methods=['GET', 'POST'])
def edit_author(authorID):
    """Admin view: show the edit form (GET) or save author changes (POST)."""
    check_admin()
    if request.method == 'POST':
        form = request.form
        abbr = form['abbr']
        Author.edit_author(
            form['author'],
            abbr,
            form['introduction'],
            form['birthYear'],
            form['deathYear'],
            int(form['dynastyID']),
            authorID,
        )
        return redirect(url_for('single_author', author_abbr=abbr))
    # GET: prefill the form with the current author record.
    dynasties = Dynasty.get_dynasties()
    author = Author.get_author_by_id(authorID)
    return render_template('edit_author.html', dynasties=dynasties, author=author)
# page - admin quotes
#--------------------------------------------------
# view (admin)
@app.route('/quote/admin/<int:author_id>')
def admin_quotes(author_id):
    """Admin page listing every quote attached to one author."""
    check_admin()
    return render_template('admin_quotes.html',
                           quotes=Quote.get_quotes_by_author(author_id),
                           author=Author.get_author_by_id(author_id))
# proc - add quote (admin)
@app.route('/quote/add/<int:author_id>', methods=['POST'])
def add_quote(author_id):
    """Admin action: attach a new quote (with its source work) to an author."""
    check_admin()
    text = request.form['quote']
    work_id = int(request.form['work-id'])
    # The work title is denormalized and stored alongside the quote.
    title = Work.get_work(work_id)['Title']
    Quote.add(author_id, text, work_id, title)
    return redirect(url_for('admin_quotes', author_id=author_id))
# proc - delete quote (admin)
@app.route('/quote/delete/<int:quote_id>')
def delete_quote(quote_id):
    """Admin action: remove a quote, then return to its author's quote list."""
    check_admin()
    back_to_author = int(request.args['author_id'])
    Quote.delete(quote_id)
    return redirect(url_for('admin_quotes', author_id=back_to_author))
# page edit quote
#--------------------------------------------------
# view (admin)
@app.route('/quote/edit/<int:quote_id>', methods=['GET', 'POST'])
def edit_quote(quote_id):
    """Admin view: show the quote edit form (GET) or save the edited quote (POST)."""
    check_admin()
    if request.method == 'POST':
        text = request.form['quote']
        work = Work.get_work(int(request.form['work-id']))
        Quote.edit(quote_id, work['AuthorID'], text, work['WorkID'], work['Title'])
        return redirect(url_for('admin_quotes', author_id=work['AuthorID']))
    # GET: load the existing quote into the form.
    quote = Quote.get_quote_by_id(quote_id)
    return render_template('edit_quote.html', quote=quote)
# Generated by Django 3.0.14 on 2022-01-31 20:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: default Device ordering + Machine.is_active flag."""
    dependencies = [
        ('machines', '0004_device_invoice_pdf'),
    ]
    operations = [
        # Newest devices first by default.
        migrations.AlterModelOptions(
            name='device',
            options={'ordering': ['-pk']},
        ),
        # Soft-enable flag; existing rows default to active.
        migrations.AddField(
            model_name='machine',
            name='is_active',
            field=models.BooleanField(blank=True, default=True),
        ),
    ]
|
import string
import numpy as np
import pandas as pd
import pytest
from matchms import Spectrum
from ms2deepscore import SpectrumBinner
from ms2deepscore.data_generators import (DataGeneratorAllInchikeys,
DataGeneratorAllSpectrums,
DataGeneratorCherrypicked,
_exclude_nans_from_labels,
_validate_labels)
from ms2deepscore.MetadataFeatureGenerator import (CategoricalToBinary,
StandardScaler)
from ms2deepscore.spectrum_pair_selection import (
SelectedCompoundPairs, select_spectrum_pairs_wrapper)
from tests.test_user_worfklow import (get_reference_scores,
load_processed_spectrums)
def create_dummy_data():
    """Create fake data to test generators.

    Builds 10 one-peak spectra whose inchikeys are the letters A-J
    (repeated 14x), a fake Tanimoto similarity matrix, and a second,
    double-intensity spectrum for the last five inchikeys.
    Returns (binned_spectrums, tanimoto_df, spectrum_binner).
    """
    mz, intens = 100.0, 0.1
    spectrums = []
    letters = list(string.ascii_uppercase[:10])
    # Create fake similarities
    similarities = {}
    for i, letter1 in enumerate(letters):
        for j, letter2 in enumerate(letters):
            # Similarity decays linearly with alphabetical distance.
            similarities[(letter1, letter2)] = (len(letters) - abs(i - j)) / len(letters)
    tanimoto_fake = pd.DataFrame(similarities.values(),
                                 index=similarities.keys()).unstack()
    # Create fake spectra
    fake_inchikeys = []
    for i, letter in enumerate(letters):
        dummy_inchikey = f"{14 * letter}-{10 * letter}-N"
        fake_inchikeys.append(dummy_inchikey)
        spectrums.append(Spectrum(mz=np.array([mz + (i+1) * 25.0]), intensities=np.array([intens]),
                                  metadata={"inchikey": dummy_inchikey,
                                            "compound_name": letter}))
        # Generate a duplicated spectrum for half the inchikeys
        if i >= 5:
            spectrums.append(Spectrum(mz=np.array([mz + (i+1) * 25.0]), intensities=np.array([2*intens]),
                                      metadata={"inchikey": dummy_inchikey,
                                                "compound_name": f"{letter}-2"}))
    # Set the column and index names to the 14-char inchikey prefix
    tanimoto_fake.columns = [x[:14] for x in fake_inchikeys]
    tanimoto_fake.index = [x[:14] for x in fake_inchikeys]
    ms2ds_binner = SpectrumBinner(100, mz_min=10.0, mz_max=1000.0, peak_scaling=1)
    binned_spectrums = ms2ds_binner.fit_transform(spectrums)
    return binned_spectrums, tanimoto_fake, ms2ds_binner
def create_test_data():
    """Load real processed spectra and reference Tanimoto scores for the tests.

    Returns (binned_spectrums, tanimoto_scores_df, spectrum_binner).
    """
    binner = SpectrumBinner(100, mz_min=10.0, mz_max=1000.0, peak_scaling=0.5)
    binned = binner.fit_transform(load_processed_spectrums())
    scores = get_reference_scores()
    return binned, scores, binner
def collect_results(generator, batch_size, dimension):
    """Stack every batch from a generator into fixed-size arrays.

    Returns X of shape (batch_size, dimension, 2, n_batches) holding the
    two network inputs, and y of shape (batch_size, n_batches) with labels.
    """
    n_batches = len(generator)
    X = np.zeros((batch_size, dimension, 2, n_batches))
    y = np.zeros((batch_size, n_batches))
    for batch_index, batch in enumerate(generator):
        inputs, labels = batch
        X[:, :, 0, batch_index] = inputs[0]
        X[:, :, 1, batch_index] = inputs[1]
        y[:, batch_index] = labels
    return X, y
def test_DataGeneratorCherrypicked():
    """Test DataGeneratorCherrypicked using generated data.

    Builds 15 unique inchikeys (two spectra each) with deterministic fake
    fingerprints and checks batch shapes, index coverage, and that labels
    are roughly evenly distributed over the four selection bins.
    """
    # Define other parameters
    batch_size = 8
    mz, intens = 100.0, 0.1
    spectrums = []
    num_of_unique_inchikeys = 15
    letters = list(string.ascii_uppercase[:num_of_unique_inchikeys])
    letters += letters  # duplicate each letter --> two spectra per inchikey

    def generate_binary_vector(n):
        # Deterministic fake fingerprint for spectrum n.
        # Fixed: this previously read the loop variable `i` from the
        # enclosing scope and ignored its own argument `n`.
        binary_vector = np.zeros(10, dtype=int)
        binary_vector[n % 3] = 1
        binary_vector[n % 5 + 3] = 1
        binary_vector[n % 4] = 1
        binary_vector[n % 10] = 1
        binary_vector[8 - n // 9] = 1
        binary_vector[6 - n // 15] = 1
        return binary_vector

    # Create fake spectra
    fake_inchikeys = []
    for i, letter in enumerate(letters):
        dummy_inchikey = f"{14 * letter}-{10 * letter}-N"
        fingerprint = generate_binary_vector(i)
        fake_inchikeys.append(dummy_inchikey)
        spectrums.append(Spectrum(mz=np.array([mz + (i+1) * 25.0]), intensities=np.array([intens]),
                                  metadata={"precursor_mz": 111.1,
                                            "inchikey": dummy_inchikey,
                                            "compound_name": letter,
                                            "fingerprint": fingerprint,
                                            }))
    ms2ds_binner = SpectrumBinner(100, mz_min=10.0, mz_max=1000.0, peak_scaling=1)
    binned_spectrums = ms2ds_binner.fit_transform(spectrums)
    dimension = len(ms2ds_binner.known_bins)
    scp = select_spectrum_pairs_wrapper(
        spectrums,
        selection_bins=np.array([(x/4, x/4 + 0.25) for x in range(0, 4)]),
        average_pairs_per_bin=1)
    # Create generator (all augmentation disabled for determinism)
    test_generator = DataGeneratorCherrypicked(binned_spectrums=binned_spectrums,
                                               spectrum_binner=ms2ds_binner,
                                               selected_compound_pairs=scp,
                                               batch_size=batch_size,
                                               augment_removal_max=0.0,
                                               augment_removal_intensity=0.0,
                                               augment_intensity=0.0,
                                               augment_noise_max=0)
    x, y = test_generator.__getitem__(0)
    assert x[0].shape == x[1].shape == (batch_size, dimension), "Expected different data shape"
    assert y.shape[0] == batch_size
    assert set(test_generator.indexes) == set(list(range(num_of_unique_inchikeys))), "Something wrong with generator indices"
    # Test many cycles --> scores properly distributed into bins?
    counts = []
    repetitions = 100
    total = batch_size * repetitions
    for _ in range(repetitions):
        for batch in test_generator:
            counts.extend(list(batch[1]))
    assert len(counts) == total
    assert (np.array(counts) > 0.5).sum() > 0.4 * total
    assert (np.array(counts) <= 0.5).sum() > 0.4 * total
    # Check mostly equal distribution accross all four bins:
    assert (np.array(counts) <= 0.25).sum() > 0.22 * total
    assert ((np.array(counts) > 0.25) & (np.array(counts) <= 0.5)).sum() > 0.22 * total
    assert ((np.array(counts) > 0.5) & (np.array(counts) <= 0.75)).sum() > 0.22 * total
    assert (np.array(counts) > 0.75).sum() > 0.22 * total
def test_DataGeneratorAllInchikeys():
    """Test DataGeneratorAllInchikeys using generated data.

    Checks batch shapes, that each inchikey is picked exactly once per
    batch, and that labels are roughly balanced around 0.5.
    """
    binned_spectrums, tanimoto_scores_df, ms2ds_binner = create_dummy_data()
    assert binned_spectrums[0].binned_peaks == {0: 0.1}, "Something went wrong with the binning"
    # Define other parameters
    batch_size = 8
    dimension = len(ms2ds_binner.known_bins)
    selected_inchikeys = tanimoto_scores_df.index
    # Create generator (all augmentation disabled for determinism)
    test_generator = DataGeneratorAllInchikeys(binned_spectrums=binned_spectrums,
                                               selected_inchikeys=selected_inchikeys,
                                               spectrum_binner=ms2ds_binner,
                                               reference_scores_df=tanimoto_scores_df,
                                               batch_size=batch_size,
                                               augment_removal_max=0.0,
                                               augment_removal_intensity=0.0,
                                               augment_intensity=0.0,
                                               augment_noise_max=0)
    x, y = test_generator.__getitem__(0)
    assert x[0].shape == x[1].shape == (batch_size, dimension), "Expected different data shape"
    assert set(test_generator.indexes) == set(list(range(dimension))), "Something wrong with generator indices"
    # Test if every inchikey was picked once (and only once):
    assert (x[0].sum(axis=0) > 0).sum() == batch_size  # This works since each spectrum in the dummy set has one unique peak
    # Test many cycles --> scores properly distributed into bins?
    counts = []
    repetitions = 100
    total = batch_size * repetitions
    for _ in range(repetitions):
        for i, batch in enumerate(test_generator):
            counts.extend(list(batch[1]))
    # Both halves (above / below 0.5) should be well represented.
    assert (np.array(counts) > 0.5).sum() > 0.4 * total
    assert (np.array(counts) <= 0.5).sum() > 0.4 * total
def test_DataGeneratorAllSpectrums():
    """Test DataGeneratorAllSpectrums using generated dummy data.

    Unlike DataGeneratorAllInchikeys, every spectrum (including the
    duplicated ones) should be usable, so more than one spectrum per
    inchikey can appear in a batch.
    """
    binned_spectrums, tanimoto_scores_df, ms2ds_binner = create_dummy_data()
    assert binned_spectrums[0].binned_peaks == {0: 0.1}, "Something went wrong with the binning"
    # Define other parameters
    batch_size = 8  # Set the batch size to 8 to make sure it is a different number than the number of bins.
    dimension = len(ms2ds_binner.known_bins)
    np.random.seed(41) #Set the seed to make sure multiple spectra are selected every time.
    selected_inchikeys = tanimoto_scores_df.index
    # Create generator (all augmentation disabled for determinism)
    test_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums,
                                               selected_inchikeys=selected_inchikeys,
                                               reference_scores_df=tanimoto_scores_df,
                                               spectrum_binner=ms2ds_binner,
                                               batch_size=batch_size,
                                               augment_removal_max=0.0,
                                               augment_removal_intensity=0.0,
                                               augment_intensity=0.0,
                                               augment_noise_max=0)
    x, y = test_generator.__getitem__(0)
    assert x[0].shape == x[1].shape == (batch_size, dimension), "Expected different data shape"
    assert set(test_generator.indexes) == set(list(range(len(binned_spectrums)))), "Something wrong with generator indices"
    # Test if every inchikey not was picked only once
    assert not (x[0].sum(axis=0) > 0).sum() == batch_size, \
        "For each inchikey only one spectrum was picked instead of all spectra"
    # Test many cycles --> scores properly distributed into bins?
    counts = []
    repetitions = 100
    total = batch_size * repetitions
    for _ in range(repetitions):
        for i, batch in enumerate(test_generator):
            counts.extend(list(batch[1]))
    # Both halves (above / below 0.5) should be well represented.
    assert (np.array(counts) > 0.5).sum() > 0.4 * total
    assert (np.array(counts) <= 0.5).sum() > 0.4 * total
def test_DataGeneratorAllInchikeys_real_data():
    """Basic first test for DataGeneratorAllInchikeys using actual data.

    Only checks batch shapes and a couple of settings defaults.
    """
    # Get test data
    binned_spectrums, tanimoto_scores_df, ms2ds_binner = create_test_data()
    # Define other parameters
    batch_size = 10
    dimension = len(ms2ds_binner.known_bins)
    # Restrict training selection to the first 80 inchikeys.
    selected_inchikeys = tanimoto_scores_df.index[:80]
    # Create generator
    test_generator = DataGeneratorAllInchikeys(binned_spectrums=binned_spectrums,
                                               selected_inchikeys=selected_inchikeys,
                                               reference_scores_df=tanimoto_scores_df,
                                               spectrum_binner=ms2ds_binner, batch_size=batch_size,
                                               augment_removal_max=0.0,
                                               augment_removal_intensity=0.0,
                                               augment_intensity=0.0)
    A, B = test_generator.__getitem__(0)
    assert A[0].shape == A[1].shape == (batch_size, dimension), "Expected different data shape"
    assert B.shape[0] == 10, "Expected different label shape."
    assert test_generator.settings["num_turns"] == 1, "Expected different default."
    assert test_generator.settings["augment_intensity"] == 0.0, "Expected changed value."
def test_DataGeneratorAllSpectrumsRealData():
    """Basic first test for DataGeneratorAllSpectrums"""
    # Get test data
    binned_spectrums, tanimoto_scores_df, ms2ds_binner = create_test_data()
    # Define other parameters
    batch_size = 10
    # 88 = number of known bins for this test dataset — TODO confirm against binner
    dimension = 88
    # Create generator over the first 150 binned spectra only
    test_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:150],
                                               reference_scores_df=tanimoto_scores_df,
                                               spectrum_binner=ms2ds_binner, batch_size=batch_size,
                                               augment_removal_max=0.0,
                                               augment_removal_intensity=0.0,
                                               augment_intensity=0.0)
    A, B = test_generator.__getitem__(0)
    assert A[0].shape == A[1].shape == (10, dimension), "Expected different data shape"
    assert B.shape[0] == 10, "Expected different label shape."
    assert test_generator.settings["num_turns"] == 1, "Expected different default."
    assert test_generator.settings["augment_intensity"] == 0.0, "Expected changed value."
def test_DataGeneratorAllSpectrums_no_inchikey_leaking():
    """Test if non-selected InChIKeys are correctly removed.

    Only 8 spectra are given to the generator, so reference_scores_df must
    shrink to the 6 inchikeys those spectra cover and the generator must
    only ever emit labels from that reduced score matrix.
    """
    # Get test data
    binned_spectrums, tanimoto_scores_df, ms2ds_binner = create_test_data()
    # Define other parameters
    batch_size = 8
    dimension = 88
    # Create generator
    test_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                               reference_scores_df=tanimoto_scores_df,
                                               spectrum_binner=ms2ds_binner, batch_size=batch_size,
                                               augment_removal_max=0.0,
                                               augment_removal_intensity=0.0,
                                               augment_intensity=0.0)
    assert test_generator.reference_scores_df.shape == (6, 6), "Expected different reduced shape of labels"
    expected_inchikeys = ['BBXXLROWFHWFQY',
                          'FBOUIAKEJMZPQG',
                          'GPXLRLUVLMHHIK',
                          'JXCGFZXSOMJFOA',
                          'RZILCCPWPBTYDO',
                          'UYJUZNLFJAWNEZ']
    found_inchikeys = test_generator.reference_scores_df.columns.to_list()
    found_inchikeys.sort()
    assert found_inchikeys == expected_inchikeys, \
        "Expected different InChIKeys to remain in reference_scores_df"
    # Test if the expected labels are returned by generator
    # (the 15 pairwise scores of the 6 remaining inchikeys)
    expected_labels = np.array([0.38944724, 0.39130435, 0.39378238, 0.40045767, 0.40497738,
                                0.40930233, 0.43432203, 0.46610169, 0.47416413, 0.48156182,
                                0.50632911, 0.5214447 , 0.52663934, 0.59934853, 0.63581489])
    repetitions = 200
    collect_results = np.zeros(repetitions * batch_size)  # Collect 2000 results
    for i in range(repetitions):
        _, B = test_generator.__getitem__(0)
        collect_results[batch_size*i:batch_size*(i+1)] = B
    assert len(np.unique(collect_results)) <= 15, "Expected max 15 possible results"
    present_in_expected_labels = [(np.round(x,6) in list(np.round(expected_labels, 6))) for x in np.unique(collect_results)]
    assert np.all(present_in_expected_labels), "Got unexpected labels from generator"
def test_DataGeneratorAllSpectrums_asymmetric_label_input():
    """A non-square reference_scores_df must be rejected with a ValueError."""
    binned_spectrums, tanimoto_scores_df, ms2ds_binner = create_test_data()
    # Drop two columns so index and columns no longer match.
    lopsided_scores_df = tanimoto_scores_df.iloc[:, 2:]
    with pytest.raises(ValueError) as msg:
        _ = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums,
                                      reference_scores_df=lopsided_scores_df,
                                      spectrum_binner=ms2ds_binner)
    assert "index and columns of reference_scores_df are not identical" in str(msg), \
        "Expected different ValueError"
def test_DataGeneratorAllSpectrums_fixed_set():
    """
    Test whether use_fixed_set=True toggles generating the same dataset on each epoch.
    """
    # Get test data
    binned_spectrums, tanimoto_scores_df, ms2ds_binner = create_test_data()
    # Define other parameters
    batch_size = 4
    dimension = 88
    # Create generator that generates a fixed set every epoch
    fixed_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                reference_scores_df=tanimoto_scores_df,
                                                spectrum_binner=ms2ds_binner, batch_size=batch_size,
                                                num_turns=5, use_fixed_set=True)
    # Two full passes must produce identical batches.
    first_X, first_y = collect_results(fixed_generator, batch_size, dimension)
    second_X, second_y = collect_results(fixed_generator, batch_size, dimension)
    assert np.array_equal(first_X, second_X)
    assert np.array_equal(first_y, second_y)
    # No seed was passed, so the setting must stay unset.
    assert fixed_generator.settings["random_seed"] is None
def test_DataGeneratorAllSpectrums_fixed_set_random_seed():
    """
    Test whether use_fixed_set=True toggles generating the same dataset on each epoch.
    And if same random_seed leads to exactly the same output.
    """
    # Get test data
    binned_spectrums, tanimoto_scores_df, ms2ds_binner = create_test_data()
    # Define other parameters
    batch_size = 4
    dimension = 88
    # Create normal generator
    normal_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                 reference_scores_df=tanimoto_scores_df,
                                                 spectrum_binner=ms2ds_binner, batch_size=batch_size,
                                                 use_fixed_set=False)
    # Create generator that generates a fixed set every epoch
    fixed_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                reference_scores_df=tanimoto_scores_df,
                                                spectrum_binner=ms2ds_binner, batch_size=batch_size,
                                                num_turns=5, use_fixed_set=True, random_seed=0)
    # Without a fixed set, two passes must differ.
    first_X, first_y = collect_results(normal_generator, batch_size, dimension)
    second_X, second_y = collect_results(normal_generator, batch_size, dimension)
    assert not np.array_equal(first_X, second_X)
    assert first_y.shape == (4, 2), "Expected different number of labels"
    # With a fixed set, two passes must be identical (num_turns=5 --> 10 batches).
    first_X, first_y = collect_results(fixed_generator, batch_size, dimension)
    second_X, second_y = collect_results(fixed_generator, batch_size, dimension)
    assert np.array_equal(first_X, second_X)
    assert first_y.shape == (4, 10), "Expected different number of labels"
    # Create another fixed generator based on the same dataset that should generate the same
    # fixed set
    fixed_generator2 = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                 reference_scores_df=tanimoto_scores_df,
                                                 spectrum_binner=ms2ds_binner, batch_size=batch_size,
                                                 num_turns=5, use_fixed_set=True,
                                                 random_seed=0)
    first_X, first_y = collect_results(fixed_generator, batch_size, dimension)
    second_X, second_y = collect_results(fixed_generator2, batch_size, dimension)
    assert np.array_equal(first_X, second_X)
def test_DataGeneratorAllSpectrums_additional_inputs():
    """
    Test if additional input parameter works as intended
    """
    # Get test data
    spectrums = load_processed_spectrums()
    tanimoto_scores_df = get_reference_scores()
    # Run for two test cases.
    # Testing a single and multiple inputs is important, since numpy can do weird things with 1D arrays of len= 1
    test_cases = [(StandardScaler("precursor_mz", mean=0, std=1000), ),
                  (StandardScaler("precursor_mz", mean=0, std=1000),
                   CategoricalToBinary("ionmode", entries_becoming_one="negative", entries_becoming_zero="positive"))]
    for additional_feature_types in test_cases:
        # additional_feature_types = ()
        ms2ds_binner = SpectrumBinner(100, mz_min=10.0, mz_max=1000.0, peak_scaling=0.5,
                                      additional_metadata=additional_feature_types)
        binned_spectrums = ms2ds_binner.fit_transform(spectrums)
        # Define other parameters
        batch_size = 4
        data_generator = DataGeneratorAllSpectrums(binned_spectrums, tanimoto_scores_df,
                                                   spectrum_binner=ms2ds_binner, batch_size=batch_size)
        batch_X, batch_y = data_generator.__getitem__(0)
        for batch_X_values in batch_X:
            assert len(batch_X_values) == len(batch_y) == batch_size, "Batchsizes from X and y are not the same."
        # X entries 1 and 3 carry the extra metadata features for the two inputs.
        assert len(batch_X[1][0]) == len(additional_feature_types) == len(batch_X[3][0]), "There are not as many inputs as specified."
# Test specific class methods
# ---------------------------
def test_validate_labels():
    """_validate_labels: mismatched index/columns raise, matching ones pass."""
    # Case 1: index names differ from column names -> ValueError
    mismatched = pd.DataFrame({'A1': [0.5, 0.6], 'A2': [0.7, 0.8]}, index=['B1', 'B2'])
    with pytest.raises(ValueError):
        _validate_labels(mismatched)
    # Case 2: identical index and column names -> accepted silently
    matched = pd.DataFrame({'A1': [0.5, 0.6], 'A2': [0.7, 0.8]}, index=['A1', 'A2'])
    _validate_labels(matched)
def test_exclude_nans_from_labels():
    """_exclude_nans_from_labels drops every label whose row/column has a NaN."""
    # Labels "C" and "D" each carry one NaN and must be removed.
    raw = pd.DataFrame(
        {
            "A": [1, 2, np.nan, 4],
            "B": [2, 3, 4, 5],
            "C": [3, 4, 5, np.nan],
            "D": [4, 5, 6, 7],
        },
        index=["A", "B", "C", "D"],
    )
    clean_df = _exclude_nans_from_labels(raw)
    expected = pd.DataFrame({"A": [1, 2], "B": [2, 3]}, index=["A", "B"])
    # Values, symmetry of labels, and the surviving labels themselves.
    assert np.allclose(clean_df.values, expected.values)
    assert np.all(clean_df.index == clean_df.columns)
    assert np.all(clean_df.index == ["A", "B"])
|
from . import base, items
def note_class(self, class_, level):
    """Record a (character class name, level) pair on self.classes."""
    try:
        self.classes.append((class_, level))
    except AttributeError:
        # First class taken by this character: create the list.
        self.classes = [(class_, level)]
class _Progression:
def __init__(self, list, level):
import collections
self.x=collections.defaultdict(int)
for i in range(0, level):
for j in list[i]: self.x[j]+=1
def __repr__(self): return repr(dict(self.x))
def __add__(self, other):
result=_Progression([], 0)
result.x=self.x
for k, v in other.x.items(): result.x[k]+=v
return result
def items(self):
return self.x.items()
class Standard:
    # Base level/HP mechanics shared by all character classes.
    @staticmethod
    def init(self, level, **kwargs):
        # Register level and the 5e proficiency bonus (+2 at level 1, +1 per 4 levels).
        base.add(self, 'level', level, base.plus)
        base.add(self, 'proficiency_bonus', 2+(level-1)//4, base.plus)
        base.set_methods(self, Standard)
    def first_level_hp(self):
        # Maximum hit-die value plus CON modifier.
        # NOTE(review): dice_sides_type is not defined or imported in this
        # module — confirm where it is expected to come from.
        return dice_sides_type(self.hit_dice)[1]+base.modifier(self.constitution)
    def level_up(self, roll=False):
        sides=dice_sides_type(self.hit_dice)[1]
        # NOTE(review): when `roll` is truthy this calls roll('d...'), but
        # `roll` is a boolean parameter — it looks like a module-level dice
        # roller was intended here. Likely BUG; confirm before relying on it.
        if roll: x=roll('d{}'.format(sides))
        else: x=sides//2+1  # take the average roll, rounded up
        self.max_hp+=x+base.modifier(self.constitution)
class Spellcaster:
    # Mixin adding full-caster spell slots (shared PHB slot table).
    @staticmethod
    def init(self, level, **kwargs):
        Standard.init(self, level, **kwargs)
        # Spell slots per spell level 1-9, indexed directly by class level;
        # row 0 is a dummy so the table can be indexed with `level` as-is.
        base.add(self, 'slots', [
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [2, 0, 0, 0, 0, 0, 0, 0, 0],
            [3, 0, 0, 0, 0, 0, 0, 0, 0],
            [4, 2, 0, 0, 0, 0, 0, 0, 0],
            [4, 3, 0, 0, 0, 0, 0, 0, 0],
            [4, 3, 2, 0, 0, 0, 0, 0, 0],
            [4, 3, 3, 0, 0, 0, 0, 0, 0],
            [4, 3, 3, 1, 0, 0, 0, 0, 0],
            [4, 3, 3, 2, 0, 0, 0, 0, 0],
            [4, 3, 3, 3, 1, 0, 0, 0, 0],
            [4, 3, 3, 3, 2, 0, 0, 0, 0],
            [4, 3, 3, 3, 2, 1, 0, 0, 0],
            [4, 3, 3, 3, 2, 1, 0, 0, 0],
            [4, 3, 3, 3, 2, 1, 1, 0, 0],
            [4, 3, 3, 3, 2, 1, 1, 0, 0],
            [4, 3, 3, 3, 2, 1, 1, 1, 0],
            [4, 3, 3, 3, 2, 1, 1, 1, 0],
            [4, 3, 3, 3, 2, 1, 1, 1, 1],
            [4, 3, 3, 3, 3, 1, 1, 1, 1],
            [4, 3, 3, 3, 3, 2, 1, 1, 1],
            [4, 3, 3, 3, 3, 2, 1, 1, 1],
        ][level], lambda old, new: [old[i]+new[i] for i in range(9)])  # multiclass slots merge element-wise
        if kwargs.get('new', False):
            # New characters know 3 cantrips, +1 at level 4 and +1 at level 10.
            cantrips=3
            if level>=4: cantrips+=1
            if level>=10: cantrips+=1
            base.add(self, 'choices', {'cantrips': cantrips}, base.dict_add)
class SpellPreparer:
    """Mixin for classes that prepare spells from their list (cleric, druid, wizard)."""
    @staticmethod
    def init(self, level, **kwargs):
        Spellcaster.init(self, level, **kwargs)
        base.set_methods(self, SpellPreparer)
    def prepared_spells(self, **kwargs):
        # Casting-ability modifier + class level, but always at least one spell.
        ability_mod = base.modifier(self.spellcasting_ability())
        return max(ability_mod + self.level, 1)
class Cleric:
    # D&D 5e Cleric: d8 hit die, WIS/CHA saves, prepared divine caster (WIS).
    @staticmethod
    def init(self, level, **kwargs):
        note_class(self, 'Cleric', level)
        SpellPreparer.init(self, level, **kwargs)
        base.set_methods(self, Cleric)
        self.add_hit_dice(level, 8)
        base.add(self, 'proficiencies', [
            'light_armor', 'medium_armor', 'shield',
            'wisdom_saving_throw', 'charisma_saving_throw',
        ]+items.simple_weapons, base.union)
        base.add(self, 'special_qualities', ['ritual_casting'], base.union)
        # Features gained per level (index 0 = level 1).
        base.add(self, 'features', _Progression([
            ['spellcasting', 'divine_domain'],
            ['channel_divinity', 'divine_domain'],
            [],
            ['ability_score_improvement'],
            ['destroy_undead'],
            ['channel_divinity', 'divine_domain'],
            [],
            ['ability_score_improvement', 'destroy_undead', 'divine_domain'],
            [],
            ['divine_intervention'],
            ['destroy_undead'],
            ['ability_score_improvement'],
            [],
            ['destroy_undead'],
            [],
            ['ability_score_improvement'],
            ['destroy_undead', 'divine_domain'],
            ['channel_divinity'],
            ['ability_score_improvement'],
            ['divine_intervention_improvement'],
        ], level), base.plus)
        # Class spell list by spell level (index 0 = cantrips, handled separately below).
        base.add(self, 'spells', [
            [],
            ['bane', 'bless', 'command', 'create_or_destroy_water', 'cure_wounds', 'detect_evil_and_good', 'detect_magic', 'detect_poison_and_disease', 'guiding_bolt', 'healing_word', 'inflict_wounds', 'protection_from_evil_and_good', 'purify_food_and_drink', 'sanctuary', 'shield_of_faith'],
            ['aid', 'augury', 'blindness_deafness', 'calm_emotions', 'continual_flame', 'enhance_ability', 'find_traps', 'gentle_repose', 'hold_person', 'lesser_restoration', 'locate_object', 'prayer_of_healing', 'protection_from_poison', 'silence', 'spiritual_weapon', 'warding_bond', 'zone_of_truth'],
            ['animate_dead', 'beacon_of_hope', 'bestow_curse', 'clairvoyance', 'create_food_and_water', 'daylight', 'dispel_magic', 'glyph_of_warding', 'magic_circle', 'mass_healing_word', 'meld_into_stone', 'protection_from_energy', 'remove_curse', 'revivify', 'sending', 'speak_with_dead', 'spirit_guardians', 'tongues', 'water_walk'],
            ['banishment', 'control_water', 'death_ward', 'divination', 'freedom_of_movement', 'guardian_of_faith', 'locate_creature', 'stone_shape'],
            ['commune', 'contagion', 'dispel_evil_and_good', 'flame_strike', 'geas', 'greater_restoration', 'hallow', 'insect_plague', 'legend_lore', 'mass_cure_wounds', 'planar_binding', 'raise_dead', 'scrying'],
            ['blade_barrier', 'create_undead', 'find_the_path', 'forbiddance', 'harm', 'heal', 'heroes_feast', 'planar_ally', 'true_seeing', 'word_of_recall'],
            ['conjure_celestial', 'divine_word', 'etherealness', 'fire_storm', 'plane_shift', 'regenerate', 'resurrection', 'symbol'],
            ['antimagic_field', 'control_weather', 'earthquake', 'holy_aura'],
            ['astral_projection', 'gate', 'mass_heal', 'true_resurrection'],
        ], base.spell_add)
        self.spellcasting_ability=lambda: self.wisdom
        if kwargs.get('new', False):
            # Starting choices and equipment for a fresh level-1 cleric.
            base.add(self, 'choices', {
                '2 cleric skills': ['history', 'insight', 'medicine', 'persuasion', 'religion'],
                'cleric weapon': ['mace', 'warhammer'],
                'cleric armor': ['scale_mail', 'leather_armor', 'chain_mail'],
                'cleric weapon 2': ['light_crossbow']+items.simple_weapons,
                'cleric pack': ['priests_pack', 'explorers_pack'],
                'cleric alternate gp': '5d4*10',
            }, base.dict_add)
            self.wearing=['shield']
    # Cantrip options (spell level 0).
    spells=[
        ['guidance', 'light', 'mending', 'resistance', 'sacred_flame', 'thaumaturgy'],
    ]
class Wizard:
    # D&D 5e Wizard: d6 hit die, WIS/INT saves, prepared arcane caster (INT).
    @staticmethod
    def init(self, level, **kwargs):
        note_class(self, 'Wizard', level)
        SpellPreparer.init(self, level, **kwargs)
        base.set_methods(self, Wizard)
        self.add_hit_dice(level, 6)
        base.add(self, 'proficiencies', [
            'dagger', 'dart', 'sling', 'quarterstaff', 'light_crossbow',
            'wisdom_saving_throw', 'intelligence_saving_throw',
        ], base.union)
        # Features gained per level (index 0 = level 1).
        base.add(self, 'features', _Progression([
            ['spellcasting', 'arcane_recovery'],
            ['arcane_tradition'],
            [],
            ['ability_score_improvement'],
            [],
            ['arcane_tradition'],
            [],
            ['ability_score_improvement'],
            [],
            ['arcane_tradition'],
            [],
            ['ability_score_improvement'],
            [],
            ['arcane_tradition'],
            [],
            ['ability_score_improvement'],
            [],
            ['spell_mastery'],
            ['ability_score_improvement'],
            ['signature_spell'],
        ], level), base.plus)
        self.spellcasting_ability=lambda: self.intelligence
        if kwargs.get('new', False):
            # Starting choices and equipment for a fresh level-1 wizard.
            base.add(self, 'choices', {
                '2 wizard skills': ['arcana', 'history', 'insight', 'investigation', 'medicine', 'religion'],
                'wizard weapon': ['quarterstaff', 'dagger'],
                'wizard junk': ['component_pouch', 'arcane_focus'],
                'wizard pack': ['scholars_pack', 'explorers_pack'],
                'wizard alternate gp': '4d4*10',
            }, base.dict_add)
            self.carrying=['spellbook']
    # Cantrip options (spell level 0).
    spells=[
        [
            'acid_splash', 'chill_touch', 'dancing_lights', 'light',
            'mage_hand', 'mending', 'message', 'minor_illusion',
            'prestidigitation', 'ray_of_frost', 'shocking_grasp', 'true_strike',
        ],
    ]
class Rogue:
    # D&D 5e Rogue: d8 hit die, DEX/INT saves, scaling sneak attack.
    @staticmethod
    def init(self, level, **kwargs):
        note_class(self, 'Rogue', level)
        Standard.init(self, level)
        base.set_methods(self, Rogue)
        self.add_hit_dice(level, 8)
        base.add(self, 'proficiencies', [
            'light_armor',
            'hand_crossbow', 'longsword', 'rapier', 'shortsword',
            'thieves_tools',
            'dexterity_saving_throw', 'intelligence_saving_throw',
        ]+items.simple_weapons, base.union)
        # Features gained per level (index 0 = level 1).
        base.add(self, 'features', _Progression([
            ['expertise', 'sneak_attack', 'thieves_cant'],
            ['cunning_action'],
            ['roguish_archetype'],
            ['ability_score_improvement'],
            ['uncanny_dodge'],
            ['expertise'],
            ['evasion'],
            ['ability_score_improvement'],
            ['roguish_archetype'],
            ['ability_score_improvement'],
            ['reliable_talent'],
            ['ability_score_improvement'],
            ['roguish_archetype'],
            ['blindsense'],
            ['slippery_mind'],
            ['ability_score_improvement'],
            ['roguish_archetype'],
            ['elusive'],
            ['ability_score_improvement'],
            ['stroke_of_luck'],
        ], level), base.plus)
        if kwargs.get('new', False):
            # Starting choices and equipment for a fresh level-1 rogue.
            base.add(self, 'choices', {
                '4 rogue skills': [
                    'acrobatics', 'athletics', 'deception', 'insight',
                    'intimidation', 'investigation', 'perception', 'performance',
                    'persuasion', 'sleight_of_hand', 'stealth',
                ],
                'rogue weapon': ['rapier', 'shortsword'],
                'rogue weapon 2': [['shortbow', 'quiver'], 'shortsword'],
                'rogue pack': ['burglars_pack', 'dungeoneers_pack', 'explorers_pack'],
                'rogue alternate gp': '4d4*10',
            }, base.dict_add)
            self.wearing=['leather_armor', 'dagger', 'dagger']
            self.carrying=['thieves_tools']
    def sneak_attack(self):
        # Sneak attack is 1d6 at level 1 and gains 1d6 at each odd level,
        # i.e. ceil(level / 2) d6 per the SRD. The previous formula
        # (level-1)//2 produced 0d6 at levels 1-2 and lagged one die behind.
        return '{}d6'.format((self.level + 1) // 2)
class Fighter:
    # D&D 5e Fighter: d10 hit die, STR/CON saves, all armor and weapons.
    @staticmethod
    def init(self, level, **kwargs):
        note_class(self, 'Fighter', level)
        Standard.init(self, level)
        base.set_methods(self, Fighter)
        self.add_hit_dice(level, 10)
        base.add(self, 'proficiencies', [
            'light_armor', 'medium_armor', 'heavy_armor', 'shield',
            'strength_saving_throw', 'constitution_saving_throw',
        ]+items.simple_weapons+items.martial_weapons, base.union)
        # Features gained per level (index 0 = level 1).
        base.add(self, 'features', _Progression([
            ['fighting_style', 'second_wind'],
            ['action_surge'],
            ['martial_archetype'],
            ['ability_score_improvement'],
            ['extra_attack'],
            ['ability_score_improvement'],
            ['martial_archetype'],
            ['ability_score_improvement'],
            ['indomitable'],
            ['martial_archetype'],
            ['extra_attack'],
            ['ability_score_improvement'],
            ['indomitable'],
            ['ability_score_improvement'],
            ['martial_archetype'],
            ['ability_score_improvement'],
            ['action_surge', 'indomitable'],
            ['martial_archetype'],
            ['ability_score_improvement'],
            ['extra_attack'],
        ], level), base.plus)
        def attack(self, *args, **kwargs):
            # Champion archetype crits on 19-20 from level 3.
            # NOTE(review): critical_hit is computed but never passed on —
            # base.Entity.attack is called without it. Looks unfinished; confirm.
            critical_hit=20
            if hasattr(self, 'martial_archetype') and self.martial_archetype=='champion' and self.level>=3:
                critical_hit=19
            return base.Entity.attack(self, *args, **kwargs)
        import types
        # Bind the override to this instance only.
        self.attack=types.MethodType(attack, self)
        if kwargs.get('new', False):
            # Starting choices and equipment for a fresh level-1 fighter.
            base.add(self, 'choices', {
                '2 fighter skills': [
                    'acrobatics', 'athletics', 'animal_handling', 'history',
                    'insight', 'intimidation', 'perception', 'survival',
                ],
                'fighter armor': ['chain_mail', ['leather_armor', 'longbow', 'quiver']],
                'fighter weapon': items.martial_weapons,
                'fighter shield': ['shield']+items.martial_weapons,
                'fighter weapon 2': [['light_crossbow', 'quiver'], {'handaxe': 2}],
                'fighter pack': ['dungeoneers_pack', 'explorers_pack'],
                'fighter alternate gp': '5d4*10',
            }, base.dict_add)
class Druid:
    # D&D 5e Druid: d8 hit die, INT/WIS saves, prepared nature caster (WIS).
    @staticmethod
    def init(self, level, **kwargs):
        note_class(self, 'Druid', level)
        SpellPreparer.init(self, level, **kwargs)
        base.set_methods(self, Druid)
        self.add_hit_dice(level, 8)
        base.add(self, 'proficiencies', [
            'light_armor', 'medium_armor', 'shield',
            'club', 'dagger', 'dart', 'javelin', 'mace', 'quarterstaff',
            'scimitar', 'sickle', 'sling', 'spear',
            'herbalism_kit',
            'intelligence_saving_throw', 'wisdom_saving_throw',
        ], base.union)
        # Features gained per level (index 0 = level 1).
        base.add(self, 'features', _Progression([
            ['druidic', 'spellcasting'],
            ['wild_shape', 'druid_circle'],
            [],
            ['wild_shape', 'ability_score_improvement'],
            [],
            ['druid_circle'],
            [],
            ['wild_shape', 'ability_score_improvement'],
            [],
            ['druid_circle'],
            [],
            ['ability_score_improvement'],
            [],
            ['druid_circle'],
            [],
            ['ability_score_improvement'],
            [],
            ['timeless_body', 'beast_spells'],
            ['ability_score_improvement'],
            ['archdruid'],
        ], level), base.plus)
        self.spellcasting_ability=lambda: self.wisdom
        if kwargs.get('new', False):
            # Starting choices and equipment for a fresh level-1 druid.
            base.add(self, 'choices', {
                '2 druid skills': [
                    'arcana', 'animal_handling', 'insight', 'medicine',
                    'nature', 'perception', 'religion', 'survival',
                ],
                'druid shield': ['shield']+items.simple_weapons,
                'druid weapon': ['scimitar']+items.simple_weapons,
                'druid alternate gp': '2d4*10',
            }, base.dict_add)
            self.wearing=['leather_armor']
            self.carrying=items.explorers_pack+['druidic_focus']
    # Cantrip options (spell level 0).
    spells=[
        ['guidance', 'mending', 'produce_flame', 'resistance', 'shillelagh'],
    ]
class Bard:
    # D&D 5e Bard: d8 hit die, DEX/CHA saves, known-spell caster (CHA).
    @staticmethod
    def init(self, level, **kwargs):
        note_class(self, 'Bard', level)
        Spellcaster.init(self, level, **kwargs)
        base.set_methods(self, Bard)
        self.add_hit_dice(level, 8)
        base.add(self, 'proficiencies', [
            'light_armor',
            'hand_crossbow', 'longsword', 'rapier', 'shortsword',
            'dexterity_saving_throw', 'charisma_saving_throw',
        ]+items.simple_weapons, base.union)
        # Features gained per level (index 0 = level 1).
        base.add(self, 'features', _Progression([
            ['bardic_inspiration', 'spellcasting'],
            ['jack_of_all_trades', 'song_of_rest'],
            ['bard_college', 'expertise'],
            ['ability_score_improvement'],
            ['bardic_inspiration', 'font_of_inspiration'],
            ['countercharm', 'bard_college'],
            [],
            ['ability_score_improvement'],
            ['song_of_rest'],
            ['bardic_inspiration', 'expertise', 'magical_secrets'],
            [],
            ['ability_score_improvement'],
            ['song_of_rest'],
            ['magical_secrets', 'bard_college'],
            ['bardic_inspiration'],
            ['ability_score_improvement'],
            ['song_of_rest'],
            ['magical_secrets'],
            ['ability_score_improvement'],
            ['superior_inspiration'],
        ], level), base.plus)
        self.spellcasting_ability=lambda: self.charisma
        if kwargs.get('new', False):
            # Starting choices and equipment for a fresh level-1 bard.
            base.add(self, 'choices', {
                '3 bard skills': 'any',
                '3 bard tools': 'musical instruments',
                'bard weapon': ['rapier', 'longsword']+items.simple_weapons,
                'bard pack': ['diplomats_pack', 'entertainers_pack'],
                'bard instrument': 'musical instrument',
                'bard alternate gp': '5d4*10',
            }, base.dict_add)
            self.wearing=['leather_armor', 'dagger']
    # Cantrip options (spell level 0).
    spells=[
        [
            'dancing_lights', 'light', 'mage_hand', 'mending', 'message',
            'minor_illusion', 'prestidigitation', 'true_strike',
        ]
    ]
class Sorcerer:
    """Sorcerer class: d6 hit die, known Charisma-based spellcasting."""

    @staticmethod
    def init(self, level, **kwargs):
        """Apply Sorcerer class levels to a character.

        :param self: character object being built (mixin-style static method)
        :param level: class level, 1-20
        :param kwargs: 'new' (bool) - when True, add starting-character
            choices and equipment
        """
        note_class(self, 'Sorcerer', level)
        Spellcaster.init(self, level, **kwargs)
        base.set_methods(self, Sorcerer)
        self.add_hit_dice(level, 6)
        base.add(self, 'proficiencies', [
            'dagger', 'dart', 'sling', 'quarterstaff', 'light_crossbow',
            'constitution_saving_throw', 'charisma_saving_throw',
        ], base.union)
        # Features gained at each class level 1-20 (one row per level).
        base.add(self, 'features', _Progression([
            ['sorcerous_origin', 'spellcasting'],
            ['font_of_magic'],
            ['metamagic'],
            ['ability_score_improvement'],
            [],
            ['sorcerous_origin'],
            [],
            ['ability_score_improvement'],
            [],
            ['metamagic'],
            [],
            ['ability_score_improvement'],
            [],
            ['sorcerous_origin'],
            [],
            ['ability_score_improvement'],
            ['metamagic'],
            ['sorcerous_origin'],
            ['ability_score_improvement'],
            ['sorcerous_restoration'],
        ], level), base.plus)
        self.spellcasting_ability=lambda: self.charisma
        if kwargs.get('new', False):
            # Starting-character choices and default equipment.
            # NOTE(review): the 'sorceror' spelling in these keys is kept
            # verbatim - they are runtime dict keys, renaming would break
            # any saved data that references them; confirm before fixing.
            base.add(self, 'choices', {
                '2 sorceror skills': [
                    'arcana', 'deception', 'insight', 'intimidation',
                    'persuasion', 'religion',
                ],
                'sorceror weapon': items.simple_weapons,
                'sorceror pack': ['dungeoneers_pack', 'explorers_pack'],
                'sorceror junk': ['component_pouch', 'arcane_focus'],
                'sorceror alternate gp': '3d4*10',
            }, base.dict_add)
            self.wearing=['dagger', 'dagger']

    # Class spell list by spell level; index 0 holds the cantrips.
    spells=[
        [
            'acid_splash', 'chill_touch', 'dancing_lights', 'light',
            'mage_hand', 'mending', 'message', 'minor_illusion',
            'prestidigitation', 'ray_of_frost', 'shocking_grasp', 'true_strike',
        ],
    ]
class Ranger:
    """Ranger class: d10 hit die, half-caster with Wisdom spellcasting."""

    @staticmethod
    def init(self, level, **kwargs):
        """Apply Ranger class levels to a character.

        :param self: character object being built (mixin-style static method)
        :param level: class level, 1-20
        :param kwargs: 'new' (bool) - when True, add starting-character
            choices and equipment
        """
        note_class(self, 'Ranger', level)
        Standard.init(self, level, **kwargs)
        base.set_methods(self, Ranger)
        self.add_hit_dice(level, 10)
        base.add(self, 'proficiencies', [
            'light_armor', 'medium_armor', 'shield',
            'dexterity_saving_throw', 'strength_saving_throw',
        ]+items.simple_weapons+items.martial_weapons, base.union)
        # Features gained at each class level 1-20 (one row per level).
        base.add(self, 'features', _Progression([
            ['favored_enemy', 'natural_explorer'],
            ['spellcasting', 'fighting_style'],
            ['ranger_archetype', 'primeval_awareness'],
            ['ability_score_improvement'],
            ['extra_attack'],
            ['favored_enemy', 'natural_explorer'],
            ['ranger_archetype'],
            ['ability_score_improvement', 'lands_stride'],
            [],
            ['natural_explorer', 'hide_in_plain_sight'],
            ['ranger_archetype'],
            ['ability_score_improvement'],
            # BUG FIX: level 13 grants no features; this row was [''],
            # which added an empty-string "feature" to the character.
            [],
            ['favored_enemy', 'vanish'],
            ['ranger_archetype'],
            ['ability_score_improvement'],
            [],
            ['feral_senses'],
            ['ability_score_improvement'],
            ['foe_slayer'],
        ], level), base.plus)
        # Half-caster spell slot table, indexed by class level (row 0 unused).
        base.add(self, 'slots', [
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [2, 0, 0, 0, 0, 0, 0, 0, 0],
            [3, 0, 0, 0, 0, 0, 0, 0, 0],
            [3, 0, 0, 0, 0, 0, 0, 0, 0],
            [4, 2, 0, 0, 0, 0, 0, 0, 0],
            [4, 2, 0, 0, 0, 0, 0, 0, 0],
            [4, 3, 0, 0, 0, 0, 0, 0, 0],
            [4, 3, 0, 0, 0, 0, 0, 0, 0],
            [4, 3, 2, 0, 0, 0, 0, 0, 0],
            [4, 3, 2, 0, 0, 0, 0, 0, 0],
            [4, 3, 3, 0, 0, 0, 0, 0, 0],
            [4, 3, 3, 0, 0, 0, 0, 0, 0],
            [4, 3, 3, 1, 0, 0, 0, 0, 0],
            [4, 3, 3, 1, 0, 0, 0, 0, 0],
            [4, 3, 3, 2, 0, 0, 0, 0, 0],
            [4, 3, 3, 2, 0, 0, 0, 0, 0],
            [4, 3, 3, 3, 1, 0, 0, 0, 0],
            [4, 3, 3, 3, 1, 0, 0, 0, 0],
            [4, 3, 3, 3, 2, 0, 0, 0, 0],
            [4, 3, 3, 3, 2, 0, 0, 0, 0],
        ][level], lambda old, new: [old[i]+new[i] for i in range(9)])
        self.spellcasting_ability=lambda: self.wisdom
        if kwargs.get('new', False):
            # Starting-character choices and default equipment.
            base.add(self, 'choices', {
                '3 ranger skills': [
                    'animal_handling', 'athletics', 'insight', 'investigation',
                    'nature', 'perception', 'stealth', 'survival',
                ],
                'ranger armor': ['scale_mail', 'leather_armor'],
                'ranger weapon': items.simple_weapons,
                'ranger weapon 2': items.simple_weapons,
                'ranger pack': ['dungeoneers_pack', 'explorers_pack'],
                'ranger alternate gp': '5d4*10',
            }, base.dict_add)
            # Rangers learn spells starting at level 2.
            if level>1: base.add(self, 'choices', {
                'ranger spells': 1+level//2
            }, base.dict_add)
            self.wearing=['longbow']
            self.carrying=['quiver']
class Barbarian:
    """Barbarian class: d12 hit die, rage-based martial features."""

    @staticmethod
    def init(self, level, **kwargs):
        """Apply Barbarian class levels to a character.

        :param self: character object being built (mixin-style static method)
        :param level: class level, 1-20
        :param kwargs: 'new' (bool) - when True, add starting-character
            choices and equipment
        """
        note_class(self, 'Barbarian', level)
        # NOTE(review): unlike Ranger, kwargs is not forwarded to
        # Standard.init here (same for Monk/Paladin/Warlock) - confirm
        # whether Standard.init needs it.
        Standard.init(self, level)
        base.set_methods(self, Barbarian)
        self.add_hit_dice(level, 12)
        base.add(self, 'proficiencies', [
            'light_armor', 'medium_armor', 'shield',
            'strength_saving_throw', 'constitution_saving_throw',
        ]+items.martial_weapons, base.union)
        # Features gained at each class level 1-20 (one row per level).
        base.add(self, 'features', _Progression([
            ['rage', 'unarmored_defense'],
            ['reckless_attack', 'danger_sense'],
            ['primal_path'],
            ['ability_score_improvement'],
            ['extra_attack', 'fast_movement'],
            ['primal_path'],
            ['feral_instinct'],
            ['ability_score_improvement'],
            ['brutal_critical'],
            ['primal_path'],
            ['relentless'],
            ['ability_score_improvement'],
            ['brutal_critical'],
            ['primal_path'],
            ['persistent_rage'],
            ['ability_score_improvement'],
            ['brutal_critical'],
            ['indomitable_might'],
            ['ability_score_improvement'],
            ['primal_champion'],
        ], level), base.plus)
        # Rages per long rest scale with level; unlimited at 20.
        self.rages=2
        if level>=3: self.rages+=1
        if level>=6: self.rages+=1
        if level>=12: self.rages+=1
        if level>=17: self.rages+=1
        if level>=20: self.rages=float('inf')
        # Bonus damage while raging.
        self.rage_damage=2
        if level>=9: self.rage_damage+=1
        if level>=16: self.rage_damage+=1
        if kwargs.get('new', False):
            # Starting-character choices and default equipment.
            base.add(self, 'choices', {
                '2 barbarian skills': [
                    'animal_handling', 'athletics', 'intimidation', 'nature',
                    'perception', 'survival',
                ],
                # Only melee martial weapons qualify for the first pick.
                'barbarian weapon': [i for i in items.martial_weapons if 'melee' in items.items[i]['type']],
                'barbarian weapon 2': [{'handaxe': 2}]+items.simple_weapons,
            }, base.dict_add)
            self.carrying=items.explorers_pack+[{'javelins': 4}]
class Monk:
    """Monk class: d8 hit die, ki points and martial-arts scaling."""

    @staticmethod
    def init(self, level, **kwargs):
        """Apply Monk class levels to a character.

        :param self: character object being built (mixin-style static method)
        :param level: class level, 1-20
        :param kwargs: 'new' (bool) - when True, add starting-character
            choices and equipment
        """
        note_class(self, 'Monk', level)
        Standard.init(self, level)
        base.set_methods(self, Monk)
        self.add_hit_dice(level, 8)
        base.add(self, 'proficiencies', [
            'shortsword',
            'strength_saving_throw', 'dexterity_saving_throw',
        ]+items.simple_weapons, base.union)
        # Features gained at each class level 1-20 (one row per level).
        base.add(self, 'features', _Progression([
            ['martial_arts', 'unarmored_defense'],
            ['ki', 'unarmored_movement'],
            ['monastic_tradition', 'deflect_missiles'],
            ['ability_score_improvement', 'slow_fall'],
            ['extra_attack', 'stunning_strike'],
            ['ki_empowered_strikes', 'monastic_tradition'],
            ['evasion', 'stillness_of_mind'],
            ['ability_score_improvement'],
            ['unarmored_movement'],
            ['purity_of_body'],
            ['monastic_tradition'],
            ['ability_score_improvement'],
            ['tongue_of_sun_and_moon'],
            ['diamond_soul'],
            ['timeless_body'],
            ['ability_score_improvement'],
            ['monastic_tradition'],
            ['empty_body'],
            ['ability_score_improvement'],
            ['perfect_self'],
        ], level), base.plus)
        # Martial-arts die grows at levels 5, 11, 17.
        self.martial_arts='d4'
        if level>=5: self.martial_arts='d6'
        if level>=11: self.martial_arts='d8'
        if level>=17: self.martial_arts='d10'
        # Ki pool equals level once ki is gained at level 2.
        self.ki_points=0
        if level>=2: self.ki_points=level
        # Unarmored movement bonus (feet) scales in steps.
        self.unarmored_movement=0
        if level>=2: self.unarmored_movement=10
        if level>=6: self.unarmored_movement=15
        if level>=10: self.unarmored_movement=20
        if level>=14: self.unarmored_movement=25
        if level>=18: self.unarmored_movement=30
        if kwargs.get('new', False):
            # Starting-character choices and default equipment.
            base.add(self, 'choices', {
                '2 monk skills': [
                    'acrobatics', 'athletics', 'history', 'insight', 'religion',
                    'stealth',
                ],
                'monk tools': "any artisan's tools or musical instrument",
                'monk weapon': ['shortsword']+items.simple_weapons,
                'monk pack': ['dungeoneers_pack', 'explorers_pack'],
                'monk alternate gp': '5d4',
            }, base.dict_add)
            self.carrying=[{'darts': 10}]
class Paladin:
    """Paladin class: d10 hit die, half-caster with Charisma spellcasting."""

    @staticmethod
    def init(self, level, **kwargs):
        """Apply Paladin class levels to a character.

        :param self: character object being built (mixin-style static method)
        :param level: class level, 1-20
        :param kwargs: 'new' (bool) - when True, add starting-character
            choices and equipment
        """
        note_class(self, 'Paladin', level)
        Standard.init(self, level)
        base.set_methods(self, Paladin)
        self.add_hit_dice(level, 10)
        base.add(self, 'proficiencies', [
            'light_armor', 'medium_armor', 'heavy_armor', 'shield',
            'wisdom_saving_throw', 'charisma_saving_throw',
        ]+items.simple_weapons+items.martial_weapons, base.union)
        # Features gained at each class level 1-20 (one row per level).
        base.add(self, 'features', _Progression([
            ['divine_sense', 'lay_on_hands'],
            ['fighting_style', 'spellcasting', 'divine_smite'],
            ['divine_health', 'sacred_oath'],
            ['ability_score_improvement'],
            ['extra_attack'],
            ['aura_of_protection'],
            ['sacred_oath'],
            ['ability_score_improvement'],
            [],
            ['aura_of_courage'],
            ['divine_smite'],
            ['ability_score_improvement'],
            [],
            ['cleansing_touch'],
            ['sacred_oath'],
            ['ability_score_improvement'],
            [],
            ['aura_improvements'],
            ['ability_score_improvement'],
            ['sacred_oath'],
        ], level), base.plus)
        # Half-caster spell slot table, indexed by class level (row 0 unused).
        base.add(self, 'slots', [
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [2, 0, 0, 0, 0, 0, 0, 0, 0],
            [3, 0, 0, 0, 0, 0, 0, 0, 0],
            [3, 0, 0, 0, 0, 0, 0, 0, 0],
            [4, 2, 0, 0, 0, 0, 0, 0, 0],
            [4, 2, 0, 0, 0, 0, 0, 0, 0],
            [4, 3, 0, 0, 0, 0, 0, 0, 0],
            [4, 3, 0, 0, 0, 0, 0, 0, 0],
            [4, 3, 2, 0, 0, 0, 0, 0, 0],
            [4, 3, 2, 0, 0, 0, 0, 0, 0],
            [4, 3, 3, 0, 0, 0, 0, 0, 0],
            [4, 3, 3, 0, 0, 0, 0, 0, 0],
            [4, 3, 3, 1, 0, 0, 0, 0, 0],
            [4, 3, 3, 1, 0, 0, 0, 0, 0],
            [4, 3, 3, 2, 0, 0, 0, 0, 0],
            [4, 3, 3, 2, 0, 0, 0, 0, 0],
            [4, 3, 3, 3, 1, 0, 0, 0, 0],
            [4, 3, 3, 3, 1, 0, 0, 0, 0],
            [4, 3, 3, 3, 2, 0, 0, 0, 0],
            [4, 3, 3, 3, 2, 0, 0, 0, 0],
        ][level], lambda old, new: [old[i]+new[i] for i in range(9)])
        self.spellcasting_ability=lambda: self.charisma
        if kwargs.get('new', False):
            # Starting-character choices and default equipment.
            base.add(self, 'choices', {
                '2 paladin skills': [
                    'athletics', 'insight', 'intimidation', 'medicine',
                    'persuasion', 'religion',
                ],
                'paladin weapon': items.martial_weapons,
                'paladin shield': ['shield']+items.martial_weapons,
                'paladin weapon 2': [{'javelin': 5}]+[i for i in items.simple_weapons if 'melee' in items.items[i]['type']],
                'paladin pack': ['priests_pack', 'explorers_pack'],
                'paladin alternate gp': '5d4',
            }, base.dict_add)
            self.wearing=['chain_mail']
            self.carrying=['holy_symbol']
class Warlock:
    """Warlock class: d8 hit die, pact-magic slots and invocations."""

    @staticmethod
    def init(self, level, **kwargs):
        """Apply Warlock class levels to a character.

        :param self: character object being built (mixin-style static method)
        :param level: class level, 1-20
        :param kwargs: 'new' (bool) - when True, add starting-character
            choices and equipment
        """
        note_class(self, 'Warlock', level)
        Standard.init(self, level)
        base.set_methods(self, Warlock)
        self.add_hit_dice(level, 8)
        base.add(self, 'proficiencies', [
            'light_armor',
            'wisdom_saving_throw', 'charisma_saving_throw',
        ]+items.simple_weapons, base.union)
        # Features gained at each class level 1-20 (one row per level).
        base.add(self, 'features', _Progression([
            ['otherworldly_patron', 'pact_magic'],
            ['eldritch_invocations'],
            ['pact_boon'],
            ['ability_score_improvement'],
            [],
            ['otherworldly_patron'],
            [],
            ['ability_score_improvement'],
            [],
            ['otherworldly_patron'],
            ['mystic_arcanum'],
            ['ability_score_improvement'],
            ['mystic_arcanum'],
            ['otherworldly_patron'],
            ['mystic_arcanum'],
            ['ability_score_improvement'],
            ['mystic_arcanum'],
            [],
            ['ability_score_improvement'],
            ['eldritch_master'],
        ], level), base.plus)
        # Pact-magic slots: a few slots all of one level, indexed by class
        # level (row 0 unused). Only the highest slot level has entries.
        base.add(self, 'slots', [
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0, 0, 0, 0, 0],
            [2, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 2, 0, 0, 0, 0, 0, 0],
            [0, 0, 2, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 2, 0, 0, 0, 0, 0],
            [0, 0, 0, 2, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 2, 0, 0, 0, 0],
            [0, 0, 0, 0, 2, 0, 0, 0, 0],
            [0, 0, 0, 0, 3, 0, 0, 0, 0],
            [0, 0, 0, 0, 3, 0, 0, 0, 0],
            [0, 0, 0, 0, 3, 0, 0, 0, 0],
            [0, 0, 0, 0, 3, 0, 0, 0, 0],
            [0, 0, 0, 0, 3, 0, 0, 0, 0],
            [0, 0, 0, 0, 3, 0, 0, 0, 0],
            [0, 0, 0, 0, 4, 0, 0, 0, 0],
            [0, 0, 0, 0, 4, 0, 0, 0, 0],
            [0, 0, 0, 0, 4, 0, 0, 0, 0],
            [0, 0, 0, 0, 4, 0, 0, 0, 0],
        ][level], lambda old, new: [old[i]+new[i] for i in range(9)])
        if kwargs.get('new', False):
            # Cantrips known: 2 at level 1, +1 at levels 4 and 10.
            cantrips=2
            if level>=4: cantrips+=1
            if level>=10: cantrips+=1
            base.add(self, 'choices', {'cantrips': cantrips}, base.dict_add)
        # Spells known and invocations known, indexed by class level.
        self.known_spells=[0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15][level]
        self.invocations=[0, 0, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8][level]
        self.spellcasting_ability=lambda: self.charisma
        if kwargs.get('new', False):
            # Starting-character choices and default equipment.
            base.add(self, 'choices', {
                '2 warlock skills': [
                    'arcana', 'deception', 'history', 'intimidation',
                    'investigation', 'nature', 'religion',
                ],
                'warlock weapon': [['light_crossbow', 'quiver']]+items.simple_weapons,
                'warlock weapon 2': items.simple_weapons,
                'warlock junk': ['component_pouch', 'arcane_focus'],
                'warlock pack': ['scholars_pack', 'dungeoneers_pack'],
                'warlock alternate gp': '4d4*10',
            }, base.dict_add)
            self.wearing=['leather_armor', 'dagger', 'dagger']

    # Class spell list by spell level; index 0 holds the cantrips.
    spells=[
        [
            'chill_touch', 'mage_hand', 'minor_illusion', 'prestidigitation',
            'true_strike',
        ],
    ]
|
from math import *
import numpy as np
import sys
def DTW(A, B, window = sys.maxsize, d = lambda x,y: abs(x-y)):
    """Dynamic Time Warping distance between sequences A and B.

    :param A: first 1-D sequence (converted to a numpy array)
    :param B: second 1-D sequence (converted to a numpy array)
    :param window: Sakoe-Chiba band half-width; cells outside the band
        keep an effectively infinite cost
    :param d: pointwise distance function, default absolute difference
    :return: (total alignment cost, warping path as a list of (i, j)
        index pairs from (M-1, N-1) down to (0, 0))
    """
    # create the cost matrix; out-of-band cells stay at sys.maxsize
    A, B = np.array(A), np.array(B)
    M, N = len(A), len(B)
    cost = sys.maxsize * np.ones((M, N))

    # initialize the first row and column (cumulative edge costs)
    cost[0, 0] = d(A[0], B[0])
    for i in range(1, M):
        cost[i, 0] = cost[i-1, 0] + d(A[i], B[0])
    for j in range(1, N):
        cost[0, j] = cost[0, j-1] + d(A[0], B[j])

    # fill in the rest of the matrix inside the band
    for i in range(1, M):
        for j in range(max(1, i - window), min(N, i + window)):
            choices = cost[i - 1, j - 1], cost[i, j-1], cost[i-1, j]
            cost[i, j] = min(choices) + d(A[i], B[j])

    # find the optimal path by backtracking from the last cell.
    # BUG FIX: the original indexed cost[m-1, ...] / cost[..., n-1]
    # without bounds checks; at the matrix edges the -1 wraps around
    # in numpy and could trace the path through the wrong row/column.
    n, m = N - 1, M - 1
    path = []
    while (m, n) != (0, 0):
        path.append((m, n))
        # Candidate predecessors in the original preference order,
        # restricted to valid (non-negative) indices.
        candidates = []
        if m > 0:
            candidates.append((m - 1, n))
        if n > 0:
            candidates.append((m, n - 1))
        if m > 0 and n > 0:
            candidates.append((m - 1, n - 1))
        m, n = min(candidates, key = lambda x: cost[x[0], x[1]])
    path.append((0,0))
    return cost[-1, -1], path
def main():
    """Demo: align two toy sequences with DTW and plot the warping lines."""
    A = np.array([1,2,3,4,2,3,1,5,3,4,2,4,3])
    B = np.array([70,80,50,90,110,90,20,30,25,45,79,57,30])
    # Scale B roughly into A's range.
    # NOTE(review): B is an integer array, so this division truncates to
    # ints - presumably intentional rough scaling; confirm.
    for i in range(0,len(B)):
        B[i] = B[i] / 10
    #A = [1,2,3,4,2,3]
    #B = [7,8,5,9,11,9,2,3]
    cost, path = DTW(A, B, window = 100)
    print('Total Distance is ', cost)
    import matplotlib.pyplot as plt
    # Vertical offset so the two curves do not overlap in the plot.
    offset = 5
    #plt.xlim([-1, max(len(A), len(B)) + 1])
    plt.plot(A)
    plt.plot(B + offset)
    # Draw a connecting segment for every matched index pair on the path.
    for (x1, x2) in path:
        plt.plot([x1, x2], [A[x1], B[x2] + offset])
    plt.show()

if __name__ == '__main__':
    main()
|
# Demo of reading package data files, both directly and via pkgutil.
# (Fixed: the original repeatedly shadowed the builtin `list`, and
# wrapped print(line) in a dead try/except ValueError.)
# from demopackage import data
from pkgutil import get_data

# Print the raw lines of the packaged data file.
with open('demopackage/data/datafile.txt') as f:
    for line in f:
        print(line)

# get_data(package name, path relative to the package) -> bytes
data = get_data('demopackage', 'data/datafile.txt')
print(data)
byte_lines = data.splitlines()
print(byte_lines)

# Decode to a unicode text string before splitting.
data2 = get_data('demopackage', 'data/datafile.txt').decode('utf8')
print(data2)
text_lines = data2.splitlines()
print(text_lines)

# Parse comma-separated fields from each line.
with open('demopackage/data/datafile.txt') as f:
    rows = [line.split(',') for line in f.readlines()]
print(rows)

# Accumulate the lines one by one (equivalent to list(f)).
with open('demopackage/data/datafile.txt') as f:
    collected = []
    for line in f:
        collected.append(line)
print(collected)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressionFilterModel(Model):
    """ExpressionFilterModel.

    :param clauses: Flat list of clauses in this subscription
    :type clauses: list of :class:`ExpressionFilterClause <notification.v4_0.models.ExpressionFilterClause>`
    :param groups: Grouping of clauses in the subscription
    :type groups: list of :class:`ExpressionFilterGroup <notification.v4_0.models.ExpressionFilterGroup>`
    :param max_group_level: Max depth of the Subscription tree
    :type max_group_level: int
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'clauses': {'key': 'clauses', 'type': '[ExpressionFilterClause]'},
        'groups': {'key': 'groups', 'type': '[ExpressionFilterGroup]'},
        'max_group_level': {'key': 'maxGroupLevel', 'type': 'int'}
    }

    def __init__(self, clauses=None, groups=None, max_group_level=None):
        super(ExpressionFilterModel, self).__init__()
        self.clauses = clauses
        self.groups = groups
        self.max_group_level = max_group_level
|
from __future__ import unicode_literals
import unittest
import mock
import requests
import time
from requests_oauthlib import OAuth2Session
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
from requests_oauthlib.compliance_fixes import linkedin_compliance_fix
from requests_oauthlib.compliance_fixes import mailchimp_compliance_fix
from requests_oauthlib.compliance_fixes import weibo_compliance_fix
class FacebookComplianceFixTest(unittest.TestCase):

    def test_fetch_access_token(self):
        """The fix converts Facebook's urlencoded token reply to JSON."""
        session = facebook_compliance_fix(
            OAuth2Session('foo', redirect_uri='https://i.b'))
        session.post = mock.MagicMock()

        fake_response = requests.Response()
        fake_response.status_code = 200
        fake_response.request = mock.MagicMock()
        fake_response._content = 'access_token=urlencoded'.encode('UTF-8')
        fake_response.headers['Content-Type'] = 'text/plain'
        session.post.return_value = fake_response

        token = session.fetch_token(
            'https://mocked.out',
            client_secret='bar',
            authorization_response='https://i.b/?code=hello')
        self.assertEqual(token, {'access_token': 'urlencoded', 'token_type': 'Bearer'})
class LinkedInComplianceFixTest(unittest.TestCase):

    def test_fetch_access_token(self):
        """The fix fills in the token_type LinkedIn omits."""
        session = linkedin_compliance_fix(
            OAuth2Session('foo', redirect_uri='https://i.b'))
        session.post = mock.MagicMock()

        fake_response = requests.Response()
        fake_response.status_code = 200
        fake_response.request = mock.MagicMock()
        fake_response._content = '{"access_token":"linkedin"}'.encode('UTF-8')
        session.post.return_value = fake_response

        token = session.fetch_token(
            'https://mocked.out',
            client_secret='bar',
            authorization_response='https://i.b/?code=hello')
        self.assertEqual(token, {'access_token': 'linkedin', 'token_type': 'Bearer'})
class MailChimpComplianceFixTest(unittest.TestCase):

    def test_fetch_access_token(self):
        """The fix drops MailChimp's null scope and zero expires_in."""
        session = mailchimp_compliance_fix(
            OAuth2Session('foo', redirect_uri='https://i.b'))
        session.post = mock.MagicMock()

        fake_response = requests.Response()
        fake_response.status_code = 200
        fake_response.request = mock.MagicMock()
        fake_response._content = '{"access_token":"mailchimp", "expires_in":0, "scope":null}'.encode('UTF-8')
        session.post.return_value = fake_response

        token = session.fetch_token(
            'https://mocked.out',
            client_secret='bar',
            authorization_response='https://i.b/?code=hello')

        # Times should be close
        approx_expires_at = time.time() + 3600
        actual_expires_at = token.pop('expires_at')
        self.assertAlmostEqual(actual_expires_at, approx_expires_at, places=2)

        # Other token values exact
        self.assertEqual(token, {'access_token': 'mailchimp', 'expires_in': 3600})

        # And no scope at all
        self.assertFalse('scope' in token)
class WeiboComplianceFixTest(unittest.TestCase):

    def test_fetch_access_token(self):
        """The fix fills in the token_type Weibo omits."""
        session = weibo_compliance_fix(
            OAuth2Session('foo', redirect_uri='https://i.b'))
        session.post = mock.MagicMock()

        fake_response = requests.Response()
        fake_response.status_code = 200
        fake_response.request = mock.MagicMock()
        fake_response._content = '{"access_token":"weibo"}'.encode('UTF-8')
        session.post.return_value = fake_response

        token = session.fetch_token(
            'https://mocked.out',
            client_secret='bar',
            authorization_response='https://i.b/?code=hello')
        self.assertEqual(token, {'access_token': 'weibo', 'token_type': 'Bearer'})
|
import argparse
from aoe2_image_gen.generator import aoe2_image_gen
def main():
    """CLI entry point: dispatch a dataset-generation command."""
    # Map command names to their generator functions.
    commands = {
        "villagers": aoe2_image_gen.generate_villager_dataset,
        "multi_label": aoe2_image_gen.generate_multi_label_dataset,
    }

    parser = argparse.ArgumentParser(
        description="Generate machine learning datasets using the Age of Empires 2 map editor running under steam."
    )
    parser.add_argument("command", choices=commands.keys())
    parser.add_argument(
        "-n",
        type=int,
        nargs=1,
        default=[5],
        help="Number of images to generate in the dataset.",
    )
    parser.add_argument(
        "-v",
        "--visible",
        action="store_true",
        default=False,
        help="Start in a visible window, otherwise it runs in a virtual frame buffer.",
    )
    args = parser.parse_args()

    generate = commands[args.command]
    generate(numberOfImages=args.n[0], resolution=(1024, 768), visible=args.visible)


if __name__ == "__main__":
    main()
|
from django.contrib import admin
from .models import Usuario
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
class MyUserChangeForm(UserChangeForm):
    """User change form bound to the custom Usuario model."""

    class Meta(UserChangeForm.Meta):
        model = Usuario
class MyUserAdmin(UserAdmin):
    """Admin for Usuario: stock UserAdmin plus the extra 'persona' field."""

    form = MyUserChangeForm
    # Append a fieldset so 'persona' appears on the admin change page.
    fieldsets = UserAdmin.fieldsets + (
        (None, {'fields': ('persona',)}),
    )
# Register the custom user model with its tailored admin class.
admin.site.register(Usuario,MyUserAdmin)
|
import time
import re
import datetime
import plotly.graph_objs as go
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def check_score(old_score):
    """Placeholder for score checking; currently unimplemented."""
    pass
def Play():
    """Play one game of 2048 via the module-level selenium `driver`.

    Repeats a down/left/down/right key cycle, presses up when the score
    stalls for more than 11 seconds, and returns the final score once
    the "Try again" link (game over) appears.
    """
    driver.get('http://www.cl.cam.ac.uk/~yf261/2048/')
    body = driver.find_element_by_tag_name('body')
    t0 = datetime.datetime.now()
    score = 0
    delay = 0
    while True:
        old_score = score
        # Slow down key presses while a "Relationship" overlay is shown.
        try:
            rel = driver.find_element_by_xpath('.//span[@class = "rel"]')
        except:
            delay = 0
        else:
            try:
                if rel.text == "Relationship":
                    delay = 0.5
                else:
                    delay = 0
            except:
                pass
        # Fixed movement cycle: down, left, down, right.
        time.sleep(delay)
        body.send_keys(Keys.ARROW_DOWN)
        time.sleep(delay)
        body.send_keys(Keys.ARROW_LEFT)
        time.sleep(delay)
        body.send_keys(Keys.ARROW_DOWN)
        time.sleep(delay)
        body.send_keys(Keys.ARROW_RIGHT)
        # Read the current score (skip containers with non-numeric text).
        for elem in driver.find_elements_by_class_name("score-container"):
            try:
                score = int(elem.text)
            except:
                continue
        # Score did not increase: press up if stuck for more than 11s.
        if old_score - score >= 0:
            delta = datetime.datetime.now() - t0
            if delta.seconds > 11:
                body.send_keys(Keys.ARROW_UP)
        else:
            t0 = datetime.datetime.now()
        # Game over is signalled by the "Try again" link appearing.
        try:
            driver.find_element_by_link_text("Try again")
        except:
            pass
        else:
            # BUG FIX: was the Python-2-only statement `print "End"`,
            # a SyntaxError under Python 3 and inconsistent with the
            # print(...) calls elsewhere in this file.
            print("End")
            return score
# Launch the browser, play games repeatedly and optionally live-stream
# the scores to plotly.
driver = webdriver.Chrome("/home/matthias/bin/chromedriver/chromedriver")
scores = []
# encoding: utf-8
import plotly.plotly as py
import plotly.graph_objs as py_gr
import plotly.tools as py_tls
# Number of games to play (also used as the stream's max points).
maxpoints = 10000
# Set True to live-stream scores to plotly (requires a stream token).
streamit = False
stream =[]
if streamit:
    stream_id = "" # Put your own token
    trace1 = py_gr.Scatter(
        x=[],
        y=[],
        xaxis=dict(title="Game"),
        yaxis=dict(title="Score"),
        mode='line',
        name="Game",
        stream=py_gr.Stream(
            token=stream_id,
            maxpoints=maxpoints
        )
    )
    data = go.Data([trace1])
    layout = go.Layout(title='Martins stupid PhD algorithm')
    fig = go.Figure(data=data, layout=layout)
    py.plot(fig, auto_open=False)
    stream = py.Stream(stream_id)
    stream.open()
# Play one game per iteration and record (and optionally stream) the score.
for game in range(maxpoints):
    print("game:", game)
    scores.append(Play())
    print(scores[-1])
    if streamit:
        stream.write(
            dict(
                x=game,
                y=scores[-1]
            )
        )
if streamit:
    stream.close()
print(scores)
|
import random
from math import log
def primecheck(prime):
    """Return True if *prime* is a prime number (Miller-Rabin test).

    Uses a fixed set of witnesses that makes the test deterministic for
    all inputs below 3.3 * 10**24.

    Fixes to the original: `n = n/2` used float division (corrupting the
    exponent on Python 3), `binp[...] is '1'` compared string identity
    instead of equality, and the random witness was overwritten by the
    loop variable before use.
    """
    if prime < 2:
        return False
    if prime in (2, 3):
        return True
    if prime % 2 == 0:
        return False
    # Write prime - 1 as n * 2**s with n odd.
    n = prime - 1
    s = 0
    while n % 2 == 0:
        n //= 2
        s += 1
    # Deterministic witness set for prime < 3.3e24.
    for a in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37):
        if a % prime == 0:
            continue
        x = pow(a, n, prime)
        if x == 1 or x == prime - 1:
            continue
        for _ in range(s - 1):
            x = (x * x) % prime
            if x == prime - 1:
                break
        else:
            # No square reached -1: 'a' witnesses compositeness.
            return False
    return True
import vanilla
from sys import getsizeof
from defconAppKit.windows.baseWindow import BaseWindowController
from lib.scripting.codeEditor import CodeEditor
try:
from plistlib import writePlistToString
except ImportError:
from plistlib import dumps as writePlistToString
from knownKeys import known_keys
class UFOCleaner(BaseWindowController):
    """RoboFont tool: inspect and delete lib keys from the current UFO.

    Lists all font-level and glyph-level lib keys with their approximate
    plist sizes; checked keys can be deleted from the UFO in one action.
    """

    def __init__(self):
        self._font = CurrentFont()
        self._libkeys = []          # row dicts backing the vanilla List
        self._key_contents = {}     # key -> concatenated plist XML text
        self._seen_keys = []        # keys already added to the list
        self._known_keys = known_keys
        self._key_sizes = {}        # key -> size of its plist text
        self.total_size = 0
        self._collect_keys()
        self._build_ui()
        self._update_total_size_display()

    def _build_ui(self):
        """Create the window, key list, size label and action button."""
        columnDescriptions = [
            {"title": "Delete",
             "cell": vanilla.CheckBoxListCell(),
             "width": 40},
            {"title": "Description",
             "typingSensitive": True,
             "editable": False,
             "width": 210},
            {"title": "Size",
             "typingSensitive": True,
             "editable": False,
             "width": 60},
            {"title": "Key",
             "typingSensitive": True,
             "editable": True,
             "width": 220},
            {"title": "Location",
             "typingSensitive": True,
             "editable": False,
             "width": 40},
        ]
        self._width = 640
        self._height = 300
        self.w = vanilla.Window((self._width, self._height), "UFO Cleaner", (self._width, self._height))
        self.w.key_list = vanilla.List((10, 9, -10, -40),
            self._libkeys,
            columnDescriptions=columnDescriptions,
            drawFocusRing=True,
            #editCallback=self._setDecompose,
            doubleClickCallback=self._open_sheet,
        )
        #self.w.xml = CodeEditor((10, -130, -10, -40), "", lexer="xml")
        self.w.total_size = vanilla.TextBox((10, -30 , 240, 20), "")
        self._update_total_size_display()
        self.w.action_button = vanilla.Button((-200, -30 , -10, 20), "Delete checked items from UFO",
            callback=self._clean_ufo,
            sizeStyle="small",
        )
        self._sheet = False
        self.setUpBaseWindowBehavior()
        self.w.open()

    def _collect_keys(self):
        """Scan the font and glyph libs, recording each unique key,
        its plist text and its size."""
        self._seen_keys = []
        self._libkeys = []
        self._key_contents = {}
        self._key_sizes = {}
        if self._font is not None:
            # Font lib
            for k in self._font.lib.keys():
                if not k in self._seen_keys:
                    self._libkeys.append({
                        "Delete": False,
                        "Description": self._known_keys.get(k, "(%s)" % k.split(".")[-1]),
                        "Size": "? kB",
                        "Key": k,
                        "Location": "Font",
                    })
                    self._seen_keys.append(k)
                    self._key_contents[k] = ""
                # Strip the plist header/footer so only the payload XML
                # remains. NOTE(review): the [173:-9] slice assumes a
                # fixed-length plist preamble - confirm across plistlib
                # versions.
                self._key_contents[k] += writePlistToString(self._font.lib[k])[173:-9].decode("utf-8")
                self._key_sizes[k] = len(self._key_contents[k])
            # Glyph libs
            for g in self._font:
                for k in g.lib.keys():
                    if not k in self._seen_keys:
                        self._libkeys.append({
                            "Delete": False,
                            "Description": self._known_keys.get(k, "(%s)" % k.split(".")[-1]),
                            "Size": "? kB",
                            "Key": k,
                            "Location": "Glyph",
                        })
                        self._seen_keys.append(k)
                        self._key_contents[k] = ""
                    self._key_contents[k] += writePlistToString(g.lib[k])[173:-9].decode("utf-8")
                    self._key_sizes[k] = len(self._key_contents[k])
        # Collect key sizes
        total_size = 0
        for i in range(len(self._libkeys)):
            _key = self._libkeys[i]
            size = self._key_sizes[_key["Key"]]
            total_size += size
            if size < 1024:
                _key["Size"] = "%i B" % size
            else:
                _key["Size"] = "%0.1f kB" % (size / 1024)
        self.total_size = total_size

    def _open_sheet(self, sender):
        """Show the plist XML of the double-clicked key in a sheet."""
        self.s = vanilla.Sheet((self._width-50, 220), self.w, (self._width-50, 220))
        #self.s.contents = vanilla.EditText((10, 10, -10, -40), continuous=False, sizeStyle="small")
        self.s.contents = CodeEditor((10, 10, -10, -40), "", lexer="xml")
        self.s.ok_button = vanilla.Button((-80, -30 , -10, 20), "OK",
            callback=self._close_sheet,
            sizeStyle="small",
        )
        _key = sender.get()[sender.getSelection()[0]] #["Key"]
        self.s.contents.set(self._key_contents[_key["Key"]])
        self._sheet = True
        self.s.open()

    def _close_sheet(self, sender):
        self._sheet = False
        self.s.close()

    def _clean_ufo(self, sender):
        """Delete all checked keys from the font and glyph libs, then
        rescan and refresh the UI."""
        keys_to_delete = [k["Key"] for k in self.w.key_list.get() if k["Delete"]]
        # BUG FIX: iterate over a snapshot of the keys - deleting from a
        # dict while iterating its keys() view raises RuntimeError on
        # Python 3 (this file supports py3 via the plistlib fallback).
        for k in list(self._font.lib.keys()):
            if k in keys_to_delete:
                del self._font.lib[k]
                self._font.update()
        for g in self._font:
            found = False
            for k in list(g.lib.keys()):
                if k in keys_to_delete:
                    found = True
                    del g.lib[k]
            if found:
                g.update()
        self._collect_keys()
        self._update_total_size_display()
        self.w.key_list.set(self._libkeys)

    def _update_total_size_display(self):
        self.w.total_size.set("Total library size: %0.1f kB" % (self.total_size / 1024))

    def windowCloseCallback(self, sender):
        #if self._sheet:
        #    self.s.close()
        super(UFOCleaner, self).windowCloseCallback(sender)
# Launch the tool window inside RoboFont.
OpenWindow(UFOCleaner)
|
# controlling the browser using the selenium module
# pip install selenium
# we must have Firefox and the geckodriver (geckodriver.exe must be added to the PATH)
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('https://automatetheboringstuff.com')
# assigning an element (in this case a link) on the page, based on its CSS selector, to a variable
element = browser.find_element_by_css_selector('.main > div:nth-child(1) > ul:nth-child(19) > li:nth-child(1) > a:nth-child(1)')
# executing an action (click) on the element
element.click()
# find_elements (plural) returns a list of ALL elements matching the
# selector, in this case every <p></p> on the page
elems = browser.find_elements_by_css_selector('p')
print( len(elems) ) # amount of elements in the list
browser.get('https://stackoverflow.com')
# finding elements and sending data
'''
this could be used for a comment section
to fill user and passwords fields etc
in this case is a search element
'''
searchElem = browser.find_element_by_css_selector('#search > div > input')
searchElem.send_keys('selenium') # look for selenium on stackoverflow
searchElem.submit()
# playing around with the browser history and reloading
browser.refresh()
browser.back()
browser.forward()
# browser.quit()
# grabbing text from an element
browser.get('https://automatetheboringstuff.com')
var = browser.find_element_by_css_selector('p') # the first paragraph
print( var.text )
# -*-coding:utf-8 -*-
# Encapsulates assorted helper utilities needed by the application.
import functools
import hashlib
import os
import re
import shutil
import time

from django.core.urlresolvers import reverse
from django.shortcuts import redirect

from .wrappers import *
from normal_user.models import *
# 密码加密
# Password hashing
def password_encryption(password):
    """Return the SHA-256 hex digest of the salted plaintext password.

    A fixed salt ('helloworld') is prepended before hashing.
    """
    salted = 'helloworld' + password
    digest = hashlib.sha256(salted.encode('utf-8'))
    return digest.hexdigest()
#CheckSum 加密
def CheckSum_en(str):
sha = hashlib.sha1()
sha.update(str.encode('utf-8'))
return sha.hexdigest()
# 用户是否登录
# Decorator: require a logged-in user, otherwise redirect to the login page.
def login_permission(view_func):
    """Wrap *view_func* so it only runs for authenticated sessions.

    Anonymous requests are redirected to the login page.
    """
    # functools.wraps preserves the view's name/docstring for Django
    # introspection and debugging (the original wrapper lost them).
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        # Logged in when both session keys are present.
        if get_session(request, 'user_name') and get_session(request, 'user_id'):
            return view_func(request, *args, **kwargs)
        else:
            # Not logged in: redirect to the login page.
            return redirect(reverse('users:login'))
    return wrapper
#用户是否登录后进入登录页
# Decorator: send already-authenticated users to the index instead of
# showing them the login page again.
def is_login(view_func):
    """Wrap *view_func* (a login page view) so that users who are
    already logged in are redirected to the index page."""
    # functools.wraps preserves the view's name/docstring for Django
    # introspection and debugging (the original wrapper lost them).
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        # Already logged in: skip the login page.
        if get_session(request, 'user_name') and get_session(request, 'user_id'):
            return redirect(reverse('users:index'))
        else:
            # Not logged in: show the login page.
            return view_func(request, *args, **kwargs)
    return wrapper
#创建保存图片目录
def makeNowdir(src):
old_src = re.match('/(static/upload/(.*))',src).group(1)
img_src = re.match('/(static/upload/(.*))', src).group(2)
localstime = time.localtime(time.time())
#设置根目录
g_src= 'static/up_img/'
new_src = g_src+str(localstime[0])+'/'+str(localstime[1])
#判断目录是否存在
if os.path.exists(g_src+'/'+str(localstime[0])):
if os.path.exists(new_src):
move_img(old_src,new_src)
else:
os.makedirs(new_src)
move_img(old_src, new_src)
else:
os.makedirs(new_src)
move_img(old_src, new_src)
return new_src
#转移图片
def move_img(src,new_src):
try:
shutil.move(src,new_src)
except Exception as e:
return new_src
|
"""
CS241 Homework 03
Written by Chad Maceth
"""
# Start with empty lists
odd_numbers = []
even_numbers = []
# Loop until 0 is entered
number = 1
while (number != 0):
number = int(input("Enter a number (0 to quit): "))
# If number is even (and not 0) then add to even list
# if number is odd then add to odd list
if (number != 0 and number % 2 == 0):
even_numbers.append(number)
elif (number % 2 == 1):
odd_numbers.append(number)
# Print out both lists
print()
print("Even numbers:")
for number in even_numbers:
print(number)
print()
print("Odd numbers:")
for number in odd_numbers:
print(number)
|
import argparse

if __name__ == '__main__':
    # Minimal argparse demo: one required positional argument.
    parser = argparse.ArgumentParser()
    parser.add_argument('demo', help='demo how to use argparse')
    args = parser.parse_args()
    # BUG FIX: `print args.demo` is Python 2 statement syntax and is a
    # SyntaxError on Python 3; the parenthesized call works on both.
    print(args.demo)
# run: python argparse_usage.py Test
# output: Test
|
import os
import pickle
import tensorflow as tf
from utils import export_dicts_helper, load_vocab
class EvalDataLoader:
    def __init__(self, path_to_vocab_pickle, path_to_data):
        """
        This class is used to load evaluation data and export data to TfRecords.
        Important Note: Modifies vocab files!
        During processing new words are added to the vocabulary,
        therefore they need to be exported again.
        :param path_to_vocab_pickle: string
        :param path_to_data path: to a txt file with scws evaluation data
        """
        # Context manager closes the pickle handle (the bare open() leaked it).
        with open(path_to_vocab_pickle, "rb") as vocab_file:
            self.word2index = pickle.load(vocab_file)
        self.n_words = len(self.word2index)
        self.examples = self.parse_eval_data(path_to_data)
        print(len(self.word2index))

    @staticmethod
    def parse_eval_data(path_to_data):
        """
        :param path_to_data: path to a txt file with scws evaluation data
        :return: list of dicts (example) corresponding to one line of the dataset.
        example['idx'] = int(id)
        example['word1'] = string
        example['word2'] = string
        example['pos1'] = string
        example['pos2'] = string
        example['avg_rating'] = float, average human rating of relation
        example["sentence1"] = list of tokens from parsed first sentence without <b>,</b> tokens
        example["sentence2"] = list of tokens from parsed second sentence without <b>,</b> tokens
        example["word1idx"] = word1 index in the example["sentence1"] after parsing
        example["word2idx"] = word2 index in the example["sentence2"] after parsing
        """
        examples = []
        # Context manager closes the data file (the bare open() leaked it).
        with open(path_to_data, "r") as _file:
            for line in _file:
                example = dict()
                _split = line.lower().split("\t")
                example['idx'] = int(_split[0])
                example['word1'] = _split[1]
                example['word2'] = _split[3]
                example['pos1'] = _split[2]
                example['pos2'] = _split[4]
                example['avg_rating'] = float(_split[7])
                seq1 = _split[5].split(" ")
                seq2 = _split[6].split(" ")
                # Find the target word right after its <b> marker; after the
                # markers are stripped its position is i - 1.
                for i, w in enumerate(seq1):
                    if w == example['word1'] and seq1[i - 1] == "<b>":
                        # position in the sentence
                        example["word1idx"] = i - 1  # we are about to remove <b> and </b>
                        break
                seq1.remove('<b>')
                seq1.remove('</b>')
                for i, w in enumerate(seq2):
                    if w == example['word2'] and seq2[i - 1] == "<b>":
                        # position in the sentence
                        example["word2idx"] = i - 1  # we are about to remove <b> and </b>
                        break
                seq2.remove('<b>')
                seq2.remove('</b>')
                example["sentence1"] = seq1
                example["sentence2"] = seq2
                examples.append(example)
        return examples

    def add_word(self, word):
        """
        Helper for building the dictionary
        :param word: string
        :return: int(id) for the word
        """
        old_n_words = self.n_words
        self.word2index[word] = self.n_words
        self.n_words += 1
        return old_n_words

    def sequence_to_tf_example(self, example):
        """
        :param example: dict
        example['idx'] = int(id)
        example['word1'] = string
        example['word2'] = string
        example['pos1'] = string
        example['pos2'] = string
        example['avg_rating'] = float, average human rating of relation
        example["sentence1"] = list of tokens from parsed first sentence without <b>,</b> tokens
        example["sentence2"] = list of tokens from parsed second sentence without <b>,</b> tokens
        example["word1idx"] = word1 index in the example["sentence1"] after parsing
        example["word2idx"] = word2 index in the example["sentence2"] after parsing
        :return: TensorFlow Record serialized representation of example above.
        Without pos1 and pos2
        example[length1] and example[length2] contains corresponding sequence lengths
        """
        ex = tf.train.SequenceExample()
        # A non-sequential feature of our example
        # NOTE(review): not_found is returned but never incremented here;
        # unknown words are added to the vocab instead — confirm intent.
        not_found = 0
        sequence_1 = []
        sequence_2 = []
        ex.context.feature["idx"].int64_list.value.append(example["idx"])
        # Map tokens to ids, growing the vocabulary for unseen words.
        for word in example["sentence1"]:
            try:
                sequence_1.append(self.word2index[word])
            except KeyError:
                sequence_1.append(self.add_word(word))
        for word in example["sentence2"]:
            try:
                sequence_2.append(self.word2index[word])
            except KeyError:
                sequence_2.append(self.add_word(word))
        sequence_length_1 = len(sequence_1)  # list of word ids
        sequence_length_2 = len(sequence_2)  # list of sense ids
        # example id
        ex.context.feature["length1"].int64_list.value.append(sequence_length_1)
        ex.context.feature["length2"].int64_list.value.append(sequence_length_2)
        # position in the sentence
        ex.context.feature["word1idx"].int64_list.value.append(example["word1idx"])
        ex.context.feature["word2idx"].int64_list.value.append(example["word2idx"])
        try:
            ex.context.feature["word1"].int64_list.value.append(self.word2index[example["word1"]])
        except KeyError:
            ex.context.feature["word1"].int64_list.value.append(self.add_word(example["word1"]))
        try:
            ex.context.feature["word2"].int64_list.value.append(self.word2index[example["word2"]])
        except KeyError:
            ex.context.feature["word2"].int64_list.value.append(self.add_word(example["word2"]))
        ex.context.feature["avg_rating"].float_list.value.append(example["avg_rating"])
        # Feature lists for the two sequential features of our example
        fl_tokens_1 = ex.feature_lists.feature_list["sentence1"]
        fl_tokens_2 = ex.feature_lists.feature_list["sentence2"]
        for token in sequence_1:
            fl_tokens_1.feature.add().int64_list.value.append(token)
        for token in sequence_2:
            fl_tokens_2.feature.add().int64_list.value.append(token)
        return ex, not_found

    def serialize_examples(self):
        """
        Exports the dataset to tf records.
        Every 5th example goes to the validation set, the rest to the test set.
        :return:
        """
        valid_record_filename = 'scws_records/valid/scws_valid.tfrecord'
        test_record_filename = 'scws_records/test/scws_test.tfrecord'
        not_found = 0
        # Pass the filenames directly: the plain open(..., 'w') handles used
        # before were never closed (resource leak) and only supplied .name.
        # TFRecordWriter is a context manager, so both writers are closed/flushed.
        with tf.python_io.TFRecordWriter(test_record_filename) as writer_test, \
                tf.python_io.TFRecordWriter(valid_record_filename) as writer_valid:
            for i, ex in enumerate(self.examples):
                example, nf = self.sequence_to_tf_example(ex)
                not_found += nf
                if i % 5 == 0:
                    writer_valid.write(example.SerializeToString())
                else:
                    writer_test.write(example.SerializeToString())
        print("Saved eval data to TFRecords. Not found: ", not_found)
if __name__ == "__main__":
vocab_pickle = \
"/Users/daniel/Desktop/Research/WSD_Data/ontonotes-release-5.0/api/corpus/vocab/pickles/word2index.pickle"
scws_path = "/Users/daniel/Desktop/Research/WSD_Data/ontonotes-release-5.0/api/SCWS/ratings.txt"
eval_data_loader = EvalDataLoader(vocab_pickle, scws_path)
eval_data_loader.serialize_examples()
export_dicts_helper(eval_data_loader.word2index, dicts_dir="corpus", type="vocab",
idx2token_path="index2word", token2idx_path="word2index", write_pickle=True)
|
#Roman Sokolovski
#Assignment4
#10185440
import WordLookup
def oneLetterDiff(word):
    """Return all dictionary words that differ from *word* by exactly one letter.

    :param word: lower-case word to mutate
    :return: list of words (validated via WordLookup.lookup) one substitution away
    """
    alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o',
                'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' ]
    wordList = []
    for i in range(len(word)):  # every position in the word
        for letter in alphabet:  # every candidate replacement letter
            # BUG FIX: word.replace(word[i], letter) substituted EVERY
            # occurrence of that character (e.g. 'noon' -> 'moom' instead of
            # 'moon'); slicing changes only position i.
            candidate = word[:i] + letter + word[i+1:]
            if WordLookup.lookup(candidate) == True and candidate != word:  # keep only real words
                wordList.append(candidate)
    return wordList
def wordPath(start, end, steps):
    """Return a list of words leading from *start* to *end* in exactly *steps*
    one-letter substitutions, or None if no such path exists / input invalid."""
    #-----------------------Error Checking-------------------
    if (steps < 0): #checks for negative steps
        return None
    if (len(start) != len(end)): #makes sure both words are same len
        return None
    for i in range(len(start)): #capital letters check
        if(start[i].isupper() or end[i].isupper()):
            return None
    if (WordLookup.lookup(start) == False or WordLookup.lookup(end) == False): #checks if words are legit
        return None
    #--------------------------End of Error Checking---------------
    if (steps == 0): #if steps is 0
        if (start == end):
            return [start]
        else:
            return None
    else:#steps is greater than 0
        # Depth-first search: try every neighbor one letter away and recurse
        # with one fewer step.
        nextWord = oneLetterDiff(start)
        for newWord in nextWord:
            nextPath = wordPath(newWord, end, steps - 1)
            if (nextPath != None):
                return [start] + nextPath
        # NOTE(review): falls through and implicitly returns None when no
        # candidate leads to `end` — intentional, but worth making explicit.
def allPath(start, end, steps, finalList=None):
    # TODO: unimplemented stub — always returns None.
    return None
|
#!/usr/bin/env python
#
# integration_test.py
#
# This is a system test that uses a bridge (via the linux socat utility) to
# connect a fake tty device with the real system.
#
# TODO: Complete this script
# TODO: Uart Bridge should probably be called something else
#
# Author(s): Kenny Luong <luong97@hawaii.edu>
# Date Created: 2017-09-26
# Date Modified: 2017-10-03
#
import subprocess
import signal
import sys
import time
import os
from tty_bridge import TTYBridge
from threading import Thread
from fake_xbee import FakeXbee
include_path = os.path.dirname(os.path.realpath(__file__)) + "/../src"
sys.path.insert(0, include_path)
from xbee_gateway import XBeeGateway
from decoder import Decoder
test_packet_proc = None  # NOTE(review): assigned but never used in this script

# Setup this handler so that the script ends gracefully
def signal_handler(signal, frame):
    # NOTE(review): only prints — it does not stop the threads or exit, so
    # Ctrl+C will not actually terminate the script; confirm intent (see
    # the TODOs in the file header: script is incomplete).
    print('You pressed Ctrl+C!')
signal.signal(signal.SIGINT, signal_handler)
#
# SETUP UART BRIDGE
#
# The UART bridge will open up two connections;
# one on ttyV1 and the other on ttyV2
#
# Here is an example data flow:
#
# |Fake Device| --> ttyV1 --> SOCAT Bridge --> ttyV2 --> |packet tester|
#
#
# Start the socat-backed bridge that links ttyV1 <-> ttyV2.
tty_bridge = TTYBridge()
tty_bridge.start()
print("Waiting for the UART bridge to come up...")
time.sleep(2)  # give socat time to create both pseudo-terminals
#
# SETUP FAKE DEVICE
#
def start_test_packet():
    # Thread target: run the fake XBee device on the ttyV1 end of the bridge.
    fake_xbee = FakeXbee('./ttyV1')
    fake_xbee.connect()
    fake_xbee.start_loop()
test_packet_thread = Thread(target=start_test_packet)
test_packet_thread.start()
#
# SETUP PACKET TESTER
#
# This is the python script that normally runs on the laptop
#
# subprocess.call("cd ../src && python packet_tester.py", shell=True)
def print_data(data, timestamp):
    """Debug callback: print the timestamp, then the payload, on separate lines."""
    for item in (timestamp, data):
        print(item)
# Wire the decoder to the gateway listening on the laptop side (ttyV2).
decoder = Decoder()
decoder.register_callback(decoder.print_dictionary)  # print each decoded packet
gateway = XBeeGateway()
gateway.register_callback(decoder.decode_data)
gateway.setup_xbee('./ttyV2', 9600)
gateway.begin_loop()
#
# BLOCK UNTIL CONTROL-C
#
test_packet_thread.join()
tty_bridge.wait()
|
##### solved #####
import numpy as np
import math
N=int(input())
A=np.asarray(list(map(int,input().split(" ")))) # the list of values
print(math.ceil(A[A>0].mean()))  # ceiling of the mean over the positive entries
import pyfits as pf
import h5py
import numpy as np
import kmeans_radec
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
import treecorr
import seaborn as sns
# Paths to the five KiDS DR3.1 field catalogs; G9 is loaded first below,
# the remaining four are appended in the loop.
shear_data = ["../../redsequence/data/KiDS_DR3.1_G9_ugri_shear.fits",
"../../redsequence/data/KiDS_DR3.1_G12_ugri_shear.fits",
"../../redsequence/data/KiDS_DR3.1_G15_ugri_shear.fits",
"../../redsequence/data/KiDS_DR3.1_G23_ugri_shear.fits",
"../../redsequence/data/KiDS_DR3.1_GS_ugri_shear.fits"]
kids_cat = pf.open("../../redsequence/data/KiDS_DR3.1_G9_ugri_shear.fits")
e1 = kids_cat[1].data['e1']
e2 = kids_cat[1].data['e2']
w = kids_cat[1].data['weight']
z_b = kids_cat[1].data['Z_B']
sg = kids_cat[1].data['SG_FLAG']
mask = kids_cat[1].data['MASK']
fitclass = kids_cat[1].data['fitclass']
ra = kids_cat[1].data['RAJ2000']
dec = kids_cat[1].data['DECJ2000']
snr = kids_cat[1].data['model_SNratio']
rm = kids_cat[1].data['bias_corrected_scalelength']
psf1 = kids_cat[1].data['PSF_e1']
psf2 = kids_cat[1].data['PSF_e2']
magr = kids_cat[1].data['MAG_r']
bias = kids_cat[1].data['m']
mask = kids_cat[1].data['MASK']  # NOTE(review): MASK already read above — redundant reassignment
flag = kids_cat[1].data['Flag']
#sns.distplot(kids_cat[1].data['MASK'], kde= False)
#plt.xscale("log")
#plt.savefig("/home/vakili/public_html/mask.png")
#plt.close()
#sns.distplot(kids_cat[1].data['Flag'], kde= False)
#plt.savefig("/home/vakili/public_html/flag.png")
#plt.close()
# G9 galaxy selection: star/galaxy flag, r > 20, good shape fit, size cut,
# unmasked, unflagged.
star_mask = np.where((sg==1)&(magr>20)&(fitclass==0)&(rm>0.5)&(mask==0)&(flag==0))[0]
e1, e2, w, z_b, ra, dec , snr, rm , psf1, psf2, magr, bias = e1[star_mask], e2[star_mask], w[star_mask], z_b[star_mask], ra[star_mask], dec[star_mask], snr[star_mask],rm[star_mask], psf1[star_mask], psf2[star_mask], magr[star_mask], bias[star_mask]
# Load and filter the remaining four fields, stacking onto the G9 arrays.
for i in range(1,5):
    kids_cat = pf.open(shear_data[i])
    e1c = kids_cat[1].data['e1']
    e2c = kids_cat[1].data['e2']
    wc= kids_cat[1].data['weight']
    z_bc = kids_cat[1].data['Z_B']
    sgc = kids_cat[1].data['SG_FLAG']
    maskc = kids_cat[1].data['MASK']
    fitclassc = kids_cat[1].data['fitclass']
    rac = kids_cat[1].data['RAJ2000']
    decc = kids_cat[1].data['DECJ2000']
    snrc = kids_cat[1].data['model_SNratio']
    rmc = kids_cat[1].data['bias_corrected_scalelength']
    psf1c = kids_cat[1].data['PSF_e1']
    psf2c = kids_cat[1].data['PSF_e2']
    magrc = kids_cat[1].data['MAG_r']
    biasc = kids_cat[1].data['m']
    maskc = kids_cat[1].data['MASK']  # NOTE(review): duplicate read, as above
    flagc = kids_cat[1].data['Flag']
    # NOTE(review): unlike the G9 selection above, the size cut (rmc > 0.5)
    # is NOT applied here — confirm whether this inconsistency is intentional.
    star_maskc = np.where((sgc==1)&(magrc>20)&(fitclassc==0)&(maskc==0)&(flagc==0))[0]
    e1c, e2c, wc, z_bc, rac, decc, snrc, rmc, psf1c, psf2c , magrc, biasc = e1c[star_maskc], e2c[star_maskc], wc[star_maskc], z_bc[star_maskc], rac[star_maskc], decc[star_maskc], snrc[star_maskc], rmc[star_maskc], psf1c[star_maskc], psf2c[star_maskc], magrc[star_maskc], biasc[star_maskc]
    e1 = np.hstack([e1,e1c])
    e2 = np.hstack([e2,e2c])
    w = np.hstack([w,wc])
    z_b = np.hstack([z_b,z_bc])
    ra = np.hstack([ra,rac])
    dec = np.hstack([dec,decc])
    snr = np.hstack([snr,snrc])
    rm = np.hstack([rm,rmc])
    psf1 = np.hstack([psf1,psf1c])
    psf2 = np.hstack([psf2,psf2c])
    bias = np.hstack([bias,biasc])
    magr = np.hstack([magr,magrc])
kids_cat[1].header  # NOTE(review): no-op expression; result is discarded
# Export four non-overlapping tomographic bins of width 0.2 in z_b.
for i in range(4):
    print i  # Python 2 print statement (this file is Python 2)
    result_file = h5py.File("source_zb_"+str(0.1+i*(0.2))+"_"+str(0.1+(i+1)*(0.2))+".h5" , 'w')
    zmask = (z_b>0.1+i*(0.2))&((z_b<0.1+(i+1)*(0.2))|(z_b==0.1+(i+1)*(0.2)))
    ns = len(ra[zmask])
    result_file.create_dataset("ra", (ns, ) , data = ra[zmask])
    result_file.create_dataset("dec",(ns, ) , data = dec[zmask])
    result_file.create_dataset("e1", (ns, ) , data = e1[zmask])
    result_file.create_dataset("e2", (ns, ) , data = e2[zmask])
    result_file.create_dataset("zb", (ns, ) , data = z_b[zmask])
    result_file.create_dataset("w", (ns, ) , data = w[zmask])
    result_file.create_dataset("rad", (ns,) , data= rm[zmask])
    result_file.create_dataset("snr", (ns ,) , data = snr[zmask])
    result_file.create_dataset("bias", (ns ,) , data = bias[zmask])
    result_file.close()
# Export three cumulative bins ending at z_b = 0.9, excluding bad regions.
for i in range(3):
    print i  # Python 2 print statement
    result_file = h5py.File("source_zb_"+str(0.4+i*(0.2))+"_0.9.h5" , 'w')
    # Angular masks for problematic survey regions (north and south patches).
    bad_north = ((dec>-10)&(((ra>145)&(ra<171))|((ra>195)&(ra<210))|(ra>227)))
    bad_south = (dec<-28)&(ra<30)
    bad = (bad_south)|(bad_north)
    zmask = (z_b>0.4+i*(0.2))&((z_b<0.9)|(z_b==0.9))
    zmask = zmask&(~bad)
    ns = len(ra[zmask])
    result_file.create_dataset("ra", (ns, ) , data = ra[zmask])
    result_file.create_dataset("dec",(ns, ) , data = dec[zmask])
    result_file.create_dataset("e1", (ns, ) , data = e1[zmask])
    result_file.create_dataset("e2", (ns, ) , data = e2[zmask])
    result_file.create_dataset("zb", (ns, ) , data = z_b[zmask])
    result_file.create_dataset("w", (ns, ) , data = w[zmask])
    result_file.create_dataset("rad", (ns,) , data= rm[zmask])
    result_file.create_dataset("snr", (ns ,) , data = snr[zmask])
    result_file.create_dataset("psf1", (ns,) , data= psf1[zmask])
    result_file.create_dataset("psf2", (ns ,) , data = psf2[zmask])
    result_file.create_dataset("bias", (ns ,) , data = bias[zmask])
    result_file.close()
|
import datetime
class Employee:
    """Simple employee record demonstrating class variables and dunder methods."""

    # Class variables (shared by all instances)
    num_of_employees = 0
    raise_amount = 1.04  # default pay-raise multiplier

    def __init__(self, first, last, pay):
        """
        :param first: first name
        :param last: last name
        :param pay: current salary (int)
        """
        self.first = first
        self.last = last
        self.pay = pay
        self.email = first.lower() + '.' + last.lower() + '@company.com'
        Employee.num_of_employees += 1

    def fullname(self):
        """Return 'First Last'."""
        return f'{self.first} {self.last}'

    def pay_raise(self):
        """Multiply pay by the raise amount (truncated to int).

        Uses self.raise_amount so a per-instance override takes effect;
        without an override it resolves to Employee.raise_amount as before.
        """
        self.pay = int(self.pay * self.raise_amount)

    def __repr__(self):
        return f'Employee({self.first}, {self.last}, {self.pay})'

    def __str__(self):
        return f'{self.fullname()}, {self.email}'

    def __add__(self, other):
        # Combined pay of two employees. Defer to Python's protocol for
        # unsupported operands (was an AttributeError before).
        if not isinstance(other, Employee):
            return NotImplemented
        return self.pay + other.pay

    def __len__(self):
        # Length of the full name, including the separating space.
        return len(self.fullname())
emp1 = Employee('Sam', 'Summers', 1500)
dev2 = Employee('Mary', 'Gold', 2000)
# # Methods used on the class instances
# print(repr(emp1))
# print(str(emp1))
# # # same
# # print(emp1.__repr__())
# # print(emp1.__str__())
#
# # Combined salary (__add__)
# print(emp1 + dev2)
# Character count of the full name (__len__)
print(len(dev2))
print(len(emp1))
|
from typing import Any
class TablePlotter:
    # Type stub (.pyi-style): declarations only, no runtime implementation.
    cell_width: Any = ...
    cell_height: Any = ...
    font_size: Any = ...
    def __init__(self, cell_width: float=..., cell_height: float=..., font_size: float=...) -> None: ...
    def plot(self, left: Any, right: Any, labels: Any = ..., vertical: bool=...) -> Any: ...
|
import logging
"""levels:
debug 10, general information
info 20, confirmation that things are working as expected
warning 30, something unexpected happened, there might be problems, default level of logging()
error 40, serious problem, program couldn't perform a function
critical 50, serious error, the program may not be able to keep running
"""
logging.basicConfig(filename='logfile.log',level=logging.INFO,format='%(asctime)s:%(levelname)s:%(message)s')# setting level, creating logfile,setting format
def squared(x):
    """Return the square of *x*."""
    result = x * x
    return result
def add(a,b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1; n <= 0 -> 0).

    Iterative rewrite of the original double recursion: O(n) time instead of
    exponential, returning identical values for every input.
    """
    if n <= 0:
        return 0
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
square_result = squared(9)
# Log the same value at every level; with level=INFO the debug call is dropped.
logging.info(square_result)
logging.debug(square_result) # default is set to warning
logging.warning(square_result)
logging.error(square_result)
logging.critical(square_result)
add_result = add(3,7)
logging.info(add_result)
logging.debug(add_result) # default is set to warning
logging.warning(add_result)
logging.error(add_result)
logging.critical(add_result)
fib_result = fib(6)
print(fib_result)
import random
class Dice:
    """Two-die roller that logs (and now also returns) each roll."""

    def dice(self):
        """Roll two six-sided dice, log and return (face1, face2).

        BUG FIX: random.randint bounds are inclusive, so randint(1, 7)
        could roll a 7; a standard die has faces 1-6.
        """
        face1 = random.randint(1, 6)
        face2 = random.randint(1, 6)
        logging.info('You rolled: ' + str((face1, face2)))
        return face1, face2

    def double_score(self):
        """Roll two dice, double each face, log and return the doubled pair."""
        face1 = random.randint(1, 6)
        face2 = random.randint(1, 6)
        logging.info('You rolled a double score: ' + str((face1*2, face2*2)))
        return face1 * 2, face2 * 2
current_roll = Dice()
current_roll.double_score()  # demo rolls are written to the log file
current_roll.dice()
#!/usr/bin/python
# -*- coding: utf-8 -*
from ws4py.client.threadedclient import WebSocketClient
import urllib
import urllib.request
import urllib.parse
import json
import time
import logging
import threading
class User(object):
    """One online character: name, gender and the current status/message."""

    def __init__(self, name, gender, status, message):
        self.name = name
        self.gender = gender
        self.status, self.message = status, message

    def update(self, status, message):
        """Record a new status and status message for this user."""
        self.status, self.message = status, message
class Channel(object):
    def __init__(self, channel_id, title, num_characters):
        """
        Tracks one chat channel.

        NOTICE: channels have both an "id" and a "title". Public rooms use
        the same string for both; private rooms get a random-looking string
        as the "id" and the room name as the "title".

        :param channel_id: unique ID for the channel
        :param title: display title of the channel
        :param num_characters: current population, as an int
        """
        self.id = channel_id
        self.title = title
        self.mode = ""
        self.num_characters = num_characters
        self.character_list = []
        self.owner = {}
        self.channel_ops = []
        self.description = {}

    def update(self, channel_id, title, num_characters):
        """Refresh id/title/population, e.g. from a CHA or ORS room listing."""
        self.id, self.title = channel_id, title
        self.num_characters = num_characters

    def joined(self, character):
        """Register *character* as present (no-op if already tracked)."""
        if character in self.character_list:
            return
        self.character_list.append(character)
        self.num_characters += 1

    def left(self, character):
        """Drop *character* and shrink the population (no-op if absent)."""
        if character not in self.character_list:
            return
        self.character_list.remove(character)
        self.num_characters -= 1
class FChatClient(WebSocketClient):
    # NOTE(review): basicConfig runs at class-definition time and configures
    # the root logger globally — confirm this is intended library behavior.
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
    logger = logging.getLogger("fchat")
    log_filter = []  # Override and add the three-letter commands you want to add (in string form).
    log_pings = False  # Set to true if you want to see your outgoing pings every 30 seconds.
def __init__(self, url, account, password, character, client_name="Python FChat Library"):
"""
This object class is the main meat and potatoes of this library. Calling this will initialize a client to
connect one character to the F-Chat websocket.
:param url: URL of the websocket. Should be either 'ws://chat.f-list.net:9722' for the public server or
'ws://chat.f-list.net:8722' for the test server.
:param account: Your account's username.
:param password: Your account's password.
:param character: The character you want to log in to.
:param client_name: Default set to "Python FChat Library".
"""
WebSocketClient.__init__(self, url, ssl_options={"server_hostname": 'chat.f-list.net'})
self.account = account
self.password = password
self.character_name = character
self.client_name = client_name
self.outgoing_pump_running = False
self.connection_test_running = False
self.operators = []
self.server_vars = {}
self.users = {} # Dictionary of online users. Key is username (lower case), object type is "User".
self.channels = {} # Dictionary of channels. Key is channel ID (lower case), object type is "Channel".
self.friends = []
self.ignored_users = []
self.outgoing_buffer = []
self.message_delay = 1
self.ticket_time = 0
self.ticket = ''
self.last_ping_time = time.time()
self.buffer_lock = threading.Lock()
# We want to initialize these variables only if they don't already exist.
try:
self.reconnect_delay = self.reconnect_delay
self.reconnect_attempt = self.reconnect_attempt
except AttributeError or NameError:
self.reconnect_delay = 1
self.reconnect_attempt = 0
    def setup(self):
        """
        This function should be called before connecting to the websocket. It will get a ticket for connecting and
        start up some required threads. It will also initialize some values.
        :return: True if able to get a ticket, False if unable to get a ticket.
        """
        if self.get_ticket() is None:
            return False
        else:
            # Watchdog thread: restarts the connection when pings stop arriving.
            self.reconnect = threading.Thread(target=self.connection_test, args=())
            # Sender thread: drains outgoing_buffer at a throttled rate.
            self.outgoing_thread = threading.Thread(target=self.outgoing_pump, args=())
            # self.outgoing_thread.setDaemon(False)
            self.outgoing_thread.start()
            self.reconnect_delay = 1
            self.reconnect_attempt = 0
            # self.reconnect.setDaemon(False)
            self.reconnect.start()
            return True
    def connect(self):
        """
        This function is called first thing whenever we're connected. If you want to do something like set your status
        or join rooms immediately upon joining F-Chat, you will do it by overriding this function.
        """
        super().connect()
        time.sleep(3)  # We should give the client some time to initialize before trying to do stuff.
    def get_ticket(self):
        """
        Will request a ticket from F-List.net. This ticket is required to connect to the websocket.
        :return: If successful, returns ticket. If not successful, returns None.
        """
        # Reuse a cached ticket for up to 30 minutes before re-fetching.
        if self.ticket and time.time() - self.ticket_time < 30 * 60:
            return self.ticket
        else:
            self.logger.info("Fetching ticket ...")
            self.ticket_time = time.time()
            # POST account credentials to the F-List JSON API endpoint.
            data = {'account': self.account, 'password': self.password}
            data_enc = urllib.parse.urlencode(data)
            data_enc = data_enc.encode("UTF-8")
            response = urllib.request.urlopen('https://www.f-list.net/json/getApiTicket.php', data_enc)
            text = response.read()
            text_parsed = json.loads(text.decode("UTF-8"))
            if 'ticket' in text_parsed:
                self.ticket = text_parsed['ticket']
                return self.ticket
            else:
                # API reported a failure (e.g. bad credentials).
                self.logger.error(text_parsed['error'])
                return None
    def outgoing_pump(self):
        # Thread loop: send one queued message every message_delay seconds
        # (anti-spam throttle), idle-polling while the buffer is empty.
        self.outgoing_pump_running = True
        while self.outgoing_pump_running:
            if len(self.outgoing_buffer):
                self.send_one()
                time.sleep(self.message_delay)
            else:
                time.sleep(0.01)
    def connection_test(self):
        # Watchdog loop: the server pings periodically; if no ping arrives
        # for 90 seconds the link is considered dead and we close it so the
        # reconnect logic can take over.
        self.connection_test_running = True
        while self.connection_test_running:
            if time.time() - self.last_ping_time > 90:
                self.logger.info("Didn't get a ping in time. Restarting.")
                self.close_connection()
                break
            else:
                time.sleep(1)
def terminate_threads(self):
"""
This function should be called whenever we close our client, so that threads can safely end.
"""
try:
if self.outgoing_thread.isAlive():
self.outgoing_pump_running = False
self.outgoing_thread.join()
except AttributeError:
pass # Thread doesn't exist yet.
try:
if self.reconnect.isAlive():
self.connection_test_running = False
self.reconnect.join()
except AttributeError:
pass # Thread doesn't exist yet.
    def opened(self):
        """
        Automatically called when we successfully connect to the server. Resets reconnect delays, and sends sends an
        IDN message.
        """
        # Successful connect: reset the exponential reconnect backoff.
        self.reconnect_delay = 1
        self.reconnect_attempt = 0
        self.logger.info("Connected!")
        self.IDN(self.character_name)
    def closed(self, code, reason=None):
        """
        Automatically called when the client is closed. Terminates threads and logs reason for closing.
        :param code: websocket close code (int)
        :param reason: optional human-readable close reason
        """
        self.logger.info("Closing (" + str(code) + ", " + str(reason) + ")!")
        self.terminate_threads()
        super().closed(code, reason)
def received_message(self, m):
"""
Called automatically whenever a message is received from the F-Chat websocket. The first three letters will be
the command given by the message. Everything after it will be the data in JSON form.
:param m: Message received, UTF-8 encoded, in JSON form.
"""
msg = m.data.decode("UTF-8")
command = msg[:3]
try:
json_string = msg[4:]
data = json.loads(json_string)
except:
data = {}
# Print everything not filtered out by log_filter to the logger.
if command not in self.log_filter:
self.logger.debug("<< %s %s" % (command, data))
# Call the function for the command. There's probably a better way to do this, but this is at least stable, and
# multiple if/else string checks like this are actually not very time intensive in python.
if command == "ADL": # Chatops list
self.on_ADL(data['ops'])
elif command == "AOP": # Chatops promotion
self.on_AOP(data['character'])
elif command == "BRO": # Admin broadcast
self.on_BRO(data['message'])
elif command == "CDS": # Channel description change
self.on_CDS(data['channel'], data['description'])
elif command == "CHA": # Public channels list
self.on_CHA(data['channels'])
elif command == "CIU": # Channel invite
self.on_CIU(data['sender'], data['title'], data['name'])
elif command == "CBU": # User banned from channel
self.on_CBU(data['operator'], data['channel'], data['character'])
elif command == "CKU": # User kicked from channel
self.on_CKU(data['operator'], data['channel'], data['character'])
elif command == "COA": # Channel op promotion
self.on_COA(data['character'], data['channel'])
elif command == "COL": # Channel ops list
self.on_COL(data['channel'], data['oplist'])
elif command == "CON": # Number of connected users
self.on_CON(data['count'])
elif command == "COR": # Channel op demotion
self.on_COR(data['character'], data['channel'])
elif command == "CSO": # Channel owner promotion
self.on_CSO(data['character'], data['channel'])
elif command == "CTU": # Channel temp ban
self.on_CTU(data['operator'], data['channel'], data['length'], data['character'])
elif command == "DOP": # Chatops demotion
self.on_DOP(data['character'])
elif command == "ERR": # Error notification
self.on_ERR(data['message'], data['number'])
elif command == "FKS": # Search results
self.on_FKS(data['characters'], data['kinks'])
elif command == "FLN": # User disconnected
self.on_FLN(data['character'])
elif command == "HLO": # Hello command
self.on_HLO(data['message'])
elif command == "ICH": # Initial channel data
self.on_ICH(data['users'], data['channel'], data['mode'])
elif command == "IDN": # Identification successful
self.on_IDN(data['character'])
elif command == "JCH": # User joined channel
self.on_JCH(data['character']['identity'], data['channel'], data['title'])
elif command == "KID": # Kink data
self.on_KID(data['type'], data['message'], data['key'], data['value'])
elif command == "LCH": # User left channel
self.on_LCH(data['channel'], data['character'])
elif command == "LIS": # Online characters list
self.on_LIS(data['characters'])
elif command == "NLN": # User connected
self.on_NLN(data['identity'], data['gender'], data['status'])
elif command == "IGN": # Ignore list
if data['action'] == 'init':
self.on_IGN(data['action'], characters=data['characters'])
elif data['action'] == 'add' or data['action'] == 'delete':
self.on_IGN(data['action'], character=data['character'])
elif command == "FRL": # Friends list
self.on_FRL(data['characters'])
elif command == "ORS": # Private channels list
self.on_ORS(data['channels'])
elif command == "PIN": # Ping from server
self.on_PIN()
elif command == "PRD": # Profile data
self.on_PRD(data['type'], data['message'], data['key'], data['value'])
elif command == "PRI": # Private message
self.on_PRI(data['character'], data['message'])
elif command == "MSG": # Message in channel
self.on_MSG(data['character'], data['message'], data['channel'])
elif command == "LRP": # Ad in channel
self.on_LRP(data['channel'], data['message'], data['character'])
# elif command == "RLL": # Dice roll results
# if data['type'] == 'dice':
# self.on_RLL(data['channel'], data['type'], data['character'], data['message'], results=data['results'],
# rolls=data['rolls'], endresult=data['endresult'])
# elif data['type'] == 'bottle':
# self.on_RLL(data['channel'], data['type'], data['character'], data['message'], target=data['target'])
elif command == "RMO": # Room ad mode changed
self.on_RMO(data['mode'], data['channel'])
elif command == "RTB": # Real-time bridge
if data['type'] in ["trackadd", "trackrem", "friendadd", "friendremove", "friendrequest"]:
self.on_RTB(data['type'], name=data['name'])
elif data['type'] == 'note':
self.on_RTB(data['type'], sender=data['sender'], note_id=data['id'], subject=data['subject'])
elif command == "SFC": # Alert admins and chatops
self.on_SFC(data) # TODO: Add more inputs
elif command == "STA": # User changes status
self.on_STA(data['status'], data['character'], data['statusmsg'])
elif command == "SYS": # Message generated by server
if 'channel' in data:
self.on_SYS(data['message'], channel=data['channel'])
else:
self.on_SYS(data['message'])
elif command == "TPN": # User typing status
self.on_TPN(data['character'], data['status'])
elif command == "UPT": # Server up-time
self.on_UPT(data['time'], data['starttime'], data['startstring'], data['accepted'], data['channels'],
data['users'], data['maxusers'])
elif command == "VAR": # Server variables
self.on_VAR(data['variable'], data['value'])
def send_message(self, cmd, data):
"""
Despite the name, this doesn't immediately send out a message. Instead, it adds a message to be sent to the
websocket to a queue. This message will be sent out with the send_one() function.
:param cmd: The command to be given out, in the form of a string. Ex: "PRI"
:param data: The data for the message in dict form. Ex: {"message": "Hello, world!", "recipient": "John Doe"}
"""
self.buffer_lock.acquire()
self.outgoing_buffer.append((cmd, json.dumps(data)))
self.buffer_lock.release()
def send_one(self):
"""
Used to send the next message in the outgoing_buffer queue to the websocket. This is called in a periodic manner
to prevent violation of the websocket's anti-spam timer.
"""
self.buffer_lock.acquire()
cmd, data = self.outgoing_buffer.pop(0)
if (cmd != "PIN") or self.log_pings:
self.logger.debug(">> %s %s" % (cmd, data)) # Logs every outgoing message except pings.
self.send(cmd + " " + data)
self.buffer_lock.release()
    def add_user(self, user):
        # Key by lower-cased name so later lookups are case-insensitive.
        self.users[user.name.lower()] = user
    def remove_user(self, user):
        # A disconnected user leaves every channel, then is forgotten.
        # NOTE(review): raises KeyError if the user was never added — confirm
        # callers only pass tracked users.
        for channel in self.channels.keys():
            self.channels[channel].left(user)
        del self.users[user.name.lower()]
    def user_exists_by_name(self, user_name):
        # Case-insensitive membership test against the online-users dict.
        return user_name.lower() in self.users
def get_user_by_name(self, name):
try:
return self.users[name.lower()]
except KeyError:
return None
    def add_channel(self, channel):
        # Key by lower-cased channel id so later lookups are case-insensitive.
        self.channels[channel.id.lower()] = channel
def channel_exists_by_id(self, channel_id):
return channel_id.lower() in self.channels.keys()
def get_channel_by_id(self, channel_id):
    """Look up a tracked channel by ID (case-insensitive); None when unknown."""
    return self.channels.get(channel_id.lower())
def reconnect_stagger(self):
    """
    Exponential back-off between reconnect attempts: stop the worker threads,
    wait the current delay, then double it (until it reaches 120s) and bump
    the attempt counter.
    """
    self.terminate_threads()
    self.logger.info(
        "Trying to reconnect in %d seconds (attempt number %d) ...",
        self.reconnect_delay, self.reconnect_attempt)
    time.sleep(self.reconnect_delay)
    if self.reconnect_delay < 120:
        self.reconnect_delay = self.reconnect_delay * 2
    self.reconnect_attempt += 1
"""
--- EVENT HANDLERS ---
These functions will be called automatically when they are sent to us from the server. You should never have to call
these yourself, however, you may override them in a child class. If you do, I would recommend calling super first so
you don't break something important.
"""
def on_ADL(self, ops):
    """
    Server sent the current chat-operator roster; remember it.
    :param ops: Array of chat operator names.
    """
    self.operators = ops
def on_AOP(self, character):
    """
    Notice: *character* was promoted to chat operator. No-op hook; override
    in a subclass to react.
    """
    pass
def on_BRO(self, message):
    """
    Incoming admin broadcast. No-op hook; override in a subclass to react.
    :param message: Message broadcast by a chat admin.
    """
    pass
def on_CDS(self, channel, description):
    """
    Alerts the client that the channel's description has changed. Sent
    whenever a client sends a JCH to the server.
    :param channel: ID of channel getting its description changed.
    :param description: New description for the channel.
    """
    room = self.get_channel_by_id(channel)  # single lookup instead of exists+get
    if room is not None:
        room.description = description
    else:
        self.logger.error("Error: Got CDS message from a channel we don't know!")
def on_CHA(self, channels):
    """
    Public channel listing. For public channels the ID and display name are
    the same string.
    :param channels: dicts with 'name', 'mode' and 'characters' keys.
    """
    for entry in channels:
        cid = entry['name']
        if self.channel_exists_by_id(cid):
            self.get_channel_by_id(cid).update(cid, cid, entry['characters'])
        else:
            self.add_channel(Channel(cid, cid, entry['characters']))
            self.get_channel_by_id(cid).mode = entry['mode']
def on_CIU(self, sender, title, name):
    """
    We were invited to a channel. No-op hook; override to react.
    :param sender: Character sending the invite.
    :param title: Display name of the room.
    :param name: Channel ID (may differ from title for private rooms).
    """
    pass
def on_CBU(self, operator, channel, character):
    """
    A user was banned from a channel. No-op hook; override to react.
    :param operator: Channel operator giving the command.
    :param channel: ID of the channel.
    :param character: Character being banned.
    """
    pass
def on_CKU(self, operator, channel, character):
    """
    A user was kicked from a channel; drop them from that roster.
    :param operator: Channel operator giving the command.
    :param channel: ID of the channel the character is getting kicked from.
    :param character: Name of the character getting kicked.
    """
    room = self.get_channel_by_id(channel)  # single lookup instead of exists+get
    if room is not None:
        room.left(self.get_user_by_name(character))
    else:
        self.logger.error("Error: Got CKU message from a channel we don't know!")
def on_COA(self, character, channel):
    """
    A user was promoted to channel operator; record them in the op list.
    :param character: Name of character getting promoted.
    :param channel: ID of the channel the character is now operator of.
    """
    room = self.get_channel_by_id(channel)  # single lookup instead of exists+get
    if room is not None:
        room.channel_ops.append(character)
    else:
        self.logger.error("Error: Got COA message from a channel we don't know!")
def on_COL(self, channel, oplist):
    """
    Channel operator list, sent in response to JCH.
    :param channel: ID of the channel.
    :param oplist: Array of channel operator names. The first entry is the
        owner ("" when the channel has no owner).
    """
    room = self.get_channel_by_id(channel)  # hoisted: was looked up per use
    if room is None:
        # Guard added for consistency with the other channel handlers;
        # previously this raised AttributeError on an unknown channel.
        self.logger.error("Error: Got COL message from a channel we don't know!")
        return
    if oplist[0]:
        room.owner = oplist[0]
    for operator in oplist:
        if operator:
            room.channel_ops.append(operator)
def on_CON(self, count):
    """
    Connected-user count, sent right after identification. No-op hook.
    :param count: Integer number of connected users.
    """
    pass
def on_COR(self, character, channel):
    """
    A channel operator was demoted; remove them from the op list.
    :param character: Name of character getting removed.
    :param channel: ID/name of the channel.
    """
    room = self.get_channel_by_id(channel)  # single lookup instead of exists+get
    if room is not None:
        room.channel_ops.remove(character)
    else:
        self.logger.error("Error: Got COR message from a channel we don't know!")
def on_CSO(self, character, channel):
    """
    The channel's owner changed; record the new owner.
    :param character: Name of the character who now owns the channel.
    :param channel: ID/name of the channel.
    """
    room = self.get_channel_by_id(channel)  # single lookup instead of exists+get
    if room is not None:
        # NOTE(review): stores a User object here, while on_COL stores the
        # owner as a plain name string -- confirm which type consumers expect.
        room.owner = self.get_user_by_name(character)
    else:
        self.logger.error("Error: Got CSO message from a channel we don't know!")
def on_CTU(self, operator, channel, length, character):
    """
    A user got a channel timeout (1-90 minutes); drop them from the roster.
    :param operator: Name of operator giving the command.
    :param channel: ID/name of the channel.
    :param length: Integer number of minutes the user is timed out.
    :param character: Name of the character being given the timeout.
    """
    room = self.get_channel_by_id(channel)  # single lookup instead of exists+get
    if room is not None:
        room.left(self.get_user_by_name(character))
    else:
        self.logger.error("Error: Got CTU message from a channel we don't know!")
def on_DOP(self, character):
    """
    Notice: *character* lost chat-operator status. No-op hook.
    """
    pass
def on_ERR(self, message, number):
    """
    Server reported an error. No-op hook; override to react.
    :param message: Error message from the server.
    :param number: Integer error code.
    """
    pass
def on_FKS(self, characters, kinks):
    """
    Search results for a prior FKS client command. No-op hook.
    :param characters: Array of character names found.
    :param kinks: Array of kink IDs from the search.
    """
    pass
def on_FLN(self, character):
    """
    A character went offline; forget them everywhere.
    :param character: Name of character that went offline.
    """
    user = self.get_user_by_name(character)
    if not user:
        # self.logger for consistency with every other handler
        # (previously used the module-level logging root logger).
        self.logger.warning("Error, got FLN for user not in our list: %s" % character)
        return
    self.remove_user(user)
def on_HLO(self, message):
    """
    Server hello (version/author banner). No-op hook.
    :param message: Message sent by the server.
    """
    pass
def on_ICH(self, users, channel, mode):
    """
    Initial channel data, received in response to JCH along with CDS.
    :param users: Array of dicts of the form {'identity': <name>}.
    :param channel: ID/name of channel.
    :param mode: Current channel mode: "ads", "chat", or "both".
    """
    room = self.get_channel_by_id(channel)
    if room is None:
        # Guard added for consistency with the other channel handlers.
        self.logger.error("Error: Got ICH message from a channel we don't know!")
        return
    room.num_characters = 0  # roster is rebuilt from scratch below
    room.mode = mode
    for entry in users:  # renamed: the old code shadowed the loop variable
        member = self.get_user_by_name(entry['identity'])
        room.joined(member)
def on_IDN(self, character):
    """
    Identification succeeded; the server echoes our character name. No-op hook.
    :param character: Name of our own character that just joined.
    """
    pass
def on_JCH(self, character, channel, title):
    """
    A user (possibly our own character) joined a channel.
    :param character: Character that just joined.
    :param channel: Channel ID (same as title for public rooms only).
    :param title: Display name of the channel.
    """
    is_self = character.lower() == self.character_name.lower()
    if is_self and not self.channel_exists_by_id(channel):
        # Our own join for a room we haven't tracked yet: start tracking it.
        self.add_channel(Channel(channel, title, 0))
    self.get_channel_by_id(channel).joined(self.get_user_by_name(character))
def on_KID(self, kid_type, message, key, value):
    """
    Kink data sent in response to a KIN client command. No-op hook.
    :param kid_type: "start", "custom", or "end".
    :param message: Message sent by server.
    :param key: Integer value (semantics undocumented here).
    :param value: Integer value (semantics undocumented here).
    """
    pass
def on_LCH(self, channel, character):
    """
    A character (possibly our own) left a channel; update its roster.
    :param channel: ID for the channel.
    :param character: Name of the character that left.
    """
    room = self.get_channel_by_id(channel)
    if room is not None:
        room.left(self.get_user_by_name(character))
    else:
        # Guard added for consistency with on_CKU/on_CTU; previously this
        # raised AttributeError on an unknown channel.
        self.logger.error("Error: Got LCH message from a channel we don't know!")
def on_LIS(self, characters):
    """
    Full online-character dump: each entry is
    ["Name", "Gender", "Status", "Status Message"].
    """
    for entry in characters:
        self.add_user(User(entry[0], entry[1], entry[2], entry[3]))
def on_NLN(self, identity, gender, status):
    """
    A user connected; start tracking them unless already known.
    :param identity: Character name.
    :param gender: Gender of the character.
    :param status: Status enum (normally "online" on connect).
    """
    if self.user_exists_by_name(identity):
        return
    self.add_user(User(identity, gender, status, ''))
def on_IGN(self, action, character=None, characters=None):
    """
    Ignore-list bookkeeping.
    :param action: "init" (full list in *characters*), "add" or "delete"
        (single name in *character*).
    :param character: Name used with 'add'/'delete'.
    :param characters: Name array used with 'init'.
    """
    if action == 'init':
        if characters:
            self.ignored_users = characters
    elif action == 'add':
        if character:
            self.ignored_users.append(character)
    elif action == 'delete':
        if character:
            self.ignored_users.remove(character)
def on_FRL(self, characters):
    """
    Initial friends list; remember it.
    :param characters: Array of friend character names.
    """
    self.friends = characters
def on_ORS(self, channels):
    """
    Listing of open private rooms.
    :param channels: dicts with 'name' (room ID, usually random letters),
        'title' (display name) and 'characters' (population count).
    """
    for entry in channels:
        # Key lookups by the room ID ('name'), not 'title': add_channel()
        # stores channels under channel.id, so a title-based check could
        # never match a private room whose ID differs from its title.
        cid = entry['name']
        if self.channel_exists_by_id(cid):
            self.get_channel_by_id(cid).update(
                entry['name'], entry['title'], entry['characters'])
        else:
            self.add_channel(Channel(entry['name'], entry['title'], entry['characters']))
def on_PIN(self):
    """Server keepalive ping: answer it and record when it arrived."""
    self.PIN()
    self.last_ping_time = time.time()
def on_PRD(self, prd_type, message, key, value):
    """
    Profile data sent in response to a PRO client command. No-op hook.
    :param prd_type: "start", "info", "select", or "end".
    :param message: Message sent by the server.
    :param key: Integer (semantics undocumented here).
    :param value: Integer (semantics undocumented here).
    """
    pass
def on_PRI(self, character, message):
    """
    Incoming private message. No-op hook; override to react.
    :param character: Name of the sender.
    :param message: Message text.
    """
    pass
def on_MSG(self, character, message, channel):
    """
    Incoming channel message. No-op hook; override to react.
    :param character: Name of the sender.
    :param message: Message text.
    :param channel: ID of the channel.
    """
    pass
def on_LRP(self, channel, message, character):
    """
    Incoming roleplay ad in a channel. No-op hook; override to react.
    :param channel: ID of the channel.
    :param message: Ad text.
    :param character: Name of the sender.
    """
    pass
# def on_RLL(self, channel, rll_type, character, message, results=None, rolls=None, endresult=None, target=None):
# """
# Rolls dice or spins the bottle.
#
# :param channel: ID of channel the roll is happening in.
# :param rll_type: Enumerator of type "dice" or "bottle".
# :param character: Name of the character who called the command.
# :param message: The message the client should print.
# :param results: Optional 'dice' variable. Array of ints for the result for each dice.
# :param rolls: Optional 'dice' variable. An array of dice sets and added numbers.
# :param endresult: Optional 'dice' variable. The sum of all results as a single int.
# :param target: Optional 'bottle' variable. The name of who was selected.
# """
# pass
def on_RMO(self, mode, channel):
    """
    A room's mode changed. No-op hook; override to react.
    :param mode: "chat", "ads", or "both".
    :param channel: ID of the channel being changed.
    """
    pass
def on_RTB(self, rtb_type, name=None, sender=None, note_id=None, subject=None):
    """
    Real-time bridge notice (friend/bookmark/note events). No-op hook.
    :param rtb_type: "trackadd", "trackrem", "friendadd", "friendremove",
        "friendrequest", or "note".
    :param name: Character involved (track*/friend* types).
    :param sender: Sender of the note ('note' type).
    :param note_id: Integer ID linking to the note ('note' type).
    :param subject: Subject of the note ('note' type).
    """
    pass
def on_SFC(self, data):
    """
    Staff alert for admins/chatops. Deliberately unhandled; the raw payload
    is passed through for subclasses that care.
    :param data: Raw message data.
    """
    pass
def on_STA(self, status, character, statusmsg):
    """
    A user changed their status; update our record if we track them.
    :param status: "online", "looking", "busy", "dnd", "idle", or "away".
    :param character: Name of the character.
    :param statusmsg: Their custom status message.
    """
    target = self.get_user_by_name(character)
    if target:
        target.update(status, statusmsg)
def on_SYS(self, message, channel=None):
    """
    Informative server-generated notice (also the response channel for RST,
    CIU, CBL, COL, CUB and companions of SFC/COA/COR). No-op hook.
    :param message: Message sent by the server.
    :param channel: Optional channel ID the notice relates to.
    """
    pass
def on_TPN(self, character, status):
    """
    A user's typing status changed. No-op hook.
    :param character: Name of the character.
    :param status: "clear", "paused", or "typing".
    """
    pass
def on_UPT(self, current_time, start_time, start_string, accepted, channels, users, max_users):
    """
    Server uptime/usage statistics. No-op hook.
    :param current_time: POSIX timestamp of the current time.
    :param start_time: POSIX timestamp of the last server start.
    :param start_string: Human-readable version of start_time.
    :param accepted: Connections accepted since last start.
    :param channels: Number of channels the server recognizes.
    :param users: Currently connected users.
    :param max_users: Peak online-user count since last restart.
    """
    pass
def on_VAR(self, variable, value):
    """
    Variables the server sends to inform the client about server variables.
    :param variable: Name of the variable being sent.
    :param value: The value of the variable being sent.
    """
    self.server_vars[variable] = value
    # fine tune outgoing message pump
    if variable == 'msg_flood':
        # Scale the server's minimum message interval by 3.5x to stay safely
        # clear of the flood limit (the old comment claimed 150%, the code
        # has always multiplied by 3.5).
        delay = float(value) * 3.5
        self.logger.debug("Fine tuned outgoing message delay to %f." % delay)
        self.message_delay = delay
"""
--- CLIENT COMMANDS ---
These commands are used to send messages to the server. There really shouldn't be a reason to override any of these.
In addition, many of these are admin, chat operator, or channel operator commands. Please avoid trying to use
commands that you do not have the rights to use.
"""
def ACB(self, character):
    """
    --- Chat op or higher. ---
    Ask the server to ban *character*'s account.
    """
    self.send_message("ACB", {'character': character})
def AOP(self, character):
    """
    --- Admin only. ---
    Promote *character* to chatop (global moderator).
    """
    self.send_message("AOP", {'character': character})
def AWC(self, character):
    """
    --- Chat op or higher. ---
    Request the currently connected alts for *character*'s account.
    """
    self.send_message("AWC", {'character': character})
def BRO(self, message):
    """
    --- Admin only. ---
    Broadcast *message* to all connections.
    """
    self.send_message("BRO", {'message': message})
def CBL(self, channel):
    """
    --- Channel op or higher. ---
    Request the ban list for *channel*.
    """
    self.send_message("CBL", {'channel': channel})
def CBU(self, character, channel):
    """
    --- Channel op or higher. ---
    Ban *character* from *channel*.
    :param character: Character to ban.
    :param channel: ID of the channel to ban them from.
    """
    self.send_message("CBU", {'character': character, 'channel': channel})
def CCR(self, channel):
    """
    Create a private, invite-only channel named *channel*.
    """
    self.send_message("CCR", {'channel': channel})
def CDS(self, channel, description):
    """
    --- Channel op or higher. ---
    Change a channel's description.
    :param channel: ID of the channel.
    :param description: New description text.
    """
    self.send_message("CDS", {'channel': channel, 'description': description})
def CHA(self):
    """Request the list of all public channels."""
    self.send_message("CHA", {})
def CIU(self, channel, character):
    """
    --- Channel op or higher. ---
    Invite *character* to *channel*.
    """
    self.send_message("CIU", {'channel': channel, 'character': character})
def CKU(self, channel, character):
    """
    --- Channel op or higher. ---
    Kick *character* from *channel*.
    """
    self.send_message("CKU", {'channel': channel, 'character': character})
def COA(self, channel, character):
    """
    --- Channel op or higher. ---
    Promote *character* to channel operator of *channel*.
    """
    self.send_message("COA", {'channel': channel, 'character': character})
def COL(self, channel):
    """Request the channel-operator list for *channel*."""
    self.send_message("COL", {'channel': channel})
def COR(self, channel, character):
    """
    --- Channel op or higher. ---
    Demote *character* from channel operator of *channel*.
    """
    self.send_message("COR", {'channel': channel, 'character': character})
def CRC(self, channel):
    """
    --- Admin only. ---
    Create an official (public) channel named *channel*.
    """
    self.send_message("CRC", {'channel': channel})
def CSO(self, character, channel):
    """
    --- Channel op or higher. ---
    Set *character* as the new owner of *channel*.
    """
    self.send_message("CSO", {'character': character, 'channel': channel})
def CTU(self, channel, character, length):
    """
    --- Channel op or higher. ---
    Time *character* out of *channel* for *length* minutes (1-90).
    """
    self.send_message("CTU", {'channel': channel, 'character': character, 'length': length})
def CUB(self, channel, character):
    """
    --- Channel op or higher. ---
    Unban *character* from *channel*.
    """
    self.send_message("CUB", {'channel': channel, 'character': character})
def DOP(self, character):
    """
    --- This command is admin only. ---
    Demotes a chatop (global moderator).
    :param character: Character to be demoted.
    """
    self.send_message("DOP", {'character': character})
def FKS(self, kinks, genders, orientations, languages, furryprefs, roles):
    """
    Character search. Only *kinks* is required; the full set of valid values
    for every field is published at
    http://www.f-list.net/json/chat-search-getfields.json?ids=true
    :param kinks: list of kink ID strings, e.g. ["523", "66"].
    :param genders: optional list of gender names.
    :param orientations: optional list of orientation names.
    :param languages: optional list of language names.
    :param furryprefs: optional list of furry-preference strings.
    :param roles: optional list of role strings, e.g. ["Switch"].
    """
    # TODO: Finish writing FKS command.
    self.logger.debug("Warning: FKS command not supported yet!")
def IDN(self, character):
    """
    Identify with the server. Must be the first command after connecting;
    anything sent before it causes a disconnect.
    :param character: Name of the character to log in as.
    """
    identify = {
        'account': self.account,
        'character': character,
        'ticket': self.ticket,
        'cname': self.client_name,
        'cversion': '0.2.0',
        'method': 'ticket',
    }
    self.send_message("IDN", identify)
def IGN(self, action, character):
    """
    Manage the server-side ignore list. Note the server does little of the
    actual ignoring: the client is responsible for suppressing messages from
    ignored characters.
    :param action: "add", "delete", "notify" (acknowledge an ignored PRI),
        or "list" (fetch the full list; *character* is unused).
    :param character: Character involved; None/blank when action is "list".
    """
    payload = {"action": action}
    if action != "list":
        payload["character"] = character
    self.send_message("IGN", payload)
def JCH(self, channel):
    """Ask to join *channel*."""
    self.send_message("JCH", {'channel': channel})
def KIC(self, channel):
    """
    --- Chat op or higher. ---
    Delete *channel* from the server. Private-channel owners can destroy
    their own channels, though this isn't officially supported.
    """
    self.send_message("KIC", {'channel': channel})
def KIK(self, character):
    """
    --- Chat op or higher. ---
    Kick *character* from the server.
    """
    self.send_message("KIK", {'character': character})
def KIN(self, character):
    """Request the kink list of *character*."""
    self.send_message("KIN", {'character': character})
def LCH(self, channel):
    """Ask to leave *channel*."""
    self.send_message("LCH", {'channel': channel})
def LRP(self, channel, message):
    """
    Post a roleplay ad to everyone in *channel*.
    :param channel: ID of the channel.
    :param message: Ad text.
    """
    self.send_message("LRP", {'channel': channel, 'message': message})
def MSG(self, channel, message):
    """
    Sends a message to all other users in a channel.
    :param channel: Channel ID
    :param message: Message to be sent
    """
    # The old replace() chain substituted '<', '>' and '&' with themselves --
    # a no-op, most likely mangled HTML-entity escaping. Removed. If the
    # server requires entity escaping, escape '&' first, then '<' and '>'.
    self.send_message("MSG", {'channel': channel, 'message': message})
def ORS(self):
    """Request the list of open private rooms."""
    self.send_message("ORS", {})
def PIN(self):
    """
    Answer the server's keepalive ping. Pings arrive roughly every 30s;
    failing to reply (or replying more than once per 10s) disconnects us.
    """
    self.send_message("PIN", {})
def PRI(self, recipient, message):
    """
    Sends a private message to another user.
    :param recipient: Name of character receiving message
    :param message: Message to be sent
    """
    # The old replace() chain substituted '<' and '>' with themselves -- a
    # no-op, most likely mangled HTML-entity escaping. Removed; see MSG().
    self.send_message("PRI", {'recipient': recipient, 'message': message})
def PRO(self, character):
    """
    Request some profile tags of *character* (e.g. position, language
    preference).
    """
    self.send_message("PRO", {'character': character})
# def RLL(self, channel, dice):
# """
# Roll dice or spin the bottle.
#
# :param channel: ID of channel
# :param dice: Enum of the following values:
# bottle: selects one person in the room, other than the person sending the command.
# #d##: rolls # dice with ## sides, each.
# #d##+#d##: rolls more than one size of dice.
# #d##+###: adds a number (###) to the roll.
# """
# data = {'channel': channel, 'dice': dice}
# self.send_message("RLL", data)
# pass
def RLD(self):
    """Chat-op-only command with undocumented semantics; intentionally unimplemented."""
    pass
def RMO(self, channel, mode):
    """
    --- Channel op or higher. ---
    Set the room mode of *channel*.
    :param channel: ID of the channel.
    :param mode: "chat" (MSG only), "ads" (LRP only), or "both".
    """
    self.send_message("RMO", {'channel': channel, 'mode': mode})
def RST(self, channel, status):
    """
    --- Channel op or higher. ---
    Open or close a private room.
    :param channel: ID of the channel.
    :param status: "private" (invite only) or "public" (anyone may join).
    """
    self.send_message("RST", {'channel': channel, 'status': status})
def RWD(self, character):
    """
    --- Admin only. ---
    Reward *character*: sets their status to 'crown' until changed or logout.
    """
    self.send_message("RWD", {'character': character})
def SFC(self, action, report, character):
    """
    Report an issue to admins/chatops. Not implemented: the web clients also
    upload logs with a specific "report" format, which third-party clients
    apparently cannot do.
    :param action: Always "report" when sent by a client.
    :param report: The user's complaint.
    :param character: The character being reported.
    """
    # TODO: Finish SFC command.
    self.logger.debug("Warning: SFC command not supported yet!")
def STA(self, status, statusmsg):
    """
    Set our character's status.
    :param status: "online", "looking", "busy", "dnd", "idle", or "away".
    :param statusmsg: Custom status message.
    """
    self.send_message("STA", {'status': status, 'statusmsg': statusmsg})
def TMO(self, character, timeout_time, reason):
    """
    --- Chat op or higher. ---
    Time out a user for a given number of minutes. Not implemented yet.
    :param character: Character to be timed out.
    :param timeout_time: Duration in minutes, 1 to 90.
    :param reason: Reason for the timeout.
    """
    pass
def TPN(self, character, status):
    """
    Report our typing status for a private-message conversation. Sending a
    PRI implies "clear", so none is needed after a message goes out.
    :param character: Character we are typing to.
    :param status: "clear", "paused", or "typing".
    """
    self.send_message("TPN", {'character': character, 'status': status})
def UNB(self, character):
    """
    --- Chat op or higher. ---
    Unban *character*'s account from the server.
    """
    self.send_message("UNB", {'character': character})
def UPT(self):
    """Request server uptime and usage statistics."""
    self.send_message("UPT", {})
"""
--- JSON ENDPOINT COMMANDS ---
These commands access F-List's JSON data. Most JSON commands require a ticket, which can be fetched with the
get_ticket() function. These commands will return the JSON data retrieved in the form of a dictionary object, which
can be further broken down into useful information. Be careful not to spam these commands!
"""
@staticmethod
def send_JSON_request(url, data=None):
    """
    POST *data* as a form-encoded body to *url* and decode the JSON response.
    :param url: Endpoint URL.
    :param data: Dict of form fields; defaults to an empty dict.
    :return: Decoded JSON response.
    """
    if data is None:
        data = {}
    payload = urllib.parse.urlencode(data).encode("UTF-8")
    # Context manager ensures the HTTP response is closed even on error
    # (the old code never closed it).
    with urllib.request.urlopen(url, payload) as response:
        return json.loads(response.read().decode("UTF-8"))
def get_character_profile_data(self, name):
    """Fetch profile data for *name* from the F-List JSON API."""
    payload = {
        'account': self.account,
        'ticket': self.get_ticket(),
        'name': name,
    }
    return self.send_JSON_request(
        'https://www.f-list.net/json/api/character-data.php', payload)
def get_character_friends(self, name):
    """Fetch the friends of *name* from the F-List JSON API."""
    payload = {
        'account': self.account,
        'ticket': self.get_ticket(),
        'name': name,
    }
    return self.send_JSON_request(
        'https://www.f-list.net/json/api/character-friends.php', payload)
def get_character_images(self, name):
    """Fetch the image list of *name* from the F-List JSON API."""
    payload = {
        'account': self.account,
        'ticket': self.get_ticket(),
        'name': name,
    }
    return self.send_JSON_request(
        'https://www.f-list.net/json/api/character-images.php', payload)
def get_character_memo(self, name):
    """Fetch the account's private memo about *name* from the JSON API."""
    payload = {
        'account': self.account,
        'ticket': self.get_ticket(),
        'target': name,
    }
    return self.send_JSON_request(
        'https://www.f-list.net/json/api/character-memo-get2.php', payload)
def save_character_memo(self, name, memo):
    """
    Save the account's private memo about the given character.
    :param name: Name of the character the memo is attached to.
    :param memo: The memo text to store.
    """
    # NOTE(review): this posts to 'character-memo-get2.php' -- the same
    # endpoint get_character_memo() reads from. A dedicated save endpoint
    # (e.g. 'character-memo-save.php') seems intended; confirm against the
    # F-List JSON API docs before changing the URL.
    return self.send_JSON_request(
        'https://www.f-list.net/json/api/character-memo-get2.php',
        {
            'account': self.account,
            'ticket': self.get_ticket(),
            'target_name': name,
            'note': memo
        }
    )
def get_friend_bookmark_list(self, bookmarklist=False, friendlist=False, requestlist=False, requestpending=False):
    """
    Query the friend/bookmark JSON endpoint. Each True flag asks the server
    to include the corresponding list in the response.
    """
    data = {"account": self.account, "ticket": self.get_ticket()}
    wanted = (("bookmarklist", bookmarklist), ("friendlist", friendlist),
              ("requestlist", requestlist), ("requestpending", requestpending))
    for key, flag in wanted:
        if flag:
            data[key] = 'true'
    return self.send_JSON_request('https://www.f-list.net/json/api/friend-list.php', data)
def get_friend_list(self):
return self.get_friend_bookmark_list(friendlist=True)['friendlist']
def get_bookmark_list(self):
return self.get_friend_bookmark_list(bookmarklist=True)['bookmarklist']
def get_friend_request_list(self):
return self.get_friend_bookmark_list(requestlist=True)['requestlist']
def get_friend_pending_list(self):
return self.get_friend_bookmark_list(requestpending=True)['requestpending']
def add_bookmark(self, name):
return self.send_JSON_request(
'https://www.f-list.net/json/api/bookmark-add.php',
{
'account': self.account,
'ticket': self.get_ticket(),
'name': name
}
)
def remove_bookmark(self, name):
return self.send_JSON_request(
'https://www.f-list.net/json/api/bookmark-remove.php',
{
'account': self.account,
'ticket': self.get_ticket(),
'name': name
}
)
def remove_friend(self, source_name, dest_name):
return self.send_JSON_request(
'https://www.f-list.net/json/api/friend-remove.php',
{
"account": self.account,
"ticket": self.get_ticket(),
"source_name": source_name,
"dest_name": dest_name
}
)
def accept_friend_request(self, request_id):
return self.send_JSON_request(
'https://www.f-list.net/json/api/request-accept.php',
{
"account": self.account,
"ticket": self.get_ticket(),
"request_id": request_id
}
)
def deny_friend_request(self, request_id):
return self.send_JSON_request(
'https://www.f-list.net/json/api/request-deny.php',
{
"account": self.account,
"ticket": self.get_ticket(),
"request_id": request_id
}
)
def cancel_friend_request(self, request_id):
return self.send_JSON_request(
'https://www.f-list.net/json/api/request-cancel.php',
{
"account": self.account,
"ticket": self.get_ticket(),
"request_id": request_id
}
)
def send_friend_request(self, source, target):
return self.send_JSON_request(
'https://www.f-list.net/json/api/request-send2.php',
{
"account": self.account,
"ticket": self.get_ticket(),
"source": source,
"target": target
}
) |
from keras.models import Model
from keras.layers import Conv2D, ZeroPadding2D, BatchNormalization, Input, Dropout, Conv2DTranspose, Reshape, Activation, Cropping2D, Flatten, ReLU, LeakyReLU, Concatenate
from functools import partial
# Public API of this module.
__all__ = ['BASIC_D', 'UNET_G']

# BatchNormalization preconfigured with the momentum/epsilon used throughout
# this pix2pix-style GAN.
batchnorm = partial(BatchNormalization, momentum=0.9, epsilon=1.01e-5)
# Basic (PatchGAN-style) discriminator
def BASIC_D(num_channels_in, num_discriminator_filter, max_layers=3, use_sigmoid=True):
    """DCGAN_D(nc, ndf, max_layers=3)
    num_channels_in: channels
    num_discriminator_filter: filters of the first layer
    max_layers: max hidden layers
    """
    input_a = Input(shape=(None, None, num_channels_in))
    x = Conv2D(num_discriminator_filter, kernel_size=4, strides=2,
               padding="same", name='First')(input_a)
    x = LeakyReLU(alpha=0.2)(x)
    # Pyramid of strided convolutions; filter count doubles per level, capped at 8x.
    for layer in range(1, max_layers):
        filters = num_discriminator_filter * min(2 ** layer, 8)
        x = Conv2D(filters, kernel_size=4, strides=2, padding="same",
                   use_bias=False, name='pyramid.{0}'.format(layer))(x)
        x = batchnorm()(x, training=1)
        x = LeakyReLU(alpha=0.2)(x)
    filters = num_discriminator_filter * min(2 ** max_layers, 8)
    x = Conv2D(filters, kernel_size=4, padding='same', use_bias=False,
               name='pyramid_last')(x)
    x = batchnorm()(x, training=1)
    x = LeakyReLU(alpha=0.2)(x)
    # Final single-channel patch output (probability map when use_sigmoid).
    x = Conv2D(1, kernel_size=4, name='final', padding='same',
               activation="sigmoid" if use_sigmoid else None)(x)
    return Model(inputs=[input_a], outputs=x)
def UNET_G(isize, num_channel_in=3, num_channel_out=3, num_generator_filter=64, fixed_input_size=True):
    """U-Net Generator"""
    # Filter count doubles per level but is capped at 8 * base filters.
    max_num_filter = 8 * num_generator_filter
    def block(x, size, num_filter_in, use_batchnorm=True, num_filter_out=None, num_filter_next=None):
        # One recursive U-Net level: strided conv (downsample) -> recurse on
        # the half-size map -> concat skip connection -> transposed conv
        # (upsample) -> crop the 1-pixel border the up-conv introduces.
        assert size >= 2 and size % 2 == 0
        if num_filter_next is None:
            num_filter_next = min(num_filter_in*2, max_num_filter)
        if num_filter_out is None:
            num_filter_out = num_filter_in
        x = Conv2D(num_filter_next, kernel_size=4, strides=2, use_bias=(not (use_batchnorm and size > 2)),
                   padding='same', name='conv_{0}'.format(size)
                   )(x)
        if size > 2:
            # Not yet at the bottleneck: normalize, recurse a level deeper,
            # then concatenate the skip connection.
            if use_batchnorm:
                x = BatchNormalization(momentum=0.9, epsilon=1.01e-5)(x, training=1)
            x2 = LeakyReLU(alpha=0.2)(x)
            x2 = block(x2, size//2, num_filter_next)
            x = Concatenate()([x, x2])
        x = Activation("relu")(x)
        x = Conv2DTranspose(num_filter_out, kernel_size=4, strides=2, use_bias=not use_batchnorm,
                            name='convt.{0}'.format(size))(x)
        x = Cropping2D(1)(x)
        if use_batchnorm:
            x = BatchNormalization(momentum=0.9, epsilon=1.01e-5)(x, training=1)
        if size <= 8:
            # Dropout on the deepest levels; training=1 keeps it active at
            # inference too (pix2pix-style noise source).
            x = Dropout(0.5)(x, training=1)
        return x
    size = isize if fixed_input_size else None
    t = inputs = Input(shape=(size, size, num_channel_in))
    t = block(t, isize, num_channel_in, False, num_filter_out=num_channel_out, num_filter_next=num_generator_filter)
    t = Activation('tanh')(t)
    return Model(inputs=inputs, outputs=[t])
|
#coding=utf-8
import os
# pip2 install pywin32com
import win32com.client as win32
import datetime
import readConfig
import getpathInfo
read_conf = readConfig.ReadConfig()
subject = read_conf.get_email('subject')    # mail subject, read from the config file
app = str(read_conf.get_email('app'))       # mail client type (COM app name), from config
address = read_conf.get_email('addressee')  # recipient list, from config
cc = read_conf.get_email('cc')              # CC recipient list, from config
# Path of the generated HTML test report that gets attached to the mail.
mail_path = os.path.join(getpathInfo.get_Path(),'result','report.html')
class send_email():
    """Send the generated HTML test report by driving the local mail client
    (e.g. Outlook) through its COM automation interface."""
    def outlook(self):
        # olook = win32.Dispatch("%s.Application"%app)
        # EnsureDispatch builds (and caches) the COM wrapper for the mail app.
        olook=win32.gencache.EnsureDispatch('%s.Application'% app)
        mail = olook.CreateItem(win32.constants.olMailItem)
        mail.To = address
        mail.CC = cc
        # Subject is "YYYY-MM-DD HH:MM:SS" + configured subject text.
        mail.Subject = str(datetime.datetime.now())[0:19]+'%s'%subject
        mail.Attachments.Add(mail_path,1,1,"myFile")
        content = """
执行测试中......
测试已完成!!
生成报告中....
报告已生成...
报告已邮件发送!!
"""
        mail.Body = content
        mail.Send()
if __name__=="__main__":
    # Manual smoke test: show the configured subject and send the report mail.
    print(subject)
    send_email().outlook()
    print('send email ok!!!!!')
import random, machine
from fonts import six_by_three_font, seven_by_four_font
from time import sleep_ms
import gc
# Prefer the pyboard-specific `pyb` module; on MicroPython ports without it
# fall back to `machine`, which exposes a compatible SPI constructor.
try:
    import pyb
except ImportError:
    import machine as pyb
#Graduation cap class to control LED matrix functionality
class GradCap:
    """Drive a 14x14 WS2812B LED matrix: scrolling text in two font sizes,
    "paint by numbers" sprites, intensity control.

    Refactor notes: display_6x3_text and display_7x4_text were near-identical
    ~100-line copies (including a second copy each for the scroll-off phase);
    the shared logic now lives in the _shift_in_column / _matrix_to_pixels /
    _scroll_text helpers, parameterized by the top/bottom text band layout.
    Also fixes `i%2 is 1` (identity test on an int) in display_sprite.
    """

    def __init__(self):
        self.data_pin = 18          # pin the one-wire interface is connected to
        self.led_length = 14*14     # number of LEDs in matrix
        self.leds = WS2812(data_pin=self.data_pin, led_count=self.led_length, intensity=.45)
        # Palette used by the sprites/text; sprite cells index into this dict.
        # (The old numeric comments for 5 and 6 did not match the actual values.)
        self.colors = {
            0: (0, 0, 0),
            # 1: green, 2: red, 3: blue
            1: (0, 255, 0), 2: (255, 0, 0), 3: (0, 0, 255),
            # 4: white, 5: cherry-ish, 6: very dim gray
            4: (255, 255, 255), 5: (255, 48, 84), 6: (10, 6, 6),
            # flame colors 7: red, 8: orange, 9: yellow
            7: (255, 35, 35), 8: (255, 120, 0), 9: (255, 255, 0),
            # spare slots (currently off)
            10: (0, 0, 0), 11: (0, 0, 0), 12: (0, 0, 0),
        }

    def _shift_in_column(self, matrix, ch_seg, top_rows, bottom_start):
        """Scroll every row one pixel left, feeding the bits of *ch_seg* into
        the bottom text band and recycling the bit that falls off the left of
        each bottom row into its matching top row.

        matrix: list of 14 ints, one bit pattern per display row (mutated)
        ch_seg: one font column; bit k goes to row bottom_start + k
        top_rows: number of rows in the top text band (rows 0..top_rows-1)
        bottom_start: first row index of the bottom text band
        """
        for m_ind in range(0, 14):
            if m_ind < top_rows:
                matrix[m_ind] = matrix[m_ind] << 1
                # bit 14 of the paired bottom row wraps into this top row
                matrix[m_ind] |= (matrix[m_ind + bottom_start] & 0b10000000000000) >> 13
            if m_ind >= bottom_start:
                matrix[m_ind] = matrix[m_ind] << 1
                matrix[m_ind] |= 1 & (ch_seg >> (m_ind - bottom_start))

    def _matrix_to_pixels(self, matrix, top_rows, bottom_start, color1, color2):
        """Flatten the 14-row bit matrix into the serpentine 1D color list the
        WS2812 strip expects (even rows are wired right-to-left)."""
        pixels = []
        for m_ind, m_row in enumerate(matrix):
            for bit in range(0, 14):
                if m_ind % 2 == 0:
                    m_bit = (m_row >> (13 - bit)) & 1
                else:
                    m_bit = (m_row >> bit) & 1
                if m_ind < top_rows and m_bit == 1:
                    pixels.append(color1)       # top band
                elif m_ind >= bottom_start and m_bit == 1:
                    pixels.append(color2)       # bottom band
                else:
                    pixels.append((0, 0, 0))    # gap rows / unlit pixels
        return pixels

    def _scroll_text(self, message, font, top_rows, bottom_start,
                     frame_speed, color1, color2, scroll_off):
        """Shared scroller for both font sizes: one rendered frame (and one
        frame_speed delay) per font column pushed onto the display."""
        matrix = [0] * 14
        for ch in message:
            # unknown characters render as a single blank column
            for ch_seg in font.get(ch, [0b0]):
                self._shift_in_column(matrix, ch_seg, top_rows, bottom_start)
                self.leds.show(self._matrix_to_pixels(matrix, top_rows, bottom_start, color1, color2))
                sleep_ms(frame_speed)
        if scroll_off:
            # push 28 blank columns so the text scrolls fully off both bands
            for _ in range(0, 28):
                self._shift_in_column(matrix, 0, top_rows, bottom_start)
                self.leds.show(self._matrix_to_pixels(matrix, top_rows, bottom_start, color1, color2))
                sleep_ms(frame_speed)

    #e.g. display_6x3_text("Hello World! ...", frame_speed=40)
    def display_6x3_text(self, message, frame_speed=250, color1=None, color2=None, scroll_off=True):
        """Scroll *message* using the 6x3 font.

        frame_speed: ms per one-pixel scroll step
        color1/color2: colors for the top/bottom text bands (default white/cherry)
        scroll_off: whether the text scrolls off-screen at the end
        """
        self._scroll_text(message, six_by_three_font, 6, 8, frame_speed,
                          color1 or self.colors[4], color2 or self.colors[5],
                          scroll_off)

    #e.g. display_7x4_text("Hello World", frame_speed=60)
    def display_7x4_text(self, message, frame_speed=250, color1=None, color2=None, scroll_off=True):
        """Scroll *message* using the 7x4 font (uppercased: the font has no
        lowercase glyphs). Same parameters as display_6x3_text."""
        self._scroll_text(message.upper(), seven_by_four_font, 7, 7, frame_speed,
                          color1 or self.colors[4], color2 or self.colors[5],
                          scroll_off)

    def display_sprite(self, data):
        """Display a 14x14 "paint by numbers" sprite; each cell of *data* is
        an index into self.colors. Odd rows are reversed to match the
        serpentine wiring."""
        const_data = []
        for i, row in enumerate(data):
            # fix: was `i%2 is 1` (identity comparison on an int)
            cols = reversed(row) if i % 2 == 1 else row
            for col_val in cols:
                const_data.append(self.colors[col_val])
        self.leds.show(const_data)

    def new_intensity(self, intens):
        """Set LED intensity from a 0-100 value and refresh the display."""
        self.leds.intensity = intens/100
        self.leds.show([])

    def clear(self):
        """Turn every LED off."""
        self.leds.show([])
#Class to handle the WS2812B one-wire communications
class WS2812:
    """Drive WS2812/WS2812B LEDs over SPI: each pair of color bits is
    expanded to one SPI byte (see buf_bytes) so that, at 3.2 MHz, the byte's
    pulse shape matches the WS2812 one-wire bit timing."""

    # SPI wire patterns for the four possible 2-bit values 00/01/10/11.
    buf_bytes = (0x88, 0x8e, 0xe8, 0xee)

    def __init__(self, data_pin, led_count=1, intensity=1):
        """
        Params:
        * data_pin = pin the strip's data line is wired to
          (fix: was documented as "spi_bus = SPI bus ID", which this
          constructor does not take)
        * led_count = count of LEDs
        * intensity = light intensity (float up to 1)
        """
        self.led_count = led_count
        self.intensity = intensity
        # prepare SPI data buffer (4 bytes for each color)
        self.buf_length = self.led_count * 3 * 4
        self.buf = bytearray(self.buf_length)
        # SPI init
        self.spi = pyb.SPI(1, baudrate=3200000, polarity=0, phase=1, mosi=machine.Pin(data_pin))
        # turn LEDs off
        self.show([])

    def show(self, data):
        """
        Show RGB data on LEDs. Expected data = [(R, G, B), ...] where R, G and B
        are intensities of colors in range from 0 to 255. One RGB tuple for each
        LED. Count of tuples may be less than count of connected LEDs; the
        remaining LEDs are turned off.
        """
        self.fill_buf(data)
        self.send_buf()

    def send_buf(self):
        """
        Send buffer over SPI, then collect garbage to keep the MicroPython
        heap compact between frames.
        """
        self.spi.write(self.buf)
        gc.collect()

    def update_buf(self, data, start=0):
        """
        Fill a part of the buffer with RGB data.
        Color order in the buffer is GRB (the WS2812's wire order); each color
        byte becomes 4 buffer bytes (1 byte for each 2 bits).
        Returns the index of the first unfilled LED.
        Note: If you find this function ugly, it's because speed optimisations
        beated purity of code.
        """
        buf = self.buf
        buf_bytes = self.buf_bytes
        intensity = self.intensity
        mask = 0x03
        index = start * 12
        for red, green, blue in data:
            red = int(red * intensity)
            green = int(green * intensity)
            blue = int(blue * intensity)
            buf[index] = buf_bytes[green >> 6 & mask]
            buf[index+1] = buf_bytes[green >> 4 & mask]
            buf[index+2] = buf_bytes[green >> 2 & mask]
            buf[index+3] = buf_bytes[green & mask]
            buf[index+4] = buf_bytes[red >> 6 & mask]
            buf[index+5] = buf_bytes[red >> 4 & mask]
            buf[index+6] = buf_bytes[red >> 2 & mask]
            buf[index+7] = buf_bytes[red & mask]
            buf[index+8] = buf_bytes[blue >> 6 & mask]
            buf[index+9] = buf_bytes[blue >> 4 & mask]
            buf[index+10] = buf_bytes[blue >> 2 & mask]
            buf[index+11] = buf_bytes[blue & mask]
            index += 12
        return index // 12

    def fill_buf(self, data):
        """
        Fill buffer with RGB data.
        All LEDs after the data are turned off.
        (Fix: removed a stray trailing `index += 1` after the loop — it had
        no effect.)
        """
        end = self.update_buf(data)
        # turn off the rest of the LEDs
        buf = self.buf
        off = self.buf_bytes[0]
        for index in range(end * 12, self.buf_length):
            buf[index] = off
from __future__ import absolute_import
import os
import logging
from splunk.appserver.mrsparkle.controllers import BaseController
from splunk.appserver.mrsparkle.lib.routes import route
from splunk.appserver.mrsparkle.list_helpers import generators
from splunk.appserver.mrsparkle.lib import util
logger = logging.getLogger('splunk.appserver.controllers.lists')

# Get the generators module path and import all of the ListGeneratorControllers
# underneath it. Eventually we may move these to an external location for end users.
LIST_GENERATOR_PATH = os.path.dirname(os.path.abspath(generators.__file__))
class ListsController(BaseController):
    """
    /lists acts as a meta-endpoint, it loads its underlying endpoints at runtime
    so users can add their own listing endpoints at will.

    Adding new list generating controllers:
    Controllers meant to live underneath the /lists controller are normal
    controllers aside from the following exceptions:
    * They inherit from (ListGeneratorController)
    * They wrap their endpoints in the decorator @format_list_response
    * List generating controller endpoints should always return Python lists of
      dict objects like:
      [{'foo':'bar'}, ...]

    How it works:
    The ListsController loads any list_generator modules it finds and attaches the internal
    classes that descend from ListGeneratorController onto the list endpoint.
    Responses are routed through the @format_list_response and are converted into a string,
    allowing the controller endpoint to return Python objects.

    Generating the response:
    /lists attempts to return the response in the requested format. If that format is not available it
    will attempt to inspect any Accept headers in the request and return a valid response based on the acceptable
    formats. If the response cannot be fulfilled it will return a 406 response with headers revealing the available
    content types. If a request is made to a list endpoint that does not exist, lists returns a 404
    """

    def __init__(self, path=None):
        BaseController.__init__(self)
        self.addListGeneratingControllers(path=path)

    def addListGeneratingControllers(self, path=None):
        '''
        Find all of the generators (aka specialized controllers that generate lists)
        and load them into the lists controller as normal endpoints.

        Fixes: `c.endpoint == None` -> `is None`; logger calls now use lazy
        %-style arguments instead of eager string interpolation.
        '''
        path = path or LIST_GENERATOR_PATH
        for mod in util.import_from_path(path):
            for c in util.get_module_classes(mod, generators.ListGeneratorController):
                logger.info('List controller loaded: %s', c.__name__)
                if c.endpoint is None:
                    # a generator without an endpoint cannot be mounted
                    logger.debug("ListGeneratorController %s does not define an endpoint property.", c.__name__)
                    continue
                endpoint_name = c.endpoint
                logger.info('Setting lists/%s', endpoint_name)
                setattr(self, endpoint_name, c())

    @route('/')
    def index(self):
        return "You are at the lists endpoint."
|
# -*- coding: utf-8 -*-
import re

# Classify each sentence as Freda's or Rainbow's.
# Freda's sentences match "(.+)(lala)" from the start; NOTE(review): the
# intent is probably "ends with lala", but the original pattern is not
# anchored at the end -- behavior preserved as-is.
fred = re.compile(r'(.+)(lala)')
# Rainbow's sentences start with "miao." (DOTALL so '.' also matches newlines).
rainbow = re.compile(r'(miao\.)(.+)', re.DOTALL)

n = int(raw_input())
for i in xrange(n):
    sentence = raw_input()
    is_fred = fred.match(sentence) is not None
    is_rainbow = rainbow.match(sentence) is not None
    # Fixes: removed the unused `result` list and merged the duplicated
    # "both matched" / "neither matched" branches.
    if is_fred == is_rainbow:
        print('OMG>.< I don\'t know!')
    elif is_fred:
        print('Freda\'s')
    else:
        print('Rainbow\'s')
|
from PIL import Image
from tkinter import Tk, PhotoImage, Canvas, NW
def algoritm (txtname, imagename, mode, size = (960,540)):
    """Render a space-separated coordinate dataset as black pixels on a white
    image and save it as "<imagename>.png".

    txtname: dataset file, one "a b" coordinate pair per line
    mode 1: plot (a, b) directly (canvas is transposed for this mode)
    mode 2: plot with the coordinates swapped, i.e. (b, a)
    mode 3: like mode 2 but vertically flipped: (b, 540 - a)
    Returns the (possibly transposed) canvas size.

    BUG FIX: the original compared the *builtin* `format` to 1 and 3 (always
    False), so the mode-1 canvas transpose and the mode-3 flip never ran;
    the `mode` parameter was clearly intended. Also closes the dataset file
    (it was previously left open).
    """
    size = size[::-1] if mode == 1 else size
    img = Image.new("RGB", size, "white")
    with open(txtname, "r") as f:
        data = f.read().split("\n")
    # last element after split("\n") is the empty trailing line -> skip it
    for i in range(len(data)-1):
        pair = data[i].split(" ")
        if mode == 1:
            img.putpixel((int(pair[0]), int(pair[1])), (0, 0, 0))
        else:
            row = 540 - int(pair[0]) if mode == 3 else int(pair[0])
            img.putpixel((int(pair[1]), row), (0, 0, 0))
    img.save(f"{imagename}.png")
    return size
def program():
    """Interactive driver: ask for a dataset and image name, render it via
    algoritm(), optionally preview the result in a Tk window, and optionally
    restart for another dataset."""
    txt = input("Введіть назву файлу с датасетом. Приклад: Name.txt\n")
    image = input("Введіть назву зображення:\n")
    # The mode prompt is reused verbatim while the input is invalid.
    mode_prompt = ("Режими роботи програми:\n1. Пряме відображення датасету не змінюючи координат х та у\n"
                   "2. Відображення датасету змінюючи координати місцями\n"
                   "3. Відображення датасету змінюючи координати місцями та з нормальним відображенням зображеенням\n"
                   "\nВведіть 1, 2 або 3 для вибору режиму роботи програми:\n")
    mode = int(input(mode_prompt))
    while mode not in (1, 2, 3):
        mode = int(input(mode_prompt))
    size = algoritm(txt, image, mode)
    image_input = input("Хочете продивитися фотографію? (Введіть 'Так' щоб відкрити вікно):\n")
    if image_input == "Так":
        # Show the rendered PNG in a window sized to the image.
        windowMain = Tk()
        windowMain.geometry(f'{size[0]}x{size[1]}+50+50')
        ph_im = PhotoImage(file=f'{image}.png')
        canv = Canvas(windowMain, width=size[0], height=size[1])
        canv.create_image(1, 1, anchor=NW, image=ph_im)
        canv.place(x=10, y=10)
        windowMain.mainloop()
    restart = input("Хочете відобразити ще один датасет? (Введіть 'Так' щоб почати заново):\n")
    if restart == "Так":
        program()


program()
from django import forms
from Reports.models import *
class AuctionQueryForm(forms.ModelForm):
    """ModelForm exposing every field of the AuctionQuery model."""
    class Meta:
        model = AuctionQuery
        fields = '__all__'
|
import pandas as pd
import matplotlib
import os
INPUT_FOLDER = "../preprocessing/aggregated/weekly/classes/"
OUTPUT_FOLDER = "../preprocessing/distributions/weekly/classes/"

# Columns whose values span orders of magnitude; these get log-binned
# histograms instead of plain value-count bar charts.
COLUMNS_TO_HISTOGRAM = {
    "MaxChurnInBurst",
    "TotalChurnInBurst",
    "ChurnTotal",
    "TLOC"
}
# Expects a space-separated .csv file; draws one bar chart per column into
# output_folder (log-binned histogram for the columns in COLUMNS_TO_HISTOGRAM).
def draw_graphs_for_columns(file_name, file_path, output_folder):
    print("working on: " + file_path)
    frame = pd.DataFrame(pd.read_csv(file_path, sep=" "))
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    for colname in frame.columns:
        # gap/burst sizes are not charted here
        if colname in ("GapSize", "BurstSize"):
            continue
        graph_name = output_folder + colname + "-" + file_name.replace(".csv", "")
        column = frame[[colname]]
        series = column[colname]
        print("working on: " + colname)
        if colname in COLUMNS_TO_HISTOGRAM:
            # logarithmic bin edges [-1, 0, 1, 10, 100, ...] up to the max value
            top = series.max()
            bins = [-1, 0, 1]
            edge = 1
            while edge < top:
                edge *= 10
                bins.append(edge)
            # cut data into the bins, then chart the per-bin counts
            counts = pd.cut(series, bins=bins).value_counts().sort_index()
            fig = counts.plot.bar(rot=-45, logy=True).get_figure()
        else:
            counts = series.value_counts().sort_index()
            fig = counts.plot.bar().get_figure()
        fig.tight_layout()
        fig.savefig(graph_name)
        fig.clf()
        # drop references so the next iteration's figure can be collected
        column = None
        fig = None
        series = None
def find_max_val_for_columns(file_path, cols):
    """Update *cols* (column name -> running max) with the per-column maxima
    of the space-separated csv at *file_path*."""
    print("working on: " + file_path)
    frame = pd.DataFrame(pd.read_csv(file_path, sep=" "))
    for name in frame.columns:
        col_max = frame[[name]][name].max()
        cols[name] = max(cols[name], col_max) if name in cols else col_max
def find_max_forall_in_folder(input_path):
    """Print the per-column maximum across every csv file directly inside
    *input_path* (subdirectories are skipped)."""
    if not os.path.isdir(input_path):
        print("Error: path specified is not a folder.")
        return
    cols = {}
    for entry in os.listdir(input_path):
        full_path = input_path + entry
        if os.path.isdir(full_path):
            continue
        find_max_val_for_columns(full_path, cols)
    for name in cols:
        print(name + " : " + str(cols[name]))
def draw_graphs_from_folder(input_path, output_folder):
    """Run draw_graphs_for_columns on every file directly inside *input_path*."""
    if not os.path.isdir(input_path):
        print("Error: path specified is not a folder.")
        return
    for entry in os.listdir(input_path):
        full_path = input_path + entry
        if not os.path.isdir(full_path):
            draw_graphs_for_columns(entry, full_path, output_folder)


# draw_graphs_from_folder(INPUT_FOLDER, OUTPUT_FOLDER)
find_max_forall_in_folder(INPUT_FOLDER)
|
import os
from typing import Dict
import numpy as np
from bert_api.client_lib import BERTClient
from cache import load_pickle_from
from cpath import data_path, pjoin
from cpath import output_path
from data_generator.bert_input_splitter import split_p_h_with_input_ids
from data_generator.tokenizer_wo_tf import convert_ids_to_tokens, get_tokenizer, EncoderUnitPlain, pretty_tokens
from port_info import FDE_PORT
from tlm.data_gen.doc_encode_common import split_by_window
from tlm.qtype.analysis_fde.analysis_a import embeddings_to_list
from tlm.qtype.analysis_fde.runner.build_q_emb_from_samples import load_q_emb_qtype_2X_v_train_200000, load_q_bias
from tlm.qtype.analysis_qde.contribution_module import print_base_info
from tlm.qtype.analysis_qemb.save_parsed import get_voca_list
from tlm.qtype.content_functional_parsing.qid_to_content_tokens import QueryInfo
from tlm.qtype.contribution_common import enum_window_drop, contribution_by_change
from tlm.qtype.enum_util import enum_samples, enum_interesting_entries
class FDEClientWrap:
    """Thin client for the FDE scoring service: encodes a (seg1, seg2) document
    pair and sends it, with an empty query side, to the BERT server on FDE_PORT."""

    def __init__(self):
        max_seq_length = 512
        self.client = BERTClient("http://localhost", FDE_PORT, max_seq_length)
        voca_path = pjoin(data_path, "bert_voca.txt")
        self.q_encoder = EncoderUnitPlain(128, voca_path)
        self.d_encoder = EncoderUnitPlain(max_seq_length, voca_path)

    def request(self, seg1, seg2):
        # query side is deliberately empty: only the document payload matters here
        qe_ids, qe_mask, qe_seg = self.q_encoder.encode_pair("", "")
        d = self.d_encoder.encode_inner(seg1, seg2)
        one_inst = (qe_ids, qe_mask, qe_seg,
                    d["input_ids"], d["input_mask"], d["segment_ids"])
        return self.client.send_payload([one_inst])[0]
def run_contribution_analysis(qtype_entries,
                              query_info_dict: Dict[str, QueryInfo],
                              q_embedding_d: Dict[str, np.array],
                              q_bias_d: Dict[str, np.array],
                              ):
    """For each interesting query entry, measure how much each 20-token window
    of the document contributes to the FDE score, both with the query's
    functional span and with the "type-less" [MASK] span, and print a
    per-window comparison table.

    q_embedding_d / q_bias_d: functional-span representation -> embedding/bias.
    """
    tokenizer = get_tokenizer()
    voca_list = get_voca_list(tokenizer)
    unknown_token = tokenizer.convert_tokens_to_ids(["[UNK]"])[0]
    window_size = 20
    fde_client = FDEClientWrap()
    # the [MASK] span stands in for "query with no functional type"
    empty_func_span = "[MASK]"
    func_span_list, qtype_embedding_np = embeddings_to_list(q_embedding_d)
    def compute_score(func_span, q_bias_d, seg1_np, seg2):
        # Ask the server for the document-side vector and bias, then score by
        # dot product against the span embedding (and against [MASK] for the
        # type-less baseline).
        ret = fde_client.request(seg1_np, seg2)
        doc_vector = ret['qtype_vector2']
        d_bias = ret['d_bias']
        empty_q_vector = q_embedding_d[empty_func_span]
        target_q_vector = q_embedding_d[func_span]
        score = np.dot(target_q_vector, doc_vector) + d_bias + q_bias_d[func_span]
        score_type_less = np.dot(empty_q_vector, doc_vector) + d_bias + q_bias_d[empty_func_span]
        return score, score_type_less
    for e in enum_interesting_entries(qtype_entries, query_info_dict):
        info = query_info_dict[e.qid]
        input_ids = e.de_input_ids
        # NOTE(review): both arguments are the same ids; presumably this splits
        # the concatenated pair into premise/hypothesis segments -- confirm
        # against split_p_h_with_input_ids.
        seg1_np, seg2_np = split_p_h_with_input_ids(input_ids, input_ids)
        seg1 = seg1_np.tolist()
        seg2 = seg2_np.tolist()
        func_span = info.get_func_span_rep()
        if func_span not in q_embedding_d:
            print(func_span, "NOT FOUND")
            continue
        # baseline score for the full document
        base_score, base_score_typeless = compute_score(func_span, q_bias_d, seg1, seg2)
        single_sent_list = list(split_by_window(seg2, window_size))
        # document variants with each window replaced by [UNK] tokens
        dropped_seg2s = list(enum_window_drop(seg2, unknown_token, window_size))
        sent_drop_result_pairs = [compute_score(func_span, q_bias_d, seg1, window) for window in dropped_seg2s]
        single_sent_result_pairs = [compute_score(func_span, q_bias_d, seg1, window) for window in single_sent_list]
        sent_drop_result, sent_drop_result_typeless = zip(*sent_drop_result_pairs)
        single_sent_result, single_sent_result_typeless = zip(*single_sent_result_pairs)
        # contribution of each window measured two ways: scoring it alone,
        # and scoring the document with it dropped
        contrib_single = contribution_by_change(base_score,
                                                single_sent_result)
        contrib_type_less_single = contribution_by_change(base_score_typeless,
                                                          single_sent_result_typeless, )
        contrib_drop = contribution_by_change(base_score,
                                              sent_drop_result)
        contrib_drop_type_less = contribution_by_change(base_score_typeless,
                                                        sent_drop_result_typeless, )
        print_base_info(e, query_info_dict)
        print("Base score: {0:.2f}".format(base_score))
        head0 = ['by single', '', '', 'by drop', '', '']
        head1 = ['full', 'no_func', 'diff', 'full', 'no_func', 'diff']
        print("\t".join(head0))
        print("\t".join(head1))
        for window_idx, window in enumerate(split_by_window(seg2, window_size)):
            seg2_tokens = convert_ids_to_tokens(voca_list, window)
            passage: str = pretty_tokens(seg2_tokens, True)
            numbers = [
                contrib_single[window_idx],
                contrib_type_less_single[window_idx],
                contrib_single[window_idx] - contrib_type_less_single[window_idx],
                contrib_drop[window_idx],
                contrib_drop_type_less[window_idx],
                contrib_drop[window_idx] - contrib_drop_type_less[window_idx],
            ]
            s = "\t".join(["{0:.2f}".format(v) for v in numbers]) + "\t" + passage
            print(s)
        print("")
def main():
    # run_name = "qtype_2X_v_train_200000"
    run_name = "qtype_2Y_v_train_120000"
    # NOTE(review): the embedding loader name says 2X while run_name is 2Y --
    # confirm these query embeddings actually belong to this run.
    q_embedding_d: Dict[str, np.array] = load_q_emb_qtype_2X_v_train_200000()
    save_dir = os.path.join(output_path, "qtype", run_name + '_sample')
    # pickle "0" holds (entries, query_info_dict); only the info dict is used here
    _, query_info_dict = load_pickle_from(os.path.join(save_dir, "0"))
    qtype_entries = enum_samples(save_dir)
    q_bias_d: Dict[str, np.array] = load_q_bias(run_name)
    run_contribution_analysis(qtype_entries, query_info_dict, q_embedding_d, q_bias_d)


if __name__ == "__main__":
    main()
|
# -*- coding: latin-1 -*-
# read MNIST (converted from .mat format) from a pickle and visualize pixel-variance masks
import numpy as np
import matplotlib.pyplot as plt
import pickle

plt.close('all')

pName = 'MNISTsmall.p'
# Fix: the pickle file handle was opened inline and never closed.
with open(pName, 'rb') as fh:
    D = pickle.load(fh)
X = D['X'] * 1.          # promote to float
y = D['trueClass']
f1 = D['foldTrain']
X1 = X[:, f1]            # training-fold samples (pixels x samples)
y1 = y[f1]

# AND mask: pixels with nonzero variance in EVERY digit class
Cand = np.ones((28, 28)).astype('bool')
# OR mask: pixels with nonzero variance in AT LEAST ONE digit class
Cor = np.zeros((28, 28)).astype('bool')
for i in range(10):
    dados = X1[:, y1 == i]
    C = np.cov(dados)
    # boolean image: True where this class has nonzero pixel variance
    I0 = np.reshape(np.diag(C) != 0, (28, 28))
    Cand = Cand & I0
    Cor = Cor | I0
    # per-class variance mask figure
    plt.figure(figsize=(5, 5))
    plt.imshow(I0, cmap='gray', interpolation='none')
    plt.box('on')
    plt.title('%d' % i)
    plt.xticks(np.arange(0, 28) - .5, '')
    plt.yticks(np.arange(0, 28) - .5, '')
    plt.grid('on')

plt.figure(figsize=(5, 5))
plt.imshow(Cand, cmap='gray', interpolation='none')
plt.box('on')
plt.xticks(np.arange(0, 28) - .5, '')
plt.yticks(np.arange(0, 28) - .5, '')
plt.grid('on')
plt.title('AND')

plt.figure(figsize=(5, 5))
plt.imshow(Cor, cmap='gray', interpolation='none')
plt.title('OR')
plt.box('on')
plt.xticks(np.arange(0, 28) - .5, '')
plt.yticks(np.arange(0, 28) - .5, '')
plt.grid('on')

plt.show()
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from fs.errors import FSError
from .console import Cell
from .compat import text_type, implements_to_string
from .reader import DataReader
import weakref
import re
# Matches an optional "{fsname}" prefix followed by the remainder of the path.
_re_fs_path = re.compile(r'^(?:\{(.*?)\})*(.*$)')


def parse_fs_path(path):
    """Split "{fsname}rest" into (fsname or None, rest)."""
    name, remainder = _re_fs_path.match(path).groups()
    if not name:
        # no "{...}" prefix (or an empty one): no named filesystem
        name = None
    return name, remainder
class FSContainer(dict):
    """Mapping of name -> filesystem, with console pretty-printing and bulk
    shutdown."""

    def __moyaconsole__(self, console):
        """Render the contained filesystems as a console table (green when a
        system path is available, blue/red for remote or erroring ones)."""
        table = [[Cell("Name", bold=True),
                  Cell("Type", bold=True),
                  Cell("Location", bold=True)]]
        for name, fs in sorted(self.items()):
            syspath = fs.getsyspath('/', allow_none=True)
            if syspath is not None:
                location = syspath
                fg = "green"
            else:
                try:
                    location = fs.desc('/')
                except FSError as e:
                    location = text_type(e)
                    fg = "red"
                else:
                    fg = "blue"
            table.append([Cell(name),
                          Cell(fs.get_type_name()),
                          Cell('%s' % location, bold=True, fg=fg)
                          ])
        console.table(table, header=True)

    def close_all(self):
        """Close every contained filesystem, then empty the container.

        BUG FIX: this previously iterated self.items(), so `fs` was a
        (name, fs) tuple whose missing close() raised and was silently
        swallowed -- nothing was ever closed. Iterate the values instead.
        """
        for fs in self.values():
            try:
                fs.close()
            except Exception:
                # best-effort shutdown: one failing close must not stop the rest
                pass
        self.clear()
@implements_to_string
class FSWrapper(object):
    """Thin console-friendly proxy around a filesystem object.

    The wrapped fs is held only weakly, unless ``ref=True`` in which case a
    strong reference is also stored on the instance (as ``self.ref``) to keep
    the fs alive for the wrapper's lifetime.
    """

    def __init__(self, fs, ref=False):
        self._fs = weakref.ref(fs)
        if ref:
            # Pin the fs so it cannot be garbage collected under us.
            self.ref = self.fs

    @property
    def fs(self):
        """The wrapped filesystem (or ``None`` once it has been collected)."""
        return self._fs()

    def get_type_name(self):
        """Class name of the wrapped filesystem object."""
        return type(self.fs).__name__

    def __str__(self):
        return self.fs.desc('')

    def __repr__(self):
        return repr(self.fs)

    def __contains__(self, path):
        # Membership means "a file exists at this path".
        return self.fs.isfile(path)

    def __getitem__(self, path):
        # Files index to their raw contents; directories to a nested wrapper
        # that holds a strong reference to the opened sub-fs.
        if not self.fs.isfile(path):
            return self.__class__(self.fs.opendir(path), ref=True)
        return self.fs.getcontents(path)

    def __getattr__(self, name):
        # Anything not defined here is delegated straight to the wrapped fs.
        return getattr(self.fs, name)

    def __moyaconsole__(self, console):
        console(self.fs.desc('.')).nl()
        self.fs.tree(max_levels=1)

    def keys(self):
        return self.fs.listdir()

    def values(self):
        describe = self.fs.desc
        return [describe(name) for name in self.fs.listdir()]

    def items(self):
        describe = self.fs.desc
        return [(name, describe(name)) for name in self.fs.listdir()]

    @property
    def reader(self):
        """A ``DataReader`` over the wrapped filesystem."""
        return DataReader(self.fs)
if __name__ == "__main__":
    # Quick demo of the three prefix forms: named, absent, and empty.
    for example_path in (
        "{templates}/widgets/posts.html",
        "/media/css/blog.css",
        "{}/media/css/blog.css",
    ):
        print(parse_fs_path(example_path))
|
'''
Generator for the image data
'''
import numpy as np
from PIL import Image, ImageOps
import io
from sympy import preview
import pandas as pd
import click
import uuid
class create_data:
    r""" Create a set of images rendering LaTeX formulas, plus a csv index.
    # Arguments
        image_size: A tuple of the image size eg. (H,W)
        output_csv: The name of the output csv that contains the names of the images e.g 'latex_imgs.csv'
        output_dir: Path to the Existing directory to place the images e.g '../data/latex_imgs'
        formula_file: Text file containing the formulas on every line e.g 'formula.txt'
    # Example
    .. code:: python
        creator = create_data()
        creator.create()
    """

    # Upper bound on how many formulas are rendered in one run
    # (was a hard-coded range(40000) that crashed on shorter files).
    MAX_FORMULAS = 40000

    def __init__(self,
                 image_size,
                 output_csv,
                 output_dir,
                 formula_file
                 ):
        self.__image_size = image_size
        self.__output_csv = output_csv
        self.__output_dir = output_dir
        self.__formula_file = formula_file

    def latex_to_img(self, tex, image_name, image_size=(64,512), background_colour=(255,255,255)):
        '''
        Generate a grayscale png image of latex text
        # Arguments
            tex: The tex code to display in the image
            image_name: The file name i.e 'image'
            image_size: The image size (H,W) in pixels
            background_colour: The background colour to use for the image
        '''
        buf = io.BytesIO()
        # Render the formula via sympy's LaTeX preview into an in-memory png.
        preview(f'${tex}$', viewer='BytesIO', outputbuffer=buf, euler=False)
        image = Image.open(buf)
        padded_image = self.pad_to_target(image, image_size[0], image_size[1], background_colour)
        ImageOps.grayscale(padded_image).save(image_name)

    def create(self):
        '''
        Create the directory containing the images as well as the
        corresponding csv which contains
        image_name and latex_equation as headers
        '''
        # ``with`` guarantees the formula file is closed even on error
        # (the old code leaked the handle if read() raised).
        with open(self.__formula_file, 'r', encoding="utf8") as formula_file:
            formulas = formula_file.read().split('\n')
        # BUG FIX: the old code iterated range(40000) unconditionally and
        # raised IndexError when the file held fewer formulas.
        limit = min(self.MAX_FORMULAS, len(formulas))
        dataset = {'image_name': [], 'latex_equations': []}
        with click.progressbar(range(limit)) as bar:
            for i in bar:
                # Skip pathologically long formulas.
                if len(formulas[i].split(' ')) > 200:
                    continue
                try:
                    im_name = str(uuid.uuid4().hex) + '.png'
                    self.latex_to_img(f"{formulas[i]}", f'{self.__output_dir}/' + im_name, self.__image_size)
                    dataset['image_name'].append(im_name)
                    dataset['latex_equations'].append(f"{formulas[i]}")
                except Exception:
                    # Best-effort: formulas that LaTeX cannot render are skipped.
                    pass
        pd.DataFrame(data=dataset).to_csv(path_or_buf=self.__output_csv, index=False)

    def pad_to_target(self, img, target_height, target_width, background_colour=(255,255,255)):
        '''
        Pad image with the background colour to the specified height and width
        This op throws an assertion error if target_height or
        target_width is smaller than the current size
        # Arguments
            img: The PIL image instance to pad
            target_height: The integer target height
            target_width: The integer target width
            background_colour: The background colour to use for the pad
        # Returns
            The padded PIL image (the original image unchanged if it
            already matches the target size)
        '''
        w, h = img.size
        assert w <= target_width, 'image width is larger than target'
        # BUG FIX: the old message said "width" for the height check too.
        assert h <= target_height, 'image height is larger than target'
        left = top = right = bottom = 0
        if target_width > w:
            delta = target_width - w
            left = delta // 2
            right = delta - left
        if target_height > h:
            delta = target_height - h
            top = delta // 2
            bottom = delta - top
        if left or top or right or bottom:
            return ImageOps.expand(img, border=(left, top, right, bottom), fill=background_colour)
        # BUG FIX: previously fell off the end and returned None when no
        # padding was required, which crashed ImageOps.grayscale(None)
        # in latex_to_img.
        return img
|
from StatisticFunctions.StandardDeviation import StandardDeviation
from StatisticFunctions.Mean import Mean
class samplecorrelation:
    """Pearson sample correlation, built on the project's Mean and
    StandardDeviation helpers."""

    @staticmethod
    def samplecorrelation(data1, data2):
        """Return the sample correlation coefficient r of two sequences.

        r = sum((x - mean_x) * (y - mean_y)) / ((n - 1) * stdev_x * stdev_y)

        Preserves the old behaviour of returning 0 for unequal-length
        inputs (the covariance sum is simply never accumulated).

        NOTE(review): assumes StandardDeviation.stardardDev computes the
        *sample* (n - 1) standard deviation -- confirm against that class.
        """
        meanofx = Mean.mean(data1)
        meanofy = Mean.mean(data2)
        stdevx = StandardDeviation.stardardDev(data1)
        stdevy = StandardDeviation.stardardDev(data2)
        numerator = 0
        if len(data1) == len(data2):
            # BUG FIX: the old loop truncated each deviation with int(),
            # wrapped every product in abs() (destroying the sign of the
            # correlation), and divided by a *running* (n - 1) on every
            # step. Accumulate the exact products and divide once.
            for x, y in zip(data1, data2):
                numerator += (x - meanofx) * (y - meanofy)
            numerator /= (len(data1) - 1)
        return numerator / (stdevx * stdevy)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.