query
stringlengths
33
521
document
stringlengths
8
49.6k
metadata
dict
negatives
listlengths
5
101
negative_scores
listlengths
5
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Compute the matrixvector product y = Cu where C is a circulant matrix All matrices are real
def circulant_multiplication(u, a): return real(ifft(fft(a)*fft(u)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.shape\n shape_cx = cx.shape\n \n \n if shape_fx[1] != shape_cx[0]:\n print('-----------------------------------------')\n print(\"Shapes of fx and cx cannot be multiplied:\")\n print(shap...
[ "0.650418", "0.650212", "0.6441079", "0.6313763", "0.6310517", "0.62949276", "0.62782884", "0.62631303", "0.61975265", "0.6096459", "0.608041", "0.606508", "0.6038961", "0.6011421", "0.60068315", "0.59920776", "0.59303707", "0.58836865", "0.5879482", "0.58772385", "0.58575416...
0.6389226
3
Compute the matrixvector product y = Tu where T is a Toeplitz matrix All matrices are real
def toeplitz_multiplication(u, c, r=None): n = len(u) if r is None: r = c u1 = zeros((2*n)) u1[0:n] = u c = np.concatenate((c, [0], r[-1:0:-1])) y1 = circulant_multiplication(u1, c) return y1[0:n]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def matmul(x, y):\n if len(list(y.size())) == 2:\n # if one of them is a vector (i.e. wanting to do MV mult)\n z = torch.zeros(2, x.size()[1], dtype=torch...
[ "0.7003199", "0.6513981", "0.64759356", "0.6454179", "0.6377554", "0.6326698", "0.6245358", "0.620894", "0.6208685", "0.61977005", "0.6195611", "0.61694974", "0.6168602", "0.6134469", "0.6106113", "0.60868716", "0.6082444", "0.60823506", "0.6070701", "0.60688484", "0.6063607"...
0.63380134
5
Read in labels from digitStruct.mat file to create a dict of image file name and corresponding labels
def read_labels(digitstruct_file): labels = dict() for dsObj in tdqm(yieldNextDigitStruct(digitstruct_file), ncols=50): image_labels = [] for bbox in dsObj.bboxList: image_labels.append(bbox.label) labels[dsObj.name] = image_labels return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readlin...
[ "0.7442581", "0.67145514", "0.6680717", "0.66700083", "0.6651974", "0.6599294", "0.65706545", "0.6568262", "0.65624034", "0.65466106", "0.6527709", "0.65229243", "0.65100825", "0.6500305", "0.649048", "0.6466592", "0.6466018", "0.6442053", "0.6429563", "0.6409631", "0.6398935...
0.8415674
0
ref CLRS pg326, solution to the basic supply chain problem using the book notation for variables name
def fastestWay( a, t, e, x, n ): import pdb;pdb.set_trace() f1.append( ( e[0] , 1 ) ) f2.append( ( e[1] , 2 ) ) for i in xrange(n): f11 = f1[i][0]+a[0][i] f12 = f2[i][0]+a[1][i]+t[1][i+1] f22 = f2[i][0]+a[1][i] f21 = f1[i][0]+a[0][i]+t[0][i+1] f1.append( ( min( f11, f12 ), 1 ) if f11 < f12 else ( min( f11, f12 ), 2 ) ) f2.append( ( min( f21, f22 ), 2 ) if f22 < f21 else ( min( f22, f21 ), 1 ) ) f1x, f2x = f1[n][0]+x[0], f2[n][0]+x[1] return ( min( f1x, f2x ) , f1 ) if f1x < f2x else ( min( f1x, f2x ), f2 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_b2_39():\r\n pass", "def exercise_b2_113():\r\n pass", "def exercise_b2_93():\r\n pass", "def exercise_b2_98():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_106():\r\n pass", "def exercise_b2_53():\r\n pass", "def exercise_b2_43():\r\n pass", "d...
[ "0.5789567", "0.5612758", "0.56002617", "0.5582453", "0.5527549", "0.5454671", "0.5450963", "0.5440792", "0.5437476", "0.54072136", "0.5388782", "0.53822136", "0.53622717", "0.5361482", "0.53562933", "0.5325314", "0.5280684", "0.52761763", "0.5207379", "0.5178423", "0.5170255...
0.0
-1
This function computes the fundamental matrix by computing the SVD of Ax = 0 ; 8point algorithm
def computeFundamentalMatrix(pts1, pts2): A = np.empty((8, 9)) for i in range(len(pts1)-1): x1 = pts1[i][0] x2 = pts2[i][0] y1 = pts1[i][1] y2 = pts2[i][1] A[i] = np.array([x1 * x2, x2 * y1, x2, y2 * x1, y2 * y1, y2, x1, y1, 1]) # Compute F matrix by evaluating SVD U, S, V = np.linalg.svd(A) F = V[-1].reshape(3, 3) # Constrain the F matrix to rank 2 U1, S1, V1 = np.linalg.svd(F) # print('Old S', S) # S[2] = 0 S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]]) # print('New S', S) F = np.dot(np.dot(U1, S2), V1) return F
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svd0(A):\n M,N = A.shape\n if M>N: return sla.svd(A, full_matrices=True)\n else: return sla.svd(A, full_matrices=False)", "def invert_L1_svd():", "def visualize_svd():", "def svd(self):\n U, s, Vh = la.svd(self)\n S = np.zeros(self.shape)\n np.fill_diagonal(S, s)\n return...
[ "0.66048837", "0.6466162", "0.6259937", "0.6250825", "0.62505597", "0.62274474", "0.6104567", "0.6089218", "0.6025379", "0.5982765", "0.597328", "0.590215", "0.58907986", "0.58582675", "0.58575904", "0.584388", "0.58408606", "0.58376825", "0.581499", "0.58008623", "0.5792866"...
0.68444854
0
Leverages the 8point algorithm and implement RANSAC algorithm to find the inliers and the best fundamental matrix
def getInlierRANSAC(pts1, pts2): # global finalFundamentalMatrix iterations = 50 threshold = 0.01 max_count = 0 n = len(pts1) finalFundamentalMatrix = np.zeros((3, 3)) for i in range(iterations): count = 0 idx = random.sample(range(n - 1), 8) left_pts = pts1[idx] right_pts = pts2[idx] F = computeFundamentalMatrix(left_pts, right_pts) left_feature_inlier = [] right_feature_inlier = [] # print("Sample index: ", len(idx)) for j in range(0, n): homogeneous_right = np.array([pts2[j, 0], pts2[j, 1], 1]) homogeneous_left = np.array([pts1[j, 0], pts1[j, 1], 1]) fit = np.dot(homogeneous_right.T, np.dot(F, homogeneous_left)) # print("Fit for iteration ", i," ", np.abs(fit)) if np.abs(fit) < threshold: left_feature_inlier.append(pts1[j]) right_feature_inlier.append(pts2[j]) count = count + 1 # print('Inlier count', count) inlier_Left = np.array(left_feature_inlier) inlier_Right = np.array(right_feature_inlier) if count > max_count: max_count = count finalFundamentalMatrix = F final_inlier_Left = inlier_Left final_inlier_Right = inlier_Right return finalFundamentalMatrix, final_inlier_Left, final_inlier_Right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ransac(data, hypothesis, metric, sample_size, num_iter, inlier_thresh):\n N,d = data.shape\n best_frac, best_hypothesis, best_mask = 0, None, None\n for i in range(num_iter):\n js = np.random.choice(N,size=sample_size,replace=False)\n hypothesis_elements = data[js,:]\n H = hypothe...
[ "0.6203597", "0.5916464", "0.5894118", "0.5867515", "0.5715989", "0.56956524", "0.56905115", "0.5686345", "0.56403846", "0.55984086", "0.5590803", "0.5577823", "0.5559308", "0.5542196", "0.5525735", "0.55202436", "0.55189615", "0.55174667", "0.5481329", "0.5479639", "0.546803...
0.699575
0
=========================================================== DateFormatedSQL(x) =========================================================== this function converts the the date read from a list to a datetime format
def DateFormatedSQL(x): x=[i[0] for i in x] x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")]...
[ "0.66734904", "0.65000284", "0.6259414", "0.59757656", "0.5600508", "0.5579302", "0.5578522", "0.5551475", "0.5513122", "0.54512274", "0.5435365", "0.52705914", "0.52298", "0.5214014", "0.5199284", "0.51939476", "0.5177129", "0.5144611", "0.51139647", "0.5111645", "0.5084982"...
0.7940835
0
=========================================================== dateformated(x) =========================================================== this function converts the the date read from a list to a datetime format
def DateFormated(x): x1=[] for i in x: if len(i)==19: x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(i[14:16]),int(i[17:18]) )) # elif len(i)==13: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(i[11:13]),int(0),int(0) )) # else: # x1.append(datetime.datetime(int(i[:4]),int(i[5:7]),int(i[8:10]),int(0),int(0),int(0) )) # del i,x return x1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_format_from_input_to_datetime(list_d_t_t):\n data_output = []\n\n for row in list_d_t_t:\n data_output.append([datetime.datetime.strptime(row[0] + \" \" + row[1], \"%Y-%m-%d %H:%M:%S\"),\n datetime.datetime.strptime(row[0] + \" \" + row[2], \"%Y-%m-%d %H:%M:%S\")]...
[ "0.73220545", "0.6644235", "0.64673054", "0.63785565", "0.6323779", "0.63159305", "0.6256937", "0.6174311", "0.5986844", "0.58866596", "0.5878423", "0.58775616", "0.58339506", "0.579193", "0.57800907", "0.57769805", "0.5757611", "0.57572365", "0.57409817", "0.5732213", "0.572...
0.75249213
0
Mimic the & operator in R. This has to have Expression objects to be involved to work
def _op_and_(self, left: Any, right: Any) -> Any: if isinstance(left, list): # induce an intersect with Collection return Intersect(left, right) left, right = _recycle_left_right(left, right) left = Series(left).fillna(False) right = Series(right).fillna(False) return left & right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and", "def and_(a, b):", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)", "def _and(cls, arg1, arg2):\n ...
[ "0.6913351", "0.6844835", "0.6834847", "0.68041515", "0.6614185", "0.6585983", "0.65602845", "0.65299505", "0.6528684", "0.6510841", "0.6501651", "0.64942497", "0.6466398", "0.6432646", "0.639087", "0.6376711", "0.6334177", "0.6331774", "0.63316846", "0.6288898", "0.62308896"...
0.62697506
20
Mimic the & operator in R. This has to have Expression objects to be involved to work
def _op_or_(self, left: Any, right: Any) -> Any: if isinstance(left, list): return Collection(left, right) left, right = _recycle_left_right(left, right) left = Series(left).fillna(False) right = Series(right).fillna(False) return left | right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and", "def and_(a, b):", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)", "def _and(cls, arg1, arg2):\n ...
[ "0.6913351", "0.6844835", "0.6834847", "0.68041515", "0.6614185", "0.6585983", "0.65602845", "0.65299505", "0.6528684", "0.6510841", "0.6501651", "0.64942497", "0.6466398", "0.6432646", "0.639087", "0.6376711", "0.6334177", "0.6331774", "0.63316846", "0.6288898", "0.62697506"...
0.0
-1
type + sequence_number + key_size + key + value_size + value 1bit 63bit 32bit varlength 32bit varlength
def __init__(self, key, sequence_number, type=KeyType.PUT, value=None): assert key is not None assert sequence_number >= 0 self.type = type self.sequence_number = sequence_number self.key = key self.value = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_meta_chunk(key, value):\n bkey = key.encode(\"utf-8\")\n bvalue = value.encode(\"utf-8\")\n return (wozardry.to_uint32(len(bkey) + len(bvalue) + 2) + bkey + b'\\x09' + bvalue + b'\\x0A').hex()", "def _pack_dict( self, table, pad = False ) :\r\n\r\n keys, values = zip( *table...
[ "0.5809457", "0.5597021", "0.55033654", "0.5478504", "0.5475797", "0.5431724", "0.54011375", "0.53711766", "0.535695", "0.53174025", "0.53174025", "0.5285025", "0.5257738", "0.52197987", "0.5217525", "0.52163196", "0.51897675", "0.51784354", "0.5164822", "0.5161085", "0.51330...
0.48401326
43
Find all occurences of val on list lo Returns a list of indices of val on lo.
def findall(lo,val): u = [] i = -1 while( i < len(lo)-1): try: i = lo.index(val,i+1) u.append(i) except: i += 1 return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n ...
[ "0.70793176", "0.7071109", "0.66156113", "0.6319743", "0.6250533", "0.620535", "0.62024206", "0.619781", "0.6169729", "0.60985184", "0.6077737", "0.6067603", "0.5907215", "0.5842827", "0.58379203", "0.5817054", "0.58101517", "0.5786621", "0.577902", "0.5734879", "0.5654707", ...
0.8087934
0
Generate an identity key pair. Clients should only do this once, at install time. the generated IdentityKeyPair.
def generateIdentityKeyPair(): keyPair = Curve.generateKeyPair() publicKey = IdentityKey(keyPair.getPublicKey()) serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \ 'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \ 'edfbcd82129b14a88791ac81365c' serialized = binascii.unhexlify(serialized.encode()) identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey()) return identityKeyPair # return IdentityKeyPair(serialized=serialized)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_key_pair(self, keysize, cb):\n\n def gen_key_pair_pub_cb(data, ctx):\n if not data:\n warning('keymanagement: Could not generate a key pair\\n')\n cb(None, None)\n else:\n cb(ctx, data)\n\n def gen_key_pair_priv_cb(data, ctx):...
[ "0.71120954", "0.68204904", "0.65850353", "0.6553852", "0.65218014", "0.6443924", "0.6378423", "0.63755655", "0.6358998", "0.6324435", "0.63147116", "0.6309187", "0.62952316", "0.6251726", "0.61868006", "0.61828184", "0.61474425", "0.6133282", "0.61287004", "0.60888094", "0.6...
0.8180875
0
Test pointwise arithmetic with stencil offsets across a single functions with buffering dimension in indexed expression format
def test_indexed_buffered(self, expr, result): i, j, l = dimify('i j l') a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base fa = a.function eqn = eval(expr) Operator(eqn)(fa) assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n...
[ "0.70109046", "0.5731927", "0.5725207", "0.5534058", "0.5485873", "0.54508567", "0.5420271", "0.5402067", "0.53938526", "0.5361206", "0.53606236", "0.5359261", "0.5359202", "0.5320887", "0.5293664", "0.5291382", "0.5265721", "0.52385396", "0.523383", "0.52290106", "0.52282166...
0.5903518
1
You can override this method if you want to change the format of outputs (e.g., storing gradients)
def update_output(self, ): input_ids, outputs, grads, adv_tokens = self.batch_output probs = softmax(outputs, dim=-1) probs, labels = torch.max(probs, dim=-1) tokens = [ self.tokenizer.convert_ids_to_tokens(input_ids_) for input_ids_ in input_ids ] embedding_grads = grads.sum(dim=2) # norm for each sequence norms = torch.norm(embedding_grads, dim=1, p=2) # need check hyperparameter # normalizing for i, norm in enumerate(norms): embedding_grads[i] = torch.abs(embedding_grads[i]) / norm batch_output = [] # check probs, labels shape labels = torch.reshape(labels, (1, -1)) probs = torch.reshape(probs, (1, -1)) iterator = zip(tokens, probs, embedding_grads, labels) for example_tokens, example_prob, example_grad, example_label in iterator: example_dict = dict() # as we do it by batches we has a padding so we need to remove it example_tokens = [t for t in example_tokens if t != self.tokenizer.pad_token] example_dict['tokens'] = example_tokens example_dict['grad'] = example_grad.cpu().tolist()[:len(example_tokens)] example_dict['label'] = example_label.cpu().tolist()[:len(example_tokens)] # example_label.item() example_dict['prob'] = example_prob.cpu().tolist()[:len(example_tokens)] # example_prob.item() batch_output.append(example_dict) return batch_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def out(self, inputs):", "def _save_grad_output(self, mod, grad_input, grad_output):\n if mod.training:\n self.state[mod][\"gy\"] = grad_output[0] * grad_output[0].size(0)", "def _output_update(self):\n self._outputtype = self.inputs.outputtype", "def outputs(self):\n return s...
[ "0.6842611", "0.63906467", "0.62235004", "0.62234104", "0.62234104", "0.62234104", "0.62234104", "0.6200916", "0.61630815", "0.6138572", "0.6102943", "0.6075281", "0.6048927", "0.60099345", "0.60045785", "0.59973353", "0.5995878", "0.59723157", "0.5926936", "0.5917695", "0.59...
0.0
-1
If USE_PATH is True rely on PATH to look for binaries. Otherwise ../src/ is used by default.
def binary_location(cmd, USE_PATH=False): if USE_PATH: return cmd else: return os.path.join(BIN_PREFIX, cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def set_path():\n impor...
[ "0.64583826", "0.6119704", "0.60500836", "0.5732794", "0.5658576", "0.56554246", "0.5512849", "0.5505444", "0.54935056", "0.5483672", "0.5481498", "0.5455168", "0.5439266", "0.5433778", "0.54287785", "0.5424196", "0.5423426", "0.5394256", "0.53781176", "0.5329684", "0.5325822...
0.6302872
1
Remove the user from 'workers' or 'prospects', if applicable. user A TcsUser instance to remove from workers
def removeWorker(self, user): if user == self.owner: return None # Without these queries, there's no way to tell if anything actually gets removed. # Calling remove() on a user that is not in the set does not raise an error. if self.workers.filter(pk=user.id).exists(): self.workers.remove(user) return self if self.prospects.filter(pk=user.id).exists(): self.prospects.remove(user) return self return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user...
[ "0.6894938", "0.68542784", "0.685336", "0.67998946", "0.6638343", "0.6393456", "0.63634795", "0.6293484", "0.6282741", "0.6280759", "0.6247314", "0.62332475", "0.6231962", "0.6227879", "0.61912465", "0.6178004", "0.6158775", "0.6148308", "0.6134449", "0.6126714", "0.6120114",...
0.7817377
0
Unfreeze a FrozenDict. Makes a mutable copy of a `FrozenDict` mutable by transforming it into (nested) dict.
def unfreeze(x: Union[FrozenDict, Dict[str, Any]]) -> Dict[Any, Any]: if isinstance(x, FrozenDict): # deep copy internal state of a FrozenDict # the dict branch would also work here but # it is much less performant because jax.tree_util.tree_map # uses an optimized C implementation. return jax.tree_util.tree_map(lambda y: y, x._dict) # type: ignore elif isinstance(x, dict): ys = {} for key, value in x.items(): ys[key] = unfreeze(value) return ys else: return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_freeze(xs: Any) -> Any:\n if isinstance(xs, FrozenDict):\n # we can safely ref share the internal state of a FrozenDict\n # because it is immutable.\n return xs._dict # pylint: disable=protected-access\n if not isinstance(xs, dict):\n # return a leaf as is.\n return xs\n # recursive...
[ "0.678866", "0.63871247", "0.63075334", "0.6243246", "0.61387753", "0.61324066", "0.5833921", "0.56678116", "0.5610028", "0.5581237", "0.547987", "0.528664", "0.5166779", "0.51488274", "0.5131935", "0.513004", "0.5127286", "0.51143354", "0.5108335", "0.5104038", "0.50658894",...
0.80311483
0
This method is used to postprocess the form data. By default, it returns the raw `form.data` dictionary.
def process_step(self, form): #print(form.data) #print(form.data) #print(self) institution = {} inst_list = [] if self.steps.current == '1': institution['institution'] = form.data['1-0-institution'] institution['date_from'] = form.data['1-0-date_from'] institution['date_to'] = form.data['1-0-date_to'] inst_list.append(institution) inst_keys = dict(form.data.lists()) #Create dictionary dynamically for the other institutions incase more than two institutions are entered if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is list: inst_list2 = [] #Add institutions for i,insti in enumerate(inst_keys.get('1-NaN-institution')): inst_i = {} #print(i) date_from = inst_keys['1-NaN-date_from'][i] date_to = inst_keys['1-NaN-date_to'][i] course_duration = inst_keys['1-NaN-course_duration'][i] inst_i['institution'] = insti inst_i['date_from'] = date_from inst_i['date_to'] = date_to inst_list2.append(inst_i) #print(inst_list2) inst_list.extend(inst_list2) #Create dictionary dynamically for the other institutions incase more than two institutions are entered if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is not list: inst_0 = {} inst_0['institution'] = form.data['1-NaN-institution'] inst_0['date_from'] = form.data['1-NaN-date_from'] inst_0['date_to'] = form.data['1-NaN-date_to'] inst_0['course_duration'] = form.data['1-NaN-course_duration'] #inst_0['achievements'] = '' inst_list.append(inst_0) #Add the entered information to a session object self.request.session['institution'] = inst_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_post_data(self):\n if not self._form_data:\n self._form_data = async_to_sync(self.request.form)()\n return self._form_data", "def get_form_data(self) -> dict:\n with logging.LogCall(__file__, \"get_form_data\", self.__class__):\n return self.serialize()", "def...
[ "0.79507184", "0.76475173", "0.738958", "0.73285085", "0.7258168", "0.6955681", "0.6792374", "0.65343493", "0.65144867", "0.6490603", "0.6459408", "0.6415936", "0.63819605", "0.63031137", "0.6298642", "0.6277921", "0.62373143", "0.6232082", "0.62192225", "0.60904664", "0.6056...
0.0
-1
Ensure that awsmarketplace actions from all the different awsmarketplace SAR pages are present in the IAM definition.
def test_services_with_multiple_pages_aws_marketplace(self): # Overlap: AWS Marketplace, Marketplace Catalog, and AWS Marketplace Entitlement service, AWS Marketplace Image Building Service, AWS Marketplace Metering Service, AWS Marketplace Private Marketplace, and AWS Marketplace Procurement Systems # AWS Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplace.html self.assertTrue("aws-marketplace:AcceptAgreementApprovalRequest" in self.all_actions) # AWS Marketplace Catalog: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacecatalog.html self.assertTrue("aws-marketplace:CancelChangeSet" in self.all_actions) # AWS Marketplace Entitlement Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceentitlementservice.html self.assertTrue("aws-marketplace:GetEntitlements" in self.all_actions) # AWS Marketplace Image Building Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceimagebuildingservice.html self.assertTrue("aws-marketplace:DescribeBuilds" in self.all_actions) # AWS Marketplace Metering Service: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplacemeteringservice.html self.assertTrue("aws-marketplace:BatchMeterUsage" in self.all_actions) # AWS Marketplace Private Marketplace: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprivatemarketplace.html self.assertTrue("aws-marketplace:AssociateProductsWithPrivateMarketplace" in self.all_actions) # AWS Marketplace Procurement Systems: https://docs.aws.amazon.com/service-authorization/latest/reference/list_awsmarketplaceprocurementsystemsintegration.html self.assertTrue("aws-marketplace:DescribeProcurementSystemConfiguration" in self.all_actions) results = get_actions_for_service("aws-marketplace") actions = [ "aws-marketplace:AcceptAgreementApprovalRequest", 
"aws-marketplace:BatchMeterUsage", "aws-marketplace:CancelAgreementRequest", "aws-marketplace:CancelChangeSet", "aws-marketplace:CompleteTask", "aws-marketplace:DescribeAgreement", "aws-marketplace:DescribeBuilds", "aws-marketplace:DescribeChangeSet", "aws-marketplace:DescribeEntity", "aws-marketplace:DescribeProcurementSystemConfiguration", "aws-marketplace:DescribeTask", "aws-marketplace:GetAgreementApprovalRequest", "aws-marketplace:GetAgreementRequest", "aws-marketplace:GetAgreementTerms", "aws-marketplace:GetEntitlements", "aws-marketplace:ListAgreementApprovalRequests", "aws-marketplace:ListAgreementRequests", "aws-marketplace:ListBuilds", "aws-marketplace:ListChangeSets", "aws-marketplace:ListEntities", "aws-marketplace:ListTasks", "aws-marketplace:MeterUsage", "aws-marketplace:PutProcurementSystemConfiguration", "aws-marketplace:RegisterUsage", "aws-marketplace:RejectAgreementApprovalRequest", "aws-marketplace:ResolveCustomer", "aws-marketplace:SearchAgreements", "aws-marketplace:StartBuild", "aws-marketplace:StartChangeSet", "aws-marketplace:Subscribe", "aws-marketplace:Unsubscribe", "aws-marketplace:UpdateAgreementApprovalRequest", "aws-marketplace:UpdateTask", "aws-marketplace:ViewSubscriptions", ] for action in actions: self.assertTrue(action in results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_actions_with_arn_type_and_access_level_case_3(self):\n desired_output = [\n 's3:PutAccountPublicAccessBlock',\n 's3:PutAccessPointPublicAccessBlock'\n ]\n output = get_actions_with_arn_type_and_access_level(\n # \"ram\", \"resource-share\", \"Write...
[ "0.58062184", "0.57554114", "0.5695702", "0.5673701", "0.5447731", "0.5414008", "0.53181934", "0.5090683", "0.5057209", "0.49688128", "0.49623215", "0.49491638", "0.49443293", "0.49354288", "0.4900723", "0.48934472", "0.48312107", "0.48288488", "0.47950754", "0.4768656", "0.4...
0.66148233
0
Called when a mouse button is pressed in the widget. Adjust method signature as appropriate for callback.
def button_press_event(self, widget, event): x, y = event.x, event.y # x, y = coordinates where the button was pressed self.last_win_x, self.last_win_y = x, y button = 0 # Prepare a button mask with bits set as follows: # left button: 0x1 # middle button: 0x2 # right button: 0x4 # Others can be added as appropriate self.logger.debug("button down event at %dx%d, button=%x" % (x, y, button)) data_x, data_y = self.check_cursor_location() return self.make_ui_callback('button-press', button, data_x, data_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_press(self, x, y, button):\n\n pass", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def handle_mouse_press(self, event):", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pa...
[ "0.8186688", "0.7803107", "0.7685904", "0.7667033", "0.7550329", "0.75264764", "0.74540734", "0.74537903", "0.7434162", "0.71306", "0.71141076", "0.7086629", "0.70717835", "0.70475805", "0.70216006", "0.70136315", "0.69730556", "0.69179136", "0.69159424", "0.69106615", "0.690...
0.786055
1
Called when a mouse button is released after being pressed. Adjust method signature as appropriate for callback.
def button_release_event(self, widget, event): x, y = event.x, event.y # x, y = coordinates where the button was released self.last_win_x, self.last_win_y = x, y button = 0 # prepare button mask as in button_press_event() data_x, data_y = self.check_cursor_location() return self.make_ui_callback('button-release', button, data_x, data_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_release(self, x, y, button):\n pass", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def OnMouseUp(self, evt):\n self.ReleaseMouse()", "def emitReleaseEvent(self, clickLocat...
[ "0.80996853", "0.80075884", "0.7933358", "0.74254155", "0.7286587", "0.7269564", "0.72388613", "0.72388613", "0.72174096", "0.71852064", "0.7037113", "0.7027572", "0.7019888", "0.69756335", "0.6968941", "0.6932969", "0.69013923", "0.68389016", "0.6834489", "0.68217915", "0.68...
0.7786435
3
Called when a mouse cursor is moving in the widget. Adjust method signature as appropriate for callback.
def motion_notify_event(self, widget, event): x, y = event.x, event.y # x, y = coordinates of cursor self.last_win_x, self.last_win_y = x, y button = 0 # prepare button mask as in button_press_event() data_x, data_y = self.check_cursor_location() return self.make_ui_callback('motion', button, data_x, data_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)", "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass", "def __mouseMoved(self, x, y):\n # Are we on the bounding box?\n if...
[ "0.7342505", "0.7273285", "0.7090941", "0.70634186", "0.7034477", "0.69676363", "0.6943248", "0.68984425", "0.669294", "0.659831", "0.65874934", "0.6549803", "0.6516948", "0.6500888", "0.6497365", "0.6471458", "0.6461945", "0.64583486", "0.6448528", "0.6439387", "0.63827825",...
0.64577484
18
Called when a drop (drag/drop) event happens in the widget. Adjust method signature as appropriate for callback.
def drop_event(self, widget, event): # make a call back with a list of URLs that were dropped #self.logger.debug("dropped filename(s): %s" % (str(paths))) #self.make_ui_callback('drag-drop', paths) raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dropEvent(self, de):\n # dragging a track\n if hasattr(Globals.dragObject, \"trackFrame\"):\n de.accept()\n trackFrame = Globals.dragObject.trackFrame\n oldParent = trackFrame.parentWidget()\n if oldParent:\n args = (trackFrame, self, old...
[ "0.72224736", "0.7178574", "0.7033632", "0.69308865", "0.69137365", "0.68517953", "0.66538286", "0.660371", "0.6481881", "0.6399347", "0.6314507", "0.63019335", "0.6220606", "0.6072313", "0.60482043", "0.6036351", "0.59223694", "0.59200025", "0.58687717", "0.58277893", "0.573...
0.81672764
0
Creates a data folder containing a 100class subset of ImageNet, then creates a zipped copy of it
def zip_imagenet100c(): #First make sure the directory we are given is correct! if not os.path.isdir(DATA_SRC_ROOT): raise Exception("Bad filepath given") #create the destiantion directories if they don't exist if not os.path.isdir(IMAGENET100_DIR): os.mkdir(IMAGENET100_DIR) #grab the subset wnids for the 100 class-subset with open(IMAGENET100_CLASSES) as f: subset_wnids = f.readlines() subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab #Grab the names of all of the folders inside the root data source #Structure is distortion/sub_distortion/level/wnids for distortion in os.listdir(DATA_SRC_ROOT): if distortion != "meta.bin": print(distortion) folder_path = os.path.join(DATA_SRC_ROOT, distortion) if not os.path.isdir(folder_path): continue for sub_distortion in os.listdir(folder_path): print(sub_distortion) subfolder_path = os.path.join(folder_path, sub_distortion) if not os.path.isdir(subfolder_path): continue for level in os.listdir(subfolder_path): print(level) level_path = os.path.join(subfolder_path, level) #grab the correcrt validation d9recotires for wnid in os.listdir(level_path): wnid_path = os.path.join(level_path, wnid) if not os.path.isdir(wnid_path): continue if wnid in subset_wnids: dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid) shutil.copytree(wnid_path, dest_path) #copy the metadata bin file meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin') meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin') shutil.copy(meta_file, meta_dest) #Zip the destinatio file shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='...
[ "0.6529704", "0.6481513", "0.6435279", "0.6418067", "0.63397163", "0.6227401", "0.62035155", "0.61983734", "0.6165103", "0.61225355", "0.612228", "0.60964", "0.60848254", "0.6073827", "0.60563177", "0.6047265", "0.60234", "0.6019958", "0.60174394", "0.6004536", "0.5983722", ...
0.7347322
0
Given a wav file, use Praat to return a dictionary containing pitch (in Hz) at each millisecond.
def praat_analyze_pitch(audio_file): praatpath = path.abspath('Praat.app/Contents/MacOS/Praat') # locate Praat executable pl = PraatLoader(praatpath=praatpath) # create instance of PraatLoader object praat_output = pl.run_script('pitch.praat', audio_file) # run pitch script in Praat pitch_data = pl.read_praat_out(praat_output) # turn Praat's output into Python dict return pitch_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):\n if os.path.isfile(filename) is False:\n raise Exception('File not found with filename = %s' % filename)\n\n print(\"====> reading pitch from sound file\")\n win_s = 4096 // DOWN_SAMPLE # fft size\n hop_s = 5...
[ "0.70273054", "0.70135546", "0.68500084", "0.6821027", "0.6812894", "0.6796246", "0.65991676", "0.6327907", "0.6325302", "0.62763965", "0.62052196", "0.61778903", "0.60919714", "0.60821027", "0.60718834", "0.60426664", "0.59239006", "0.59204537", "0.5904547", "0.58931905", "0...
0.6584172
7
Checks whether the given ISBN13 code is valid. >>> isISBN13('9789743159664') True >>> isISBN13('9787954527409') False >>> isISBN13('8799743159665') False
def isISBN13(code): # helper function for computing ISBN-10 check digit def check_digit(code): # compute check digit check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12)) # convert check digit into a single digit return str((10 - check) % 10) # check whether given code is a string if not isinstance(code, str): return False # check whether given code contains 10 characters if len(code) != 13: return False # check whether first nine characters of given code are digits if not code[:12].isdigit(): return False # check the check digit return check_digit(code) == code[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def isISBN(code, isbn13=True):\n\n ret...
[ "0.8294502", "0.8156061", "0.7813687", "0.7593365", "0.7539352", "0.7096975", "0.70109516", "0.6998053", "0.6760535", "0.6702344", "0.6442007", "0.64231825", "0.6403078", "0.6313602", "0.6305555", "0.6260889", "0.6107894", "0.5969835", "0.5896946", "0.57390374", "0.56742215",...
0.86895674
0
Parse OpenSSLstyle foo.0, foo.1, ... subscripted options. Returns a list of values matching the specified option name.
def multiget(self, option, section = None): matches = [] if section is None: section = self.default_section if self.cfg.has_option(section, option): matches.append((-1, self.get(option, section = section))) for key, value in self.cfg.items(section): s = key.rsplit(".", 1) if len(s) == 2 and s[0] == option and s[1].isdigit(): matches.append((int(s[1]), self.get(option, section = section))) matches.sort() return [match[1] for match in matches]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_option(self, name):\r\n if not isinstance(name, str):\r\n name = \" \".join(name)\r\n lines = self.sendAndRecv(\"GETCONF %s\\r\\n\" % name)\r\n\r\n r = []\r\n for _,line,_ in lines:\r\n try:\r\n key, val = line.split(\"=\", 1)\r\n r.append((key,val))\r\n except Valu...
[ "0.5681802", "0.5620391", "0.5565046", "0.541359", "0.53751975", "0.5340231", "0.5283279", "0.5276728", "0.52554685", "0.5251639", "0.51984483", "0.5196909", "0.519201", "0.5174309", "0.5122386", "0.51024044", "0.51020473", "0.5095427", "0.5034037", "0.5030856", "0.50224656",...
0.5834238
0
namespaces typecodes representing global elements with literal encoding. typeCode typecode representing an element. namespaceURI namespace literal True/False
def _globalElement(self, typeCode, namespaceURI, literal): if literal: typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \ %{'prefix':self._getPrefix(namespaceURI), 'name':typeCode.oname, 'namespaceURI':namespaceURI}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XmlTypeNamespace(self) -> str:", "def is_namespace_type(self):\n raise exceptions.NotImplementedError()", "def GetNamespaces(self):\n return list(self.type_namespaces_map.values())", "def element_type(self) -> global___Type:", "def patch_well_known_namespaces(etree_module):\n etree_module....
[ "0.66442066", "0.5593534", "0.5443724", "0.5412149", "0.5365882", "0.5329653", "0.5311929", "0.5237586", "0.5178215", "0.5165827", "0.5055696", "0.5044016", "0.5007665", "0.4926739", "0.48959085", "0.48641986", "0.48631665", "0.4855509", "0.48434836", "0.48183277", "0.4792356...
0.6797826
0
Returns a True if a customer identifier does not belongs to dataframe used to build classifier model.
def is_customer_out_sample(self, customerID): listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique()) is_flag = customerID in listCustomer return is_flag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_customer(self) -> bool:\n return self.customer_id is not None", "def has_customer(self):\n return self.customer is not None", "def is_customer(self):\n return self.user_type == 'C'", "def is_label_dataframe(label, df):\n\n setdiff = set(label) - set(df.columns.tolist())\n\n ...
[ "0.661681", "0.6462838", "0.59257627", "0.5741564", "0.5628499", "0.55574435", "0.5537199", "0.5509296", "0.5507439", "0.5461328", "0.5405652", "0.53825015", "0.5370098", "0.5365358", "0.5328702", "0.53143424", "0.529039", "0.52698815", "0.52663475", "0.52634853", "0.52370775...
0.63775516
2
Returns RFM score from dataframe given from parameter. RFM score is computed from local RFM matrix threshold.
def get_rfm(self, df): df_tmp, df_RFM, df_RFM_threshold, day_now \ = p5_util.p5_df_rfm_build(df, df_RFM_threshold=self.df_RFM_quantiles ,day_now = self._day_now) RFM = df_RFM.RFM.iloc[0] return str(RFM)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfm_score(dataframe):\n\n dataframe[\"recency_score\"] = pd.qcut(dataframe['recency'].rank(method=\"first\"), 5, labels=[5, 4, 3, 2, 1])\n dataframe[\"frequency_score\"] = pd.cut(dataframe['frequency'], bins=[0, 4, 8, 13, 17, 20], labels=[1, 2, 3, 4, 5])\n dataframe[\"RFM_SCORE\"] = (dataframe[...
[ "0.6958095", "0.61751866", "0.60585284", "0.5860262", "0.58369166", "0.5765395", "0.57476515", "0.5725581", "0.5717504", "0.5708911", "0.5702308", "0.5689124", "0.5686297", "0.5685354", "0.56756556", "0.5671112", "0.5645583", "0.5639483", "0.56138635", "0.56126195", "0.559187...
0.60182154
3
Returns the data representation of the timer, will be used to send it over the web socket.
def get_list_data(self): key = 'timer' if self.repeated: key += '_repeat' return '%s %s' % (key, self.data.get_list_data())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_timer_data(self):\n return {\"sRef\": self._timer_service_ref_entry.get_text(),\n \"begin\": int(datetime.strptime(self._timer_begins_entry.get_text(), self._TIME_STR).timestamp()),\n \"end\": int(datetime.strptime(self._timer_ends_entry.get_text(), self._TIME_STR).time...
[ "0.7045231", "0.6424909", "0.63322824", "0.61530703", "0.61145544", "0.6017771", "0.592645", "0.59186465", "0.59073746", "0.5903025", "0.5891447", "0.58581436", "0.5799672", "0.5785056", "0.575003", "0.5743803", "0.5738986", "0.5706574", "0.5649952", "0.5620999", "0.5616731",...
0.63464636
2
Executing every forest in collection, activating their networks. By the way collecting data about best fitness function.
def execute(self): process_list = [] forests_queue = Queue(self.power) iterational = 0 print '| |-starting evaluation, training and validation' for one_forest in self._forests: process_list.append( Process(target=main_async_method, args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings))) iterational += 1 for proc in process_list: proc.start() for proc in process_list: proc.join() for smth in range(forests_queue.qsize()): tmp = forests_queue.get() self._forests[tmp['place']].fitness = tmp['fitness'] fitness_summ = sum(map(lambda forest: forest.fitness, self._forests)) fss = map(lambda x: x.fitness, self._forests) print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss) self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self....
[ "0.66012484", "0.62504995", "0.61932045", "0.6135642", "0.6134355", "0.61263996", "0.6049594", "0.5867963", "0.5865974", "0.5832648", "0.581788", "0.5802029", "0.57808244", "0.5779419", "0.5708533", "0.56901664", "0.5634386", "0.56181246", "0.558044", "0.553923", "0.55354095"...
0.7236054
0
Expects a list of signals and a list of bkgs (Dataset objects), and a cut_function and cut_values.
def roccurve(signals, bkgs, cut_function, cut_values): eff_sig, n_pass_sig, n_total_sig = get_eff(svjflatanalysis.iterate(signals), cut_function, cut_values) eff_bkg, n_pass_bkg, n_total_bkg = get_eff(svjflatanalysis.iterate(bkgs), cut_function, cut_values) return eff_sig, eff_bkg, n_pass_sig, n_pass_bkg, n_total_sig, n_total_bkg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n ...
[ "0.55629873", "0.5544333", "0.54654413", "0.53966707", "0.5293295", "0.5175403", "0.5154368", "0.50937045", "0.5059277", "0.5027049", "0.50096345", "0.49881732", "0.49795693", "0.4978986", "0.49496424", "0.4933744", "0.4921186", "0.49105307", "0.49013457", "0.48807377", "0.48...
0.6186934
0
Audit the commit for proper endofline characters. The UNIX type EOL is the only allowed EOL character.
def audit_eol(self): # Regex's.... re_commit = re.compile("^\xff(.+)\xff$") re_filename = re.compile("^diff --(cc |git a\/.+ b\/)(.+)$") blocked_eol = re.compile(r"(?:\r\n|\n\r|\r)$") # Bool to allow special files such as vcards to bypass the check eol_allowed = False # Do EOL audit! process = get_change_diff( self.repository, ["-p"] ) for line in process.stdout: commit_change = re.match( re_commit, line ) if commit_change: commit = commit_change.group(1) continue file_change = re.match( re_filename, line ) if file_change: filename = file_change.group(2) eol_violation = False eol_allowed = False # Check if it's an allowed mimetype # First - check with the mimetypes system, to see if it can tell guessed_type, _ = mimetypes.guess_type(filename) if guessed_type in self.ALLOWED_EOL_MIMETYPES: eol_allowed = True continue # Second check: by file extension # NOTE: This uses the FIRST dot as extension splitted_filename = filename.split(os.extsep) # Check if there's an extension or not # NOTE This assumes that files use dots for extensions only! if len(splitted_filename) > 1: extension = splitted_filename[1] if extension in self.ALLOWED_EOL_EXTENSIONS: eol_allowed = True continue # Unless they added it, ignore it if not line.startswith("+"): continue if re.search( blocked_eol, line ) and not eol_violation: # Is this an allowed filename? if eol_allowed: continue # Failure has been found... handle it eol_violation = True self.__log_failure(commit, "End of Line Style (non-Unix): " + filename);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eol(self):\n if self.current not in EOL:\n self.on_parser_error(\"EOL expected\")\n self.maybe_eol()", "def _output_commit_line(self): # noqa: C901, E501 pylint: disable=too-many-branches\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns ...
[ "0.6151211", "0.6065239", "0.57468516", "0.5741316", "0.5723494", "0.5639385", "0.5574638", "0.5561204", "0.5554823", "0.55486727", "0.553186", "0.5530341", "0.55275774", "0.5481987", "0.54660696", "0.540383", "0.5398025", "0.5388231", "0.53565466", "0.53498983", "0.5348075",...
0.83223575
0
Takes a user and a group name, and returns `True` if the user is in that group.
def is_in_group(user, group_name): return is_in_group_user_id(user.id, group_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_user_in_group(user, group):\n users = group.get_users()\n if user in users:\n return True\n return False", "def is_in_group(user, group_name):\n return user.groups.filter(name__exact=group_name).exists()", "def is_in_group(user, group_name):\n return Group.objects.get(name=group_na...
[ "0.90404195", "0.89909446", "0.8921993", "0.88956964", "0.8691311", "0.8651488", "0.85226196", "0.8471016", "0.8424121", "0.83981097", "0.83976525", "0.8325658", "0.8266118", "0.8199991", "0.81931895", "0.7539715", "0.7505139", "0.7332905", "0.726839", "0.7217727", "0.7149777...
0.90948594
0
Score vector of model. Default implementation sums score_obs. The gradient of loglike with respect to each parameter.
def score(self, params, *args, **kwargs): try: # If an analytic score_obs is available, try this first before # falling back to numerical differentiation below return self.score_obs(params, *args, **kwargs).sum(0) except NotImplementedError: # Fallback in case a `loglike` is implemented but `loglikeobs` # is not. approx_func = (approx_fprime_cs if self._use_approx_cs else approx_fprime) return approx_func(params, self.loglike, args=args, kwargs=kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product", "def svm_loss(scores, y):\r\n\r\n N = scores.shape[0]\r\n\r\n # Compute svm data loss\r\n correct_class_scores = scores[range(N), y]\r\n margins = np.m...
[ "0.6385058", "0.6369366", "0.6337341", "0.62241894", "0.6202223", "0.6085212", "0.6069365", "0.5963406", "0.59437096", "0.59082556", "0.5902111", "0.58229357", "0.5803482", "0.57961464", "0.5775284", "0.5754475", "0.5751942", "0.5720772", "0.5715828", "0.57079184", "0.5697270...
0.6393054
0
Evaluate the size expected from the FP relation for a given velocity dispersion and Vband apparent magnitude
def get_effective_radius(self, vel_disp, m_V): log_vel_disp = np.log10(vel_disp) log_R_eff = self.a*log_vel_disp + self.b*m_V + self.c + np.random.randn()*self.intrinsic_scatter R_eff = 10**log_R_eff return R_eff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fluxRatio_fromVmag(Vmag):\n fluxRatio = 10.**(-0.4*Vmag)\n return fluxRatio", "def width_v_a(model: SingleRhNeutrinoModel) -> float:\n u = 0.5 * np.tan(2 * model.theta)\n return 9 * ALPHA_EM * GF**2 / (256 * np.pi**4) * model.mx**5 * u**2", "def testCalspecMags(self):\n std = MKIDStd.MKI...
[ "0.6291265", "0.5848869", "0.5809261", "0.5802869", "0.57336414", "0.5729062", "0.569543", "0.56734866", "0.5662906", "0.56628376", "0.56435436", "0.56359833", "0.5614053", "0.5611046", "0.55810857", "0.55808634", "0.5556087", "0.55491066", "0.55242413", "0.5521659", "0.55142...
0.0
-1
Parse name and seed for uci regression data. E.g. yacht_2 is the yacht dataset with seed 2.
def _parse_uci_regression_dataset(name_str): pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)" pattern = re.compile(pattern_string) matched = pattern.match(name_str) if matched: name = matched.group("name") seed = matched.group("seed") return name, seed return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_uci_regression_dataset(name,\n split_seed,\n train_fraction=0.9,\n data_dir=\"uci_datasets\"):\n path = os.path.join(data_dir,\n _UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)])\n dat...
[ "0.6082497", "0.5352037", "0.5263292", "0.5021193", "0.49601898", "0.48798177", "0.4859456", "0.48388806", "0.48280886", "0.48230565", "0.48205665", "0.4811698", "0.4808677", "0.4778711", "0.477406", "0.4759555", "0.47592923", "0.47398236", "0.47390524", "0.4733845", "0.47287...
0.730633
0
Converts a string of text into a numerical vector of features based on the word embedding LTM.
def vectorize(self,text): lv_active = set() words = word_tokenize(text) for word in words: if word in self.tree: ancestors = self.tree.word_ancestors(word) lv_active.update(ancestors) return self.nl.isin(lv_active).values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in id...
[ "0.70021975", "0.6557391", "0.65419817", "0.65404963", "0.6515521", "0.64831823", "0.6474055", "0.64408255", "0.64127994", "0.6378021", "0.63599265", "0.63383466", "0.6331677", "0.6309186", "0.63056415", "0.6245646", "0.6237467", "0.62303", "0.62136185", "0.61973566", "0.6189...
0.0
-1
Creates a Tensor for use as an Embedding initialization from the source vocabulary and predefined word embeddings.
def get_pretrained_embeddings(source_vocab,embed_df): num_tokens = len(source_vocab) embedding_dim = embed_df.shape[1] weights = np.zeros((num_tokens,embedding_dim),dtype=np.float32) for idx in range(num_tokens): token = source_vocab.lookup_index(idx) if token in embed_df.index: weights[idx,:] = embed_df.loc[token] else: weights[idx,:] = np.random.randn(1,embedding_dim) embed_tensor = torch.FloatTensor(weights) return embed_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_embedding_layer(inputs_, vocab_size, embed_size):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\n \n return embed", "def init_word_embed(config):\n embedding_mat_val = np.load(config.wordembed_params)\n ...
[ "0.74340177", "0.7306449", "0.72952855", "0.70558393", "0.7044115", "0.68895066", "0.67812735", "0.6750932", "0.67492104", "0.67476356", "0.67427427", "0.6717155", "0.6711093", "0.6680487", "0.6676144", "0.6672202", "0.6647878", "0.6621589", "0.66202843", "0.65980107", "0.658...
0.66311824
17
Retrieve the dictform of all of the transactions in a given bar or for the whole simulation.
def transactions(self, dt=None): if dt is None: # flatten the by-day transactions return [ txn for by_day in itervalues(self._processed_transactions) for txn in by_day ] return self._processed_transactions.get(dt, [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def transaction_base() -> Dict[str, Any]:\n return {\n \"first_name\": \"Donald\",\n \"last_name\": \"Duck\",\n \"company\": \"Duck Co\",\n \"email\": \...
[ "0.63048834", "0.602332", "0.5960751", "0.56639814", "0.56584823", "0.5563182", "0.5531879", "0.55191296", "0.55072176", "0.5477383", "0.54718333", "0.5458796", "0.5426807", "0.54197216", "0.541453", "0.5338547", "0.5327534", "0.53225285", "0.53161556", "0.5279584", "0.526199...
0.0
-1
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _increment_file_counter(self): self._add_to_file_counter(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
[ "0.83802605", "0.6079216", "0.60073787", "0.5972287", "0.5871502", "0.58649766", "0.58356875", "0.5813897", "0.5656929", "0.5650024", "0.5628978", "0.5588129", "0.5575472", "0.55536973", "0.55466735", "0.55383646", "0.5529106", "0.5502226", "0.54975855", "0.5481056", "0.54359...
0.71174276
1
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _decrement_file_counter(self): self._add_to_file_counter(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
[ "0.7747842", "0.63648546", "0.6072333", "0.60351396", "0.5918244", "0.57059807", "0.56674904", "0.5664642", "0.56031275", "0.5573989", "0.5520654", "0.5453089", "0.5448838", "0.54281026", "0.5422772", "0.5378375", "0.5307265", "0.5244931", "0.52217174", "0.5220032", "0.521629...
0.6648334
1
Add the lock files listed in lock_files to the list of lock files managed by other ranks.
def _update_lock_files(self, lock_files): _, _lock_file, _other_lock_files = _temporary_files[ self._subarray._partition_file ] _other_lock_files.update(set(lock_files)) if _lock_file in _other_lock_files: # If the lock file managed by this rank is in the list of # lock files managed by other ranks, remove it from there _other_lock_files.remove(_lock_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LockFiles(self, entries):\n self._model.lock(entries)", "def add_mock_files(self, file_list):\n self._mock_file_list.extend(file_list)", "def thread_file_list(self):\n # Establish connection for this thread\n connection = self.connect()\n\n # Set working directory on serv...
[ "0.5858453", "0.5857905", "0.5442941", "0.5427439", "0.5369485", "0.534254", "0.5242692", "0.5233011", "0.52133423", "0.51889026", "0.51700175", "0.51625514", "0.5118121", "0.5110031", "0.50916535", "0.5077471", "0.5056525", "0.5047517", "0.50415224", "0.50407803", "0.5031041...
0.785469
0
If we don't define this, it will use the regular dictionary __iter__ which does not call SortedDictionary.keys().
def __iter__(self): for each in list(self.keys()): yield each
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n return self.ordered_keys.__iter__()", "def __iter__(self):\n return iter(self.keys())", "def __iter__(self):\n if self._len_keys == 1:\n yield from self._dict.keys()\n else:\n for key in self._dict.keys():\n yield tuple(sort...
[ "0.8112754", "0.7881003", "0.7754839", "0.76607627", "0.764876", "0.7564836", "0.7504256", "0.75019187", "0.74793774", "0.7474488", "0.74243957", "0.7364466", "0.7364466", "0.73356414", "0.7253679", "0.72350866", "0.7233559", "0.7106118", "0.7090561", "0.7053434", "0.69872856...
0.7525102
6
Create a CourseGraph, fetching unitary weights and edge weights from database, creating CourseNodes for each course, and
def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4): self._nodes = dict() # dict with courseid keys, CourseNode vals self._max_suggestions = max_suggestions self._max_courses = max_courses self._cache_mult = cache_mult db = database # Get dict mapping courses to unitary weights unitary_dict = db.get_unitary_dict(session) # Get dict mapping courses to adjacent courses and weights edge_dict = db.get_edges_dict(session) # Create CourseNodes for courseid in unitary_dict: courseNode = CourseGraph.CourseNode(courseid=courseid, edges=dict(), popularity=unitary_dict[courseid]) self._nodes[courseid] = courseNode # Create course edge dict for each CourseNode for courseid in edge_dict: node = self._nodes[courseid] # get node of interest adj_courses = edge_dict[courseid] # get inner dict {otherid: edge_weight} for otherid in adj_courses: other_node = self._nodes[otherid] node.addEdge(other_node, adj_courses[otherid])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to...
[ "0.610946", "0.6068693", "0.5970327", "0.59139115", "0.5910554", "0.59065074", "0.5842171", "0.58402723", "0.5831642", "0.5821705", "0.58180076", "0.5813589", "0.5796841", "0.57905227", "0.5715409", "0.5708784", "0.56940794", "0.5655929", "0.5635845", "0.5629975", "0.56149757...
0.7083467
0
Process a full set of images, with parallelization if multiple CPU threads are available on this machine
def _process_images( raw_image_paths: pd.Series, raw_images_dir: str, ROI_definitions: Dict[str, Tuple], flat_field_filepath_or_none: Union[str, None], save_ROIs: bool, save_dark_frame_corrected_images: bool, save_flat_field_corrected_images: bool, ) -> Tuple[pd.DataFrame, pd.DataFrame]: def _process_image_local(raw_image_path): """ Version of process_image with all of the local configuration variables packed in. Also encapsulates the opening of the image. """ return process_image( original_rgb_image=raw.open.as_rgb(raw_image_path), original_image_filepath=raw_image_path, raw_images_dir=raw_images_dir, ROI_definitions=ROI_definitions, flat_field_filepath_or_none=flat_field_filepath_or_none, save_ROIs=save_ROIs, save_dark_frame_corrected_image=save_dark_frame_corrected_images, save_flat_field_corrected_image=save_flat_field_corrected_images, ) with ThreadPoolExecutor() as executor: # We want identical warnings to be shown only for the first image they occur on (the default), # but we also want subsequent calls to process_experiment to start with a fresh warning store # so that warnings don't stop showing after the first run. # catch_warnings gives us this fresh warning store. with warnings.catch_warnings(): # process_image returns roi_summary_data df, image_diagnostics df -> this will be a list of 2-tuples roi_summary_data_and_image_diagnostics_dfs_for_files = list( tqdm( executor.map(_process_image_local, raw_image_paths), total=len(raw_image_paths), ) ) roi_summary_data_for_files, image_diagnostics_for_files = zip( *roi_summary_data_and_image_diagnostics_dfs_for_files ) roi_summary_data_for_all_files = _stack_dataframes(roi_summary_data_for_files) image_diagnostics_for_all_files = _stack_serieses(image_diagnostics_for_files) return roi_summary_data_for_all_files, image_diagnostics_for_all_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scan_images_multiprocessed(images, clf, processes, vstep=15, hstep=15, dnum=5):\n pool = Pool(processes=processes) # start 4 worker processes\n results = []\n for i in range(0, processes):\n begin = i * int(len(images) / processes)\n if i == processes - 1:\n end = len(images)...
[ "0.7019778", "0.701473", "0.68229294", "0.6802597", "0.67542833", "0.6570712", "0.6545006", "0.65141946", "0.6469355", "0.6455748", "0.6437736", "0.6373732", "0.6337872", "0.6320698", "0.6302606", "0.6255835", "0.6229472", "0.62148833", "0.6202636", "0.61866295", "0.61764526"...
0.56445336
94
Return the serializer instance that should be used for validating and deserializing input, and for serializing output.
def get_serializer_in(self, *args, **kwargs): serializer_class = self.get_serializer_class_in() kwargs['context'] = self.get_serializer_context() return serializer_class(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSerializer():", "def get_serializer(self, *args, **kwargs):\n serializer_class = self.get_serializer_class()\n kwargs['context'] = self.get_serializer_context()\n return serializer_class(*args, **kwargs)", "def serializer_for(self, obj):\n # 1-NULL serializer\n if obj ...
[ "0.72009987", "0.7115752", "0.69366866", "0.6924428", "0.68449354", "0.6820688", "0.67195165", "0.6692233", "0.6686074", "0.65120816", "0.6473344", "0.6473284", "0.6444412", "0.6443525", "0.6418904", "0.63524395", "0.6348317", "0.63381505", "0.6311651", "0.63109744", "0.62789...
0.67095476
7
Description When is given a directory name that exist Expected Result Shows log that directory was found
def test_has_directory_log(self, check_fn_true, caplog): #setup records = caplog.records has_directory = extractor.make_has_directory(os.path.isdir) directory_path = "./data/observed" #when test1 = has_directory(directory_path) #result assert len(records) == 1 assert records[0].message == f"It was found directory {directory_path}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #r...
[ "0.74612963", "0.66516936", "0.6648649", "0.6637668", "0.6592905", "0.65451527", "0.6507091", "0.6476768", "0.6445445", "0.64397144", "0.641731", "0.6323137", "0.6317305", "0.6291415", "0.6287829", "0.6264551", "0.62638617", "0.62398607", "0.6232754", "0.6224753", "0.62079227...
0.7359447
1
Create a new websocket and connect its input and output to the subprocess with the specified PID.
async def websocket_handler(self, request, ws): if self.repl_mgr is None: return sanic.response.HTTPResponse(status=404) log.info('initiating websocket') await self.repl_mgr.process_websocket(ws) log.info('terminating websocket')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_web_socket(vnc_port, web_socket_port, server):\n\n path = os.path.abspath(os.path.dirname(__file__))\n ws = os.path.join(path, \"../../webConsole/bin/websockify.py\")\n\n web_socket_path = os.path.abspath(ws)\n\n cmd = \"%s %s:%s %s:%s --idle-timeout=120 &\" % (web_socket_path, server, vnc_p...
[ "0.61489266", "0.6004585", "0.58952093", "0.5711386", "0.56981546", "0.5681631", "0.5521888", "0.55072397", "0.5495588", "0.54655325", "0.5456767", "0.5442702", "0.539571", "0.53936225", "0.5373037", "0.5360796", "0.53407484", "0.53166866", "0.52801496", "0.5262081", "0.52579...
0.0
-1
Creates a group for a given node list. So far this is only an AiiDA verdi command.
def create_group(name, nodes, description=None): group, created = Group.get_or_create(name=name) if created: print('Group created with PK={} and name {}'.format(group.pk, group.name)) else: print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk)) answer = raw_input() if answer.strip().lower() == 'y': pass else: return nodes2 = [] nodes2_pks = [] for node in nodes: try: node = int(node) except ValueError: pass nodes2_pks.append(node) try: nodes2.append(load_node(node)) except:# NotExistentError: pass group.add_nodes(nodes2) print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk)) if description: group.description = description return group
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n ...
[ "0.7485883", "0.72396135", "0.71599764", "0.66984046", "0.66795355", "0.65447414", "0.6374939", "0.6371427", "0.63429767", "0.63429767", "0.6336112", "0.6318668", "0.631537", "0.63136333", "0.6290033", "0.6238642", "0.6233603", "0.62333775", "0.616521", "0.61182487", "0.61024...
0.7037025
3
returns a list of node uuids for a given group as, name, pk, uuid or group object
def get_nodes_from_group(group, return_format='uuid'): from aiida.orm import Group from aiida.common.exceptions import NotExistent nodes = [] g_nodes = [] try: group_pk = int(group) except ValueError: group_pk = None group_name = group if group_pk is not None: try: str_group = Group(dbgroup=group_pk) except NotExistent: str_group = None message = ('You have to provide a valid pk for a Group ' 'or a Group name. Reference key: "group".' 'given pk= {} is not a valid group' '(or is your group name integer?)'.format(group_pk)) print(message) elif group_name is not None: try: str_group = Group.get_from_string(group_name) except NotExistent: str_group = None message = ('You have to provide a valid pk for a Group or a Group name.' 'given group name= {} is not a valid group' '(or is your group name integer?)'.format(group_name)) print(message) elif isinstance(group, Group): str_group = group else: str_group = None print('I could not handle given input, either Group, pk, or group name please.') return nodes g_nodes = str_group.nodes for node in g_nodes: if return_format == 'uuid': nodes.append(node.uuid) elif return_format == 'pk': nodes.append(node.pk) return nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for ent...
[ "0.6142465", "0.5999166", "0.59815145", "0.58869386", "0.5741488", "0.5735394", "0.5697292", "0.5640951", "0.563847", "0.56142646", "0.55772024", "0.5554826", "0.5521682", "0.5469941", "0.5396493", "0.5393709", "0.53740793", "0.5341698", "0.5340419", "0.5333693", "0.53306866"...
0.73784983
0
Creates an interior node with the given operator (a token), and left and right operands (other nodes).
def __init__(self, opToken, leftOper, rightOper): self.operator = opToken self.leftOperand = leftOper self.rightOperand = rightOper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n ...
[ "0.6768396", "0.6402284", "0.6212105", "0.6038182", "0.6029708", "0.59814626", "0.59771395", "0.5969799", "0.57881653", "0.57876146", "0.57825124", "0.57195956", "0.56935775", "0.56912225", "0.56751347", "0.559061", "0.5583933", "0.5532599", "0.54641896", "0.5460819", "0.5419...
0.66272795
1
Write a custom auth property where we grab the auth token and put it in the headers
def authenticate(self): #it's weird i have to do this here, but the code makes this not simple auth_json={'email':self.user, 'password':self.password} #send a post with no auth. prevents an infinite loop auth_response = self.post('/auth', data = json.dumps(auth_json), auth = None) _token = auth_response.json['token'] self._token = _token self._wrapped.auth = SpringAuth(_token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, r):\n r.headers[\"x-aims-auth-token\"] = self._token\n return r", "def auth_header_value(self):\n return f\"token {self.API_TOKEN}\"", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def authorization(self):\n retu...
[ "0.7161087", "0.7057166", "0.6970341", "0.6970341", "0.6921795", "0.68667036", "0.67996633", "0.6784417", "0.6764053", "0.66727406", "0.66410804", "0.6630715", "0.66150844", "0.6613414", "0.6583362", "0.65666574", "0.6546947", "0.6528426", "0.6521229", "0.6509429", "0.6488572...
0.5789005
80
if the person removed is an owner of flockr, check that they were actually removed from flockr
def test_channel_leave_normal_case_owner(): clear() leaver = auth_register('leaver@gmail.com', '123abc!@#', 'first', 'last') user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', True) channel_join(leaver['token'], userchannel_id['channel_id']) channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id']) channel_leave(leaver['token'], userchannel_id['channel_id']) randChannel_details = channel_details(user['token'], userchannel_id['channel_id']) assert(randChannel_details['owner_members'] == [ { 'u_id' : user['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' } ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register...
[ "0.6447291", "0.6402777", "0.637274", "0.61781394", "0.6064277", "0.59293276", "0.58794194", "0.58277196", "0.58173835", "0.5780744", "0.5778175", "0.5695273", "0.5692215", "0.56889635", "0.56681126", "0.5662332", "0.5635315", "0.56060123", "0.5600414", "0.5581912", "0.557282...
0.5255025
65
Needs a ATOM.atom instance as argument. Returns the names of the framework atoms bound to that atom.
def get_framework_neighbours(atom, useH=True): neighbourlist = [] for atom2 in atom.partner[:5]: #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6: if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float( covalence_radius[atom2.element]) + .1: if not 'H' == atom2.element or useH: neighbourlist.append(atom2) return neighbourlist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_atom_labels(self, full=False):\n import numpy\n\n labels = self.get_attr(\"atom_labels\")\n if full:\n return labels\n return numpy.array(labels)[self._get_equivalent_atom_list()].tolist()", "def getAtomNames(self):\n return self._raw_data['ATOM_NAME']", "d...
[ "0.5786468", "0.5760434", "0.5465059", "0.54395527", "0.5285232", "0.52612984", "0.52286106", "0.5170458", "0.5095022", "0.5048037", "0.5035679", "0.5034012", "0.49822837", "0.49687123", "0.4942803", "0.49194297", "0.49104977", "0.49047342", "0.4869437", "0.48587328", "0.4852...
0.45080042
97
Returns the rotation matrix equivalent of the given quaternion. This function is used by the get_refined_rotation() function.
def get_rotation_matrix_from_quaternion(q): R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], 2 * (q[1] * q[2] - q[0] * q[3]), 2 * (q[1] * q[3] + q[0] * q[2])], [2 * (q[2] * q[1] + q[0] * q[3]), q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3], 2 * (q[2] * q[3] - q[0] * q[1])], [2 * (q[3] * q[1] - q[0] * q[2]), 2 * (q[3] * q[2] + q[0] * q[1]), q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]]) return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quaternion_to_rotation_matrix(quaternion):\n\n q_w, q_x, q_y, q_z = quaternion\n sqw, sqx, sqy, sqz = np.square(quaternion)\n norm = (sqx + sqy + sqz + sqw)\n rotation_matrix = np.zeros((3, 3))\n\n # division of square length if quaternion is not already normalized\n rotation_matrix[0, 0] = (...
[ "0.8077829", "0.79751414", "0.7973847", "0.797261", "0.79455817", "0.79306656", "0.79097867", "0.780534", "0.7727341", "0.77201724", "0.77022475", "0.7419111", "0.74067664", "0.7311962", "0.7208562", "0.7142397", "0.71323454", "0.71113116", "0.70910096", "0.7048041", "0.69940...
0.8253612
0
Returns a count of the number of unique metrics currently recorded for apdex, time and value metrics.
def metrics_count(self): return len(self.__stats_table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def metric_data_count(self):\n\n if not self.__settings...
[ "0.6411366", "0.6283042", "0.62296087", "0.6220417", "0.61111647", "0.6047972", "0.60304636", "0.5969979", "0.59513307", "0.5878805", "0.5827968", "0.5753369", "0.5703473", "0.56953144", "0.56933933", "0.56933933", "0.5690032", "0.5668702", "0.56455106", "0.5636696", "0.56353...
0.6419223
0
Run the script at given path catching exceptions. This function should only be used internally by Pyto.
def runScriptAtPath(path): sys.argv = [path] for arg in PytoClasses.Python.shared.args: sys.argv.append(str(arg)) def run() -> None: os.system = PytoClasses.Python.shared.system directory = os.path.expanduser(os.path.dirname(path)) sys.path.insert(0, directory) try: global __script__ spec = importlib.util.spec_from_file_location("__main__", path) __script__ = importlib.util.module_from_spec(spec) spec.loader.exec_module(__script__) PytoClasses.Python.shared.values = [item for item in dir(__script__) if not item.startswith("__")] except SystemExit: print("SystemExit") except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() extracts = traceback.extract_tb(sys.exc_info()[2]) count = len(extracts) lineNumber = -1 fileName = path for i, extract in enumerate(extracts): if extract[0] == fileName: lineNumber = extract[1] break count -= 1 if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1] PytoClasses.Python.shared.errorType = exc_type.__name__ PytoClasses.Python.shared.errorReason = str(e) PytoClasses.EditorViewController.visible.showErrorAtLine(lineNumber) print(traceback.format_exc(limit=-count)) sys.path.remove(directory) PytoClasses.ReviewHelper.shared.launches = PytoClasses.ReviewHelper.shared.launches+1 PytoClasses.ReviewHelper.shared.requestReview() PytoClasses.Python.shared.isScriptRunning = False thread = threading.Thread(target=run, args=()) def loop(): while PytoClasses.Python.shared.isScriptRunning: time.sleep(1) ignoredThreads.append(thread) raise Exception("Stopped script!") def runLoop(): try: loop() except: pass thread.start() runLoop() return __script__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))", "def do_exec(self, arg):\n self.run_file(arg['path'])", "def _run_file(file_path, globals_):\n script_name = os.path.basename(file_path)\n\n sys.path = (_PATHS.script_paths(script_name) +\n ...
[ "0.72653073", "0.6912623", "0.68354243", "0.67718136", "0.67703193", "0.65939707", "0.6413898", "0.63070154", "0.61949843", "0.61937946", "0.6164765", "0.6151639", "0.60480416", "0.60265625", "0.60191596", "0.6006073", "0.5994495", "0.59873414", "0.5959248", "0.58620226", "0....
0.7851395
0
Fast translation, rotation & scale in 2D using np.einsum in case input is not a single point
def fast_TRS_2d(input, transform_matrix, input_is_point=False): if input_is_point: return np.delete(np.dot(transform_matrix, np.insert(input, 2, 1)), 2) else: return np.delete(np.einsum('jk,ik->ij', transform_matrix, np.insert(input, 2, 1, axis=1)), 2, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transAffine2D( iScale=(1, 1), iTrans=(0, 0), iRot=0, iShear=(0, 0) ): \n iRot = iRot * np.pi / 180\n oMatScale = np.matrix( ((iScale[0],0,0),(0,iScale[1],0),(0,0,1)) )\n oMatTrans = np.matrix( ((1,0,iTrans[0]),(0,1,iTrans[1]),(0,0,1)) )\n oMatRot = np.matrix( ((np.cos(iRot),-np.sin(iRot),0),\\\n...
[ "0.65614295", "0.634234", "0.61342865", "0.61295545", "0.61271703", "0.59918904", "0.5945617", "0.5937629", "0.5931673", "0.59223235", "0.5891538", "0.58714736", "0.5866033", "0.58649784", "0.58559966", "0.5835464", "0.5825817", "0.58026475", "0.5768168", "0.575795", "0.57447...
0.5372871
64
Filter multiple iterable at once, selecting values at index i such that func(iterables[0][i], iterables[1][i], ...) is True
def sync_filter(func, *iterables): return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len( iterables )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def filter(function, iterable):\n\n if function is bool:\n ...
[ "0.7254669", "0.70515805", "0.6633785", "0.6542129", "0.65260065", "0.6445253", "0.6422799", "0.6301492", "0.628052", "0.6261402", "0.6222613", "0.62073445", "0.6094451", "0.60292643", "0.60198414", "0.5971072", "0.59694105", "0.5959575", "0.5932348", "0.5888483", "0.5888037"...
0.781177
0
This will log a user out and redirect them to log in again via the AuthN server.
def logout_redirect(request): logout(request) # Build the URL login_url = furl(login_redirect_url(request, next_url=request.build_absolute_uri())) # Check for branding if hasattr(settings, 'SCIAUTH_BRANDING'): logger.debug('SciAuth branding passed') # Encode it and pass it branding = base64.urlsafe_b64encode(json.dumps(settings.SCIAUTH_BRANDING).encode('utf-8')).decode('utf-8') login_url.query.params.add('branding', branding) # Set the URL and purge cookies response = redirect(login_url.url) response.delete_cookie('DBMI_JWT', domain=dbmi_settings.JWT_COOKIE_DOMAIN) logger.debug('Redirecting to: {}'.format(login_url.url)) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logout():\n logout_user()\n return redirect(url_for('auth.index'))", "def signout():\n session.pop('oauth2_state', None)\n session.pop('oauth2_token', None)\n session.pop('discord_user', None)\n return redirect('/')", "def logout_user(request):\r\n # We do not log here, because we have...
[ "0.74786663", "0.7398715", "0.7309837", "0.7262442", "0.7236717", "0.72231877", "0.72231495", "0.7213908", "0.72030735", "0.7196016", "0.71688986", "0.71640676", "0.7163589", "0.7163589", "0.7159972", "0.71333206", "0.71177167", "0.7101278", "0.709512", "0.7080758", "0.706229...
0.0
-1
Given the positions of a list of the indices, create a unique key to register the position.
def placementKey( geo): def diagcmp( xyA, xyB): """ Compare two positions based on x + y. If x + y is the same for the two, compare based on x. """ return cmp(xyA[0] + xyA[1], xyB[0] + xyB[1]) or cmp(xyA[0], xyB[0]) sorted = [ tuple(geo[i]) for i in xrange(geo.shape[0]) ] sorted.sort( diagcmp) return hash(tuple(sorted))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_new_key(idx, key, d):\n\n new_key = \"%s_%d\" % (key, idx)\n if new_key in d:\n return make_new_key(idx + 1, key, d)\n return new_key", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n ...
[ "0.6364943", "0.59690577", "0.58550936", "0.58094066", "0.5801118", "0.5798134", "0.5770239", "0.57411945", "0.57236964", "0.5721956", "0.56699497", "0.56316316", "0.5616401", "0.5541196", "0.55281365", "0.5510795", "0.5509949", "0.54963547", "0.5487912", "0.5483186", "0.5469...
0.53552693
35
Updates the environment according to action and returns a `TimeStep`. See `step(self, action)` docstring for more details.
def _step(self, action: types.NestedArray) -> ts.TimeStep:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self, action: types.NestedArray) -> ts.TimeStep:\n if self._current_time_step is None or self.should_reset(\n self._current_time_step\n ):\n return self.reset()\n\n self._current_time_step = self._step(action)\n return self._current_time_step", "def step(\n self,\n action: ...
[ "0.78822833", "0.7701546", "0.74653417", "0.735205", "0.73112786", "0.7296633", "0.72683334", "0.72642654", "0.72118324", "0.71811354", "0.7162281", "0.71551687", "0.71234053", "0.7118485", "0.70683175", "0.6997027", "0.6996414", "0.699088", "0.6952764", "0.6945095", "0.69395...
0.70150334
15
Starts a new sequence, returns the first `TimeStep` of this sequence. See `reset(self)` docstring for more details
def _reset(self) -> ts.TimeStep:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step", "def start(self):\n\t\tself._start = time.clock()\n\t\tif self._initial is None:\n\t\t\tself._initial = self._start\n\t\treturn self", "def get_first_step(self):\n return self.get_step_by_i...
[ "0.6659816", "0.64686", "0.61162376", "0.6072993", "0.60596496", "0.58604884", "0.56744826", "0.5647901", "0.5628664", "0.5623361", "0.56153256", "0.55805075", "0.5575825", "0.5544547", "0.54956526", "0.5452446", "0.5385656", "0.53372717", "0.53334737", "0.5326875", "0.532644...
0.5530532
14
r"""Calculate the cold plasma dispersion surfaces according to equation 2.64 in Plasma Waves by Swanson (2nd ed.)
def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e): # Make vectors of the wave numbers kc_z = np.linspace(1e-6, kc_z_max, 35) kc_x = np.linspace(1e-6, kc_x_max, 35) # Turn those vectors into matrices kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z) # Find some of the numbers that appear later in the calculations kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B wc_i = 1 / m_i # The ion gyro frequency wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # For every k_perp and k_par, turn the dispersion relation into a # polynomial equation and solve it. # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # The polynomial coefficients are calculated pol_koeff_8 = -2 * kc_ ** 2 pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape) pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2) pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2 pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2) pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2 pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * ( 1 + np.cos(theta_) ** 2) pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2 pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos( theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i)) pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * ( 1 + np.cos(theta_) ** 2) pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2 w_final = np.zeros((10, len(kc_z), len(kc_x))) # For each k, solve the equation for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))): disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0, pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x], 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]] # theoretically should be real (A. 
Tjulin) w_temp = np.real(np.roots(disp_polynomial)) # We need to sort the answers to get nice surfaces. w_final[:, k_z, k_x] = np.sort(w_temp) n2_ = kc_ ** 2 / w_final ** 2 v_ph_c = np.sqrt(1. / n2_) va_c = 1 / (wp_e * np.sqrt(m_i)) v_ph_va = v_ph_c / va_c diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i) e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor) e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_ b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat, w_final, e_x, e_y, e_z) dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]] dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)] dw_x[:, :, 1:] = np.diff(w_final, axis=2) dw_z[:, 1:, :] = np.diff(w_final, axis=1) v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])] s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z) # Compute ion and electron velocities v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final, e_x, e_y, e_z) # Ratio of parallel and perpendicular to B speed vepar_perp = v_ez * np.conj(v_ez) vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey)) vipar_perp = v_iz * np.conj(v_iz) vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy)) # Total particle speeds v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez) v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz) # Ion and electron energies m_e = -1 en_e = 0.5 * m_e * v_e2 en_i = 0.5 * m_i * v_i2 # Ratio of particle and field energy densities ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot) # Continuity equation dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final, v_ex, v_ez, v_ix, v_iz) dn_e_n_db_b = dn_e_n / b_tot dn_i_n_db_b = dn_i_n / b_tot dn_e_n_dbpar_b = dn_e_n / b_par dn_i_n_dbpar_b = dn_i_n / b_par dn_e = dn_e_n * wp_e ** 2 k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e)) # Build output dict extra_param = {"Degree of electromagnetism": np.log10(b_tot / 
e_tot), "Degree of longitudinality": np.abs(e_par) / e_tot, "Degree of parallelity E": e_z / e_tot, "Degree of parallelity B": np.sqrt( b_z * np.conj(b_z)) / b_tot, "Ellipticity E": e_pol, "Ellipticity B": b_pol, "E_part/E_field": np.log10(ratio_part_field), "v_g": np.sqrt(v_x ** 2 + v_z ** 2), "v_ph/v_a": np.log10(v_ph_va), "E_e/E_i": np.log10(en_e / en_i), "v_e/v_i": np.log10(np.sqrt(v_e2 / v_i2)), "v_epara/v_eperp": np.log10(vepar_perp), "v_ipara/v_iperp": np.log10(vipar_perp), "dn_e/dn_i": np.log10(dne_dni), "(dn_e/n)/ (dB/B)": np.log10(dn_e_n_db_b), "(dn_i/n)/(dB/B)": np.log10(dn_i_n_db_b), "(dn_i/n)/(dBpar/B)": np.log10(dn_i_n_dbpar_b), "(dn_e/n)/(dB/B)": np.log10(dn_e / k_dot_e), "(dn_e/n)/(dBpar /B)": np.log10(dn_e_n_dbpar_b), " Spar/Stot": s_par / s_tot} for k, v in zip(extra_param.keys(), extra_param.values()): extra_param[k] = np.transpose(np.real(v), [0, 2, 1]) kx_ = np.transpose(kc_x_mat) kz_ = np.transpose(kc_z_mat) wf_ = np.transpose(w_final, [0, 2, 1]) return kx_, kz_, wf_, extra_param
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_mixing_coefficients_surf(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n # SET UP NEW MIXING COEFFICIENT ARRAYS\n self.Kv_surf = np.zeros([Ly,N+1])\n self.Kt_surf = np.zeros([Ly,N+1])\n \n self.ghat = np.zeros([Ly,N+1])\n \n\n ...
[ "0.6823951", "0.68156433", "0.64645", "0.62532675", "0.5977594", "0.5888927", "0.5858084", "0.5850966", "0.5778458", "0.5767043", "0.5753279", "0.5737354", "0.5723255", "0.5714657", "0.57088953", "0.5705945", "0.56355387", "0.56164163", "0.561608", "0.56118447", "0.5599761", ...
0.7354267
0
Test of cooking the same product twice. Test passed if second cooking of same product raise ValueError
def test_cook_twice(cook_not_busy, product_for_cook): cook_not_busy.cook_dish(product_for_cook) with pytest.raises(ValueError): cook_not_busy.cook_dish(product_for_cook)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in te...
[ "0.6690166", "0.63332933", "0.62514263", "0.61649024", "0.6153124", "0.605767", "0.6029322", "0.60229874", "0.6018796", "0.6007936", "0.5988192", "0.5973974", "0.5963615", "0.5908742", "0.58811826", "0.58582234", "0.585461", "0.5827044", "0.5807381", "0.58039653", "0.579343",...
0.7491278
0
Signs a transaction (in format of build_tx) with the given node, and returns the decoderawtransactiontype result again.
def sign (self, node, tx): signed = node.signrawtransactionwithwallet (tx["hex"]) res = node.decoderawtransaction (signed["hex"]) res.update (signed) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign_transaction():\n data = request.get_json()\n\n try:\n tx = Transaction.from_dict(data)\n except TypeError:\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n signature = tx.sign(node.walle...
[ "0.6919308", "0.6778266", "0.6380237", "0.627283", "0.61029476", "0.6083491", "0.5987487", "0.58167666", "0.57365465", "0.5712428", "0.56981504", "0.56660604", "0.56095326", "0.55846405", "0.55624753", "0.5552044", "0.54918426", "0.5468147", "0.54531074", "0.54221904", "0.536...
0.77226585
0
Return a list of the ids of outer divs with the specified text in a child element.
def ids_of_outer_divs_with_inner_text(self, child_text): return self.q(css='div.outer').filter( lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')] ).attrs('id')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]", "def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n ...
[ "0.55548036", "0.5479126", "0.54318756", "0.54250884", "0.53954", "0.5378344", "0.5358235", "0.532862", "0.53090286", "0.52930194", "0.5247154", "0.5223819", "0.5181133", "0.51650614", "0.5156875", "0.51566947", "0.512827", "0.5109152", "0.5081774", "0.5059653", "0.5021192", ...
0.877173
0
Wait for scripts to finish and then return the contents of the ``output`` div on the page.
def output(self): return super(RequireJSPage, self).output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n ...
[ "0.58988714", "0.5896934", "0.5896934", "0.5896934", "0.5896934", "0.5818085", "0.5690976", "0.56339425", "0.5543673", "0.5390721", "0.5287882", "0.52217585", "0.5208909", "0.51837784", "0.5157287", "0.5146636", "0.51380193", "0.51380193", "0.51380193", "0.51380193", "0.51246...
0.0
-1
x Position x on the map y Position y on the map theta Direction on the map
def scan(self, x, y, theta): # create ray list max_theta = theta + self.fov/2.0 min_theta = theta - self.fov/2.0 thetas = np.arange(min_theta, max_theta, self.theta_inc, dtype=np.float32) self.input_vector[:, 0] = x self.input_vector[:, 1] = y self.input_vector[:, 2] = thetas # run ray marching self.scan_method.calc_range_many(self.input_vector, self.output_vector) return self.output_vector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def theta(self):\n return atan2(self.y, self.x)", "def theta(self):\n return float(np.arctan2(self.y, self.x))", "def xy(self,theta,phi):\n dist=great_circle_distance(self.theta0,theta,self.phi0,phi)\n [yt,xt]=np.unravel_index(np.argmin(dist),dist.shape)\n return xt,yt", "d...
[ "0.691251", "0.683143", "0.6619195", "0.6595212", "0.63898385", "0.628581", "0.6238489", "0.61594254", "0.61381114", "0.61379653", "0.61337817", "0.6127042", "0.6115421", "0.6079736", "0.60695887", "0.6060793", "0.6034483", "0.60037607", "0.60031086", "0.59396416", "0.5934096...
0.0
-1
Saves ciphers or keys or any text to the given file path; more efficient than manual saving.
def save(string, file): save_file = open(file, 'w') save_file.write(string) save_file.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_file(path, text):\n with path.open(mode='w') as f_stream:\n f_stream.write(text)", "def store_file(text: str, file_path: str) -> None:\n with open(file=file_path, mode='w', encoding='utf8') as f:\n f.write(text)", "def save_text_file(text, path):\n os.makedirs(os.path.dirname(pa...
[ "0.68838364", "0.6661782", "0.65330315", "0.6515433", "0.6431692", "0.64074653", "0.6158728", "0.61471856", "0.6121656", "0.6058847", "0.6049562", "0.60482824", "0.60414743", "0.6010596", "0.598768", "0.59727114", "0.59727114", "0.59727114", "0.59499484", "0.5907642", "0.5892...
0.55175346
68
Prompts for confirmation (yes/no question). If the user aborts the input by sending a interrupt signal this
def confirm( text: str, default: bool = False, abort: bool = False, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, ): prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N") while True: try: value = _prompt(prompt, err=err, hide_input=False).lower().strip() except (KeyboardInterrupt, EOFError): raise click.Abort() if value in ('y', "yes"): rv = True elif value in ('n', "no"): rv = False elif value == '': rv = default else: click.echo("Error: invalid input", err=err) continue break if abort and not rv: raise click.Abort() return rv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirmation(self, question, answer):\n confirm_flag = False\n while confirm_flag not in ['y', 'n']:\n confirm_flag = raw_input(question + ' [y/n]: ')\n if confirm_flag == 'y':\n print answer\n elif confirm_flag == 'n':\n print 'The u...
[ "0.7569344", "0.7401378", "0.72859263", "0.7283516", "0.7266474", "0.719268", "0.71785426", "0.7086956", "0.70464075", "0.7021681", "0.6976385", "0.69672275", "0.69638824", "0.6946273", "0.6888155", "0.6809539", "0.6780994", "0.67785114", "0.66649485", "0.6664532", "0.6661782...
0.62040895
47
Prompts a user for input. If the user aborts the input by sending an interrupt signal, this
def choice( options: Union[List[str], Mapping[str, str]], text: str = '', default: Optional[str] = None, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, start_index: int = 0 ) -> Union[str, int]: # TODO: completer for numbers? type_: click.ParamType if isinstance(options, Mapping): # (Y/I/N/O/D/Z) [default=N] text = f"{text} ({'/'.join(options.keys())})" type_ = click.STRING for choice, descripton in options.items(): click.echo(f" {choice} : {descripton}") else: type_ = click.IntRange(start_index, len(options) + 1 - start_index) for idx, descripton in enumerate(options): idx += start_index click.echo(f" [{idx}] {descripton}") if default is not None and show_default: text += f" [default={default}]" while True: selection = prompt( text=text, default=default, type=type_, prompt_suffix=prompt_suffix, show_default=False, err=err, ) if isinstance(options, Mapping): selection = selection.strip().upper() if selection not in options: click.echo(f"Please enter a valid option.") else: return selection else: return selection - start_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pause(question='PRESS ENTER TO CONTINUE ...'):\n try: input(question)\n except KeyboardInterrupt:\n global shutDown\n shutDown = True\n except: pass", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def cont():\n\n try:\n input = raw_input()\n...
[ "0.7103245", "0.6709058", "0.6595114", "0.6572123", "0.65524346", "0.6543129", "0.65140307", "0.6374105", "0.63740534", "0.631756", "0.6300449", "0.628559", "0.6279168", "0.6268421", "0.62003326", "0.61359113", "0.6018758", "0.6006392", "0.5985524", "0.5977991", "0.5903809", ...
0.0
-1
Calculates the next state of a given 'board' following the classic rules of Conway's Game Of Life
def original(arr): height = np.shape(arr)[0] width = np.shape(arr)[1] result = np.array(arr) for row in range(height): for col in range(width): neighbors = 0 val = result[row][col] for i in range(-1, 2): for j in range(-1, 2): if i == 0 and j == 0: # The cell itself cannot be counted as a neighbor continue if row + i < 0 or col + j < 0 or row + i > height or col + j > width: # Out of bounds continue with suppress(IndexError): if arr[row + i][col + j] == 1: neighbors += 1 if neighbors == 3 and val == 0: # Cell becomes alive result[row][col] = 1 elif neighbors > 3 and val == 1 or neighbors < 2 and val == 1: # Cell dies result[row][col] = 0 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_next_board_state(self):\n new_board_state = np.zeros_like(self.board_state)\n\n for x in range(self.board_size[0]):\n for y in range(self.board_size[0]):\n new_board_state[x][y] = self.next_state_of_cell(x,y)\n \n self.set_state(new_board_state)",...
[ "0.8105419", "0.74080914", "0.7388347", "0.73284864", "0.7323322", "0.72788733", "0.7252915", "0.7215804", "0.72030765", "0.72030765", "0.7184667", "0.70937914", "0.70153326", "0.7014133", "0.7008801", "0.7003514", "0.7002293", "0.69419354", "0.693517", "0.69348997", "0.69085...
0.0
-1
Solve tile in column zero on specified row (> 1) Updates puzzle and returns a move string
def solve_col0_tile(self, target_row): move_str = 'ur' self.update_puzzle(move_str) cur_row, cur_col = self.current_position(target_row, 0) if cur_row == target_row and cur_col == 0: move_str += 'r' * (self._width - 2) else: move_str += self.position_tile(target_row-1, 1, cur_row, cur_col) move_str += 'ruldrdlurdluurddlur' move_str += 'r' * (self._width - 2) self.update_puzzle(move_str[2:]) return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n...
[ "0.80090946", "0.8002715", "0.79543203", "0.7940283", "0.7870043", "0.7847743", "0.7818097", "0.77486104", "0.774018", "0.7731879", "0.77268267", "0.7707487", "0.77027786", "0.76879483", "0.7634697", "0.76258874", "0.7590558", "0.7560754", "0.7522215", "0.75079787", "0.750233...
0.777891
7
Solve the tile in row zero at the specified column Updates puzzle and returns a move string
def solve_row0_tile(self, target_col): move_str = 'ld' self.update_puzzle(move_str) cur_row, cur_col = self.current_position(0, target_col) if cur_row == 0 and cur_col == target_col: return move_str else: move_str += self.position_tile(1, target_col-1, cur_row, cur_col) move_str += 'urdlurrdluldrruld' self.update_puzzle(move_str[2:]) return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_col0_tile(self, target_row):\r\n moves_str = \"\"\r\n # move the zero tile from (i,0) to (i−1,1) \r\n # using the move string \"ur\"\r\n moves_str += \"ur\"\r\n temp_grid = Puzzle(self._height, self._width, self._grid)\r\n temp_grid.update_puzzle(moves_str)\r\n ...
[ "0.7689822", "0.76361793", "0.7602665", "0.759841", "0.7547982", "0.75370145", "0.75116783", "0.7487752", "0.7478812", "0.74497175", "0.7435299", "0.7407118", "0.7397174", "0.73776317", "0.73732615", "0.7324699", "0.72756827", "0.72559106", "0.7206573", "0.71551996", "0.71403...
0.745573
9
Solve the tile in row one at the specified column Updates puzzle and returns a move string
def solve_row1_tile(self, target_col): cur_row, cur_col = self.current_position(1, target_col) move_str = self.position_tile(1, target_col, cur_row, cur_col, need_ld=False) self.update_puzzle(move_str) return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += ...
[ "0.7611342", "0.7530595", "0.7489633", "0.74769557", "0.7346029", "0.7312759", "0.72872084", "0.72763294", "0.72613305", "0.72458375", "0.72417307", "0.7236731", "0.72223794", "0.7210395", "0.71957016", "0.71925163", "0.7142041", "0.71206504", "0.70840454", "0.7064091", "0.70...
0.74047565
4
Solve the upper left 2x2 part of the puzzle Updates the puzzle and returns a move string
def solve_2x2(self): cur_row, cur_col = self.current_position(0, 0) move_str = 'u' * cur_row + 'l' * cur_col self.update_puzzle(move_str) if self.check_2x2_solved(): return move_str else: while not self.check_2x2_solved(): move_str += 'rdlu' self.update_puzzle('rdlu') return move_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n...
[ "0.86358064", "0.84391785", "0.820359", "0.81158245", "0.8079695", "0.7876614", "0.7855013", "0.77959996", "0.7629816", "0.75706357", "0.7446708", "0.71919894", "0.70876247", "0.6987711", "0.6876523", "0.686447", "0.686447", "0.686447", "0.6857152", "0.6857152", "0.6825898", ...
0.8153259
3
This function will return data about the number of triangles on each vertex in a file inputs
def get_triangles_per_vertex(my_core, native_ranges): t_p_v_data = [] tri_dimension = 2 for vertex in native_ranges[types.MBVERTEX]: t_p_v_data.append(my_core.get_adjacencies(vertex, tri_dimension).size()) return np.array(t_p_v_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_triangles(self, file):\n self.nibble(80)\n return struct.unpack(\"@i\", self.nibble(4))[0]", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_...
[ "0.7772032", "0.68836695", "0.6398507", "0.6369426", "0.6362666", "0.6353974", "0.6323438", "0.6275137", "0.6170703", "0.6165812", "0.6144632", "0.613628", "0.61066425", "0.6011404", "0.59843576", "0.59520715", "0.5947087", "0.594126", "0.5935394", "0.59270984", "0.59270984",...
0.64086694
2
This function will return data about the number of triangles on each surface in a file inputs
def get_triangles_per_surface(my_core, entity_ranges): t_p_s = {} for surface in entity_ranges['Surfaces']: t_p_s[surface] = my_core.get_entities_by_type( surface, types.MBTRI).size() return t_p_s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_triangles(self, file):\n self.nibble(80)\n return struct.unpack(\"@i\", self.nibble(4))[0]", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def count_cells(fpath):\n cells = []\n for i in range(40):\n fname = f\"{fpath}/Mesh2d_...
[ "0.78216827", "0.65307426", "0.64875203", "0.6224915", "0.6100354", "0.60187995", "0.59512895", "0.5924314", "0.59203315", "0.5906817", "0.59059787", "0.5882943", "0.5867339", "0.585497", "0.58549577", "0.5812365", "0.57997227", "0.57795846", "0.57746047", "0.57551175", "0.57...
0.6084134
5
Open a PCAP, seek to a packet offset, then get all packets belonging to the same connection
def packets_for_stream(fobj, offset): pcap = dpkt.pcap.Reader(fobj) pcapiter = iter(pcap) ts, raw = pcapiter.next() fobj.seek(offset) for p in next_connection_packets(pcapiter, linktype=pcap.datalink()): yield p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_packets(pcap):\n\n # For each packet in the pcap process the contents\n for timestamp, buf, hdr_len in pcap:\n \n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), ...
[ "0.5710354", "0.5529603", "0.5436237", "0.5389503", "0.5370659", "0.5341635", "0.5319951", "0.5312948", "0.5296589", "0.52821374", "0.5232271", "0.5231817", "0.5208476", "0.52023923", "0.51755023", "0.5116855", "0.5112408", "0.5110333", "0.5092085", "0.5089987", "0.5086152", ...
0.69073343
0
gathers selected cards in order to take action on selected cards (either discarding them or preparing them)
def gatherSelected(self): self.selected_list = [] for element in self.hand_info: if element.status == 1: self.selected_list.append(element) return self.selected_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick(self, pack, cards_owned, draft_info):\n pass", "def card_sel(\n self, num=1, **kwargs\n ): # pylint: disable=too-many-locals, too-many-branches\n selectfrom = self.card_selSource(**kwargs)\n force = kwargs[\"force\"] if \"force\" in kwargs else False\n showdesc = k...
[ "0.6779038", "0.64504594", "0.6393454", "0.6248788", "0.6065166", "0.60555077", "0.60298246", "0.6014514", "0.6014514", "0.5978343", "0.5904059", "0.5888115", "0.5883593", "0.5855397", "0.58428776", "0.584109", "0.5830821", "0.582095", "0.5810005", "0.5807255", "0.5788884", ...
0.5896568
11
Flattened array of ints, specifying the index of this object. This has to account for shaped parameters!
def _raveled_index(self): return np.r_[:self.size]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product...
[ "0.65175533", "0.62058824", "0.5974461", "0.59025675", "0.58936995", "0.58421", "0.58367753", "0.57612103", "0.5670626", "0.5655063", "0.5648909", "0.56180966", "0.5589195", "0.55857176", "0.55857176", "0.5580362", "0.55702096", "0.5560747", "0.544082", "0.5435741", "0.541882...
0.53458685
27
Validate the response that came back from the API, return True if it's good, False if bad
def _validate_response(self, response): # Check for unexpected response - all should be JSON dicts that have # already been deserialised if not isinstance(response, types.DictionaryType): self.message( "\t\t[!] ERROR - Unexpected value returned from the API: '%s'" % (response)) return False # Check for valid errors if "error" in response and "msg" in response: self.message( "\t\t[!] ERROR - %s (%s)" % (response["msg"], response["timestamp"])) return False # Is this a valid response message if "msg" in response: return True # Catch all...dictionary returned but does not contain expected keys? # Who know's what's going on here?! else: self.message( "\t\t[!] ERROR - Unexpected dictionary response returned from the API: '%s'" % (response)) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self, response):\n return response[\"status_code\"] == 1", "def is_valid_response(self, response):\r\n if response.status_code in VALID_CODES:\r\n return True\r\n return False", "def validate_response(self, response):\n pass", "def validate_response(respons...
[ "0.8024064", "0.78625363", "0.77986264", "0.7515456", "0.74777824", "0.74465626", "0.73384", "0.7265737", "0.7262182", "0.72441804", "0.7198832", "0.712405", "0.7028976", "0.701251", "0.69981617", "0.69959635", "0.6970569", "0.6948191", "0.6897628", "0.68025744", "0.68025744"...
0.76538676
3
estimate an MxF user factor matrix and an FxN item factor matrix from the MxN rating matrix
def factor_mat(all_dat, f_num, iterations, regularization): # get # of users and # of items [u_num, i_num] = all_dat.shape # init user factors and item factors with random values u_fac = np.matrix(np.random.rand(u_num, f_num)) # MxF i_fac = np.matrix(np.random.rand(i_num, f_num)) # NxF # calculate the preference matrix preference = cal_preference(all_dat) # calculate the confidence matrix confidence = cal_confidence(all_dat) # recalculate the user factors and item factors using the alternating least square method for itr in range(iterations): u_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization) #print itr, "u_fac" i_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization) #print itr, "i_fac" # save the output df = pd.DataFrame(u_fac) df.to_csv("tmp/u_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8') df = pd.DataFrame(i_fac.T) df.to_csv("tmp/i_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8') # an MxF user factor matrix and an FxN item factor matrix return [u_fac, i_fac.T]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_matrix(self):\n\n self.matrix = np.zeros((len(self.users), len(self.items)))\n\n for user in self.train_set['users']:\n for item in self.train_set['feedback'][user]:\n self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \\\n se...
[ "0.6282623", "0.62621325", "0.60587424", "0.6045789", "0.6040702", "0.6002245", "0.5951851", "0.59179777", "0.59059614", "0.58943605", "0.589419", "0.587945", "0.5858747", "0.58264637", "0.5798391", "0.57919794", "0.574544", "0.5737683", "0.5693354", "0.5668427", "0.56560904"...
0.72323316
0
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): return [(text, '') for text in self.formatList]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def getEditChoices(self,...
[ "0.64435107", "0.64435107", "0.6406375", "0.6369448", "0.6282898", "0.61602914", "0.61550856", "0.6096663", "0.6079173", "0.6074226", "0.599768", "0.5979722", "0.5946701", "0.59085536", "0.58665997", "0.5852769", "0.5851758", "0.5840527", "0.58387506", "0.5816007", "0.5803215...
0.6981332
0
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): currentChoices, valid = self.sortedChoices(currentText) nonChoices = [text for text in self.formatList if text not in currentChoices] results = [] for choice in nonChoices: # menu entries to add a choice allChoices = currentChoices + [choice] allChoices = [text for text in self.formatList if text in allChoices] results.append((self.editSep.join(allChoices), '(%s %s)' % (_('add'), choice))) if currentChoices: results.append((None, None)) # separator for choice in currentChoices: # menu entries to remove a choice allChoices = currentChoices[:] allChoices.remove(choice) allChoices = [text for text in self.formatList if text in allChoices] results.append((self.editSep.join(allChoices), '(%s %s)' % (_('remove'), choice))) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n ...
[ "0.6981332", "0.64435107", "0.64435107", "0.6406375", "0.6282898", "0.61602914", "0.61550856", "0.6096663", "0.6079173", "0.6074226", "0.599768", "0.5979722", "0.5946701", "0.59085536", "0.58665997", "0.5852769", "0.5851758", "0.5840527", "0.58387506", "0.5816007", "0.5803215...
0.6369448
4
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): format = globalref.options.strData('EditDateFormat', True) today = GenDate().dateStr(format) yesterday = (GenDate() - 1).dateStr(format) tomorrow = (GenDate() + 1).dateStr(format) return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _('yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n ...
[ "0.6981832", "0.64423245", "0.64423245", "0.6404376", "0.63695693", "0.62808853", "0.615837", "0.61527145", "0.60969144", "0.60806596", "0.6074747", "0.59950155", "0.59769976", "0.59458613", "0.59082", "0.5853147", "0.5850785", "0.58412063", "0.58367765", "0.58151263", "0.580...
0.5867416
15
Note this initialization command will start spawning traffic and select the specified human demonstrators for imitation learning
def initialize_element(self): init_command = { "StartLearning": True, "AgentID": 1854 } msg = json.dumps(init_command).encode('unicode_escape') self.socket_control.send(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def starting_tests(self):\n# disable menus during testing, because their message loop seems to interfere\n# with the natlink message loop which waits for recognitionMimic to\n# finish\n self.testing = 1", "def initMana():\n run(\"chariot-me -i\")", "def starting_tests(self):\n# disable menus during tes...
[ "0.63875884", "0.61016667", "0.60508204", "0.60147184", "0.5999624", "0.5965473", "0.59024185", "0.587887", "0.58662325", "0.5809647", "0.58030933", "0.5800339", "0.5754827", "0.574139", "0.5726314", "0.5693041", "0.56924486", "0.5661824", "0.5648796", "0.5642869", "0.5640304...
0.5699686
15
Ensure that a folder exists and create it if it doesn't, including any parent folders, as necessary.
def create_folder(target_folder): try: os.makedirs(target_folder) except OSError as e: pass return os.path.exists(target_folder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_folder(*arg):\n if len(arg) == 0:\n raise Exception(\"No input to ensure_folder\")\n path = get_dir(Path(*arg))\n path.mkdir(parents=True, exist_ok=True)", "def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n ...
[ "0.82459253", "0.8094383", "0.8059282", "0.788112", "0.7779735", "0.7752992", "0.77248335", "0.77168036", "0.77027094", "0.76686084", "0.7639529", "0.76316696", "0.76276046", "0.7627525", "0.7594649", "0.7567482", "0.7552197", "0.75503033", "0.7529557", "0.7526078", "0.75254"...
0.7339701
36
Sends a POST request every second to the monitoring server indicating that the process is still running.
def post_heartbeat(host, name, auth=None): data = {'name': name, 'status': 'ok'} try: response = post('{host}/monitoring/heartbeat'.format(host=host), data, auth) except urllib2.URLError: print("Failed to send heartbeat.", file=sys.stderr) else: if response.strip() != 'ok': print('POST got response {response}'.format(response=response), file=sys.stderr) timer = threading.Timer(1.0, post_heartbeat, args=(host, name, auth)) # set the thread as a daemon to exit the program cleanly # when the main thread finishes timer.daemon = True timer.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)", "def keep_alive():\r\n app = Flask(\"\")\r\n @app.route(\"/\")\r\n def home():\r\n return \"Your bot is now alive!\"\r\n\r\n def run():\r\n app.run(host=\"0....
[ "0.6335293", "0.6263304", "0.6176471", "0.61680967", "0.6098956", "0.6098956", "0.6098956", "0.6034498", "0.5934776", "0.5786157", "0.5784672", "0.5767236", "0.57396203", "0.57379746", "0.5700311", "0.5679326", "0.5662139", "0.56477326", "0.5647195", "0.56367123", "0.5632368"...
0.57290554
14
Gets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
def contact_list(self): return self._contact_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_contacts(self):\n return self.contacts", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=ne...
[ "0.72631705", "0.71663344", "0.7121245", "0.6652589", "0.65349084", "0.64175344", "0.63972867", "0.6386485", "0.63770306", "0.63157517", "0.61904186", "0.61765337", "0.6132532", "0.6121634", "0.6121634", "0.602559", "0.6003591", "0.59747976", "0.5964907", "0.5926964", "0.5926...
0.8081953
0
Sets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
def contact_list(self, contact_list): self._contact_list = contact_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receiveContactList(self, contactList):", "def set_contacts(self, contacts):\n\n\t\tif contacts is not None and not isinstance(contacts, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: contacts EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__contacts = contacts\n\t\tself.__key_modified['...
[ "0.6209503", "0.6202445", "0.588594", "0.588594", "0.5883504", "0.5845821", "0.5795876", "0.5775629", "0.55796677", "0.5562263", "0.5562263", "0.5559013", "0.5479643", "0.5460591", "0.53779536", "0.5320598", "0.53073066", "0.5305575", "0.52854943", "0.5274175", "0.5192591", ...
0.83265656
0
Gets the errors of this MessagingCampaign. A list of current error conditions associated with this messaging campaign.
def errors(self): return self._errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def Errors(self):\n return self._get_attribute('errors')", "def getErrorsList(self):\n return self.__errors", "def errors (self):\n return self._errors", "def errors (self):\...
[ "0.7164323", "0.70850545", "0.7084039", "0.7073318", "0.7073318", "0.7032286", "0.69743216", "0.6840167", "0.6840085", "0.6816689", "0.68060446", "0.67511207", "0.66765666", "0.6626607", "0.65734386", "0.65553755", "0.65435886", "0.6522403", "0.6402078", "0.63631475", "0.6350...
0.7075878
3
Sets the errors of this MessagingCampaign. A list of current error conditions associated with this messaging campaign.
def errors(self, errors): self._errors = errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors(self, errors):\n\n self._errors = errors", "def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors", "def add_errors(self, errors):\n self.errors = merge_errors(self.errors, errors)", "def errors (self):\n return self._errors", "...
[ "0.7063868", "0.6316049", "0.62133807", "0.5995449", "0.5995449", "0.5993789", "0.5955976", "0.58611673", "0.5738866", "0.57359105", "0.56898946", "0.56828547", "0.56606567", "0.56536406", "0.5637317", "0.5577304", "0.5540206", "0.5453656", "0.54534554", "0.54445463", "0.5427...
0.7172415
0
Calculate matrix of number of edits to convert every subset of y to every subset of x
def distance_matrix(self, x, y, keyboard_weight=None): # create distance matrix size_x = len(x) + 1 size_y = len(y) + 1 dist_matrix = np.zeros((size_x, size_y)) for i in range(size_x): dist_matrix[i, 0] = i for j in range(size_y): dist_matrix[0, j] = j ## fill distance matrix # no keyboard weight if not keyboard_weight: for i in range(1, size_x): for j in range(1, size_y): # if letters are same if x[i-1] == y[j-1]: dist_matrix[i, j] = dist_matrix[i-1, j-1] # if letters are different else: subs = dist_matrix[i-1, j-1] + 1 delete = dist_matrix[i-1, j] + 1 insert = dist_matrix[i, j-1] + 1 dist_matrix[i, j] = min(subs, delete, insert) # manhattan keyboard weight elif keyboard_weight == "manhattan": for i in range(1, size_x): for j in range(1, size_y): # if letters are same if x[i-1] == y[j-1]: dist_matrix[i, j] = dist_matrix[i-1, j-1] # if letters are different else: dist = self.key_distance(x[i-1], y[j-1], keyboard_weight) subs_weight = dist * self.manhattan_coef subs = dist_matrix[i-1, j-1] + subs_weight delete = dist_matrix[i-1, j] + 1 insert = dist_matrix[i, j-1] + 1 dist_matrix[i, j] = min(subs, delete, insert) # euclidean keyboard weight elif keyboard_weight == "euclidean": for i in range(1, size_x): for j in range(1, size_y): # if letters are same if x[i-1] == y[j-1]: dist_matrix[i, j] = dist_matrix[i-1, j-1] # if letters are different else: dist = self.key_distance(x[i-1], y[j-1], keyboard_weight) subs_weight = dist * self.euclidean_coef subs = dist_matrix[i-1, j-1] + subs_weight delete = dist_matrix[i-1, j] + 1 insert = dist_matrix[i, j-1] + 1 dist_matrix[i, j] = min(subs, delete, insert) return dist_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topsolutions(self):\n answers = []\n for y in xrange(0, self.y):\n answer = self.retrieve(y,self.y)\n i = 0\n for x in xrange(0,y):\n answer -= self.retrieve(y,x)*answers[i]\n i += 1\n answers.append(answer)\n return...
[ "0.5772924", "0.573736", "0.57085884", "0.5653021", "0.56054556", "0.54707366", "0.5380854", "0.53584445", "0.5358038", "0.53385925", "0.5314144", "0.5285138", "0.52835494", "0.52652776", "0.52577674", "0.5253505", "0.5253083", "0.524562", "0.5237841", "0.52344394", "0.521122...
0.0
-1
A function for generating reaction likelihoods for a given genome according to the Probabilistic Annotation algorithm as
def generate_reaction_probabilities(fasta_file, template_model_file, genome_id=None):
    """Run the Probabilistic Annotation pipeline for one genome.

    Parameters
    ----------
    fasta_file : str
        Path to the genome's fasta file.
    template_model_file : str
        Path to the template model consumed by ``_load_template_file``.
    genome_id : str, optional
        Identifier for the genome; defaults to the fasta filename with
        its extension stripped. The worker uses it only for file names
        and logging.

    Returns
    -------
    ReactionProbabilities
        One dict per reaction with keys ``reaction``, ``probability``,
        ``type``, ``complexes`` and ``gpr``.
    """
    if genome_id is None:
        # Default to the fasta filename minus its extension.
        genome_id = '.'.join(fasta_file.split('.')[0:-1])

    annotation_worker = ProbAnnotationWorker(genome_id)
    try:
        template_model = _load_template_file(template_model_file)

        # Pipeline stages: blast -> roleset -> per-gene role ->
        # whole-cell role -> complex -> reaction probabilities.
        blast_output = annotation_worker.runBlast(fasta_file)
        roleset_probs = annotation_worker.rolesetProbabilitiesMarble(blast_output)
        per_gene_role_probs = annotation_worker.rolesetProbabilitiesToRoleProbabilities(roleset_probs)
        cell_role_probs = annotation_worker.totalRoleProbabilities(per_gene_role_probs)
        complex_probs = annotation_worker.complexProbabilities(
            cell_role_probs,
            complexesToRequiredRoles=_complex_to_roles_dict(template_model))
        reaction_probs = annotation_worker.reactionProbabilities(
            complex_probs,
            rxnsToComplexes=_reactions_to_complexes_dict(template_model))

        # Repackage the result tuples as dictionaries for better serialization.
        separator = annotation_worker.config['separator']
        records = []
        for entry in reaction_probs:
            records.append({'reaction': entry[0],
                            'probability': entry[1],
                            'type': entry[2],
                            'complexes': _deserialize_cplx(entry[3], separator),
                            'gpr': entry[4]})
        return ReactionProbabilities(records)
    finally:
        # The worker creates many temporary and intermediate files;
        # always give it a chance to clean up.
        annotation_worker.cleanup()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\...
[ "0.64158255", "0.61762154", "0.6171898", "0.61255485", "0.6108559", "0.60751355", "0.5888883", "0.581115", "0.57994837", "0.5773769", "0.57539636", "0.5743196", "0.57412934", "0.57062215", "0.5637121", "0.5604803", "0.56041193", "0.55022335", "0.5498393", "0.543617", "0.53924...
0.6003137
6
Fit an ellipse to an object. It returns the rotated rectangle in which the ellipse is inscribed.
def __CalculateEllipse(self, contour):
    """Fit an ellipse to an object.

    Returns the rotated rectangle in which the fitted ellipse is
    inscribed. ``cv2.fitEllipse`` needs more than five contour points;
    smaller contours fall back to the minimum-area bounding rectangle.
    """
    if len(contour) <= 5:
        return cv2.minAreaRect(contour)
    return cv2.fitEllipse(contour)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_ellipse(x,y):\r\n \r\n def fit(x,y):\r\n x = x[:,np.newaxis]\r\n y = y[:,np.newaxis]\r\n D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))\r\n S = np.dot(D.T,D)\r\n C = np.zeros([6,6])\r\n C[0,2] = C[2,0] = 2; C[1,1] = -1\r\n E, V = np.linalg.eig...
[ "0.72764295", "0.65389484", "0.6258115", "0.6085874", "0.6078846", "0.59491014", "0.5820374", "0.5784566", "0.561725", "0.55927783", "0.5559429", "0.5536257", "0.5470644", "0.5456046", "0.54508764", "0.54230434", "0.5402299", "0.53830856", "0.5359062", "0.53571004", "0.535605...
0.5605659
9