Dataset Preview
The full dataset viewer is not available (the error below explains why); only a preview of the rows is shown.
The dataset generation failed because of a cast error
Error code:   DatasetGenerationCastError
Exception:    DatasetGenerationCastError
Message:      An error occurred while generating the dataset

All the data files must have the same columns, but at some point there are 9 new columns ({'C++', 'Java', 'PHP', 'VB', 'name', 'C#', 'Go', 'C', 'Python'}) and 4 missing columns ({'paddle', 'tensorflow', 'pytorch', 'mxnet'}).

This happened while the json dataset builder was generating data using

hf://datasets/WeixiangYan/CodeTransOcean/CodeTrans Datasets/MultilingualTrans/multilingual_train.json (at revision 8985d0851100f08e94735b2f2556a795ba8c69bb)

Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
Traceback:    Traceback (most recent call last):
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2011, in _prepare_split_single
                  writer.write_table(table)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 585, in write_table
                  pa_table = table_cast(pa_table, self._schema)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2302, in table_cast
                  return cast_table_to_schema(table, schema)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2256, in cast_table_to_schema
                  raise CastError(
              datasets.table.CastError: Couldn't cast
              id: int64
              name: string
              C: string
              C#: string
              C++: string
              Go: string
              Java: string
              Python: string
              VB: string
              PHP: string
              to
              {'id': Value(dtype='int64', id=None), 'tensorflow': Value(dtype='string', id=None), 'pytorch': Value(dtype='string', id=None), 'mxnet': Value(dtype='string', id=None), 'paddle': Value(dtype='string', id=None)}
              because column names don't match
              
              During handling of the above exception, another exception occurred:
              
              Traceback (most recent call last):
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1321, in compute_config_parquet_and_info_response
                  parquet_operations = convert_to_parquet(builder)
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 935, in convert_to_parquet
                  builder.download_and_prepare(
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1027, in download_and_prepare
                  self._download_and_prepare(
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1122, in _download_and_prepare
                  self._prepare_split(split_generator, **prepare_split_kwargs)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1882, in _prepare_split
                  for job_id, done, content in self._prepare_split_single(
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2013, in _prepare_split_single
                  raise DatasetGenerationCastError.from_cast_error(
              datasets.exceptions.DatasetGenerationCastError: An error occurred while generating the dataset
              
              All the data files must have the same columns, but at some point there are 9 new columns ({'C++', 'Java', 'PHP', 'VB', 'name', 'C#', 'Go', 'C', 'Python'}) and 4 missing columns ({'paddle', 'tensorflow', 'pytorch', 'mxnet'}).
              
              This happened while the json dataset builder was generating data using
              
              hf://datasets/WeixiangYan/CodeTransOcean/CodeTrans Datasets/MultilingualTrans/multilingual_train.json (at revision 8985d0851100f08e94735b2f2556a795ba8c69bb)
              
              Please either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)
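The second suggestion above, separating the files into different configurations, amounts to adding a configs section to the YAML header at the top of the dataset's README.md, in the format described by the manual-configuration docs linked in the message. A minimal sketch under stated assumptions: the MultilingualTrans path is the one named in the error, while the second config name and its glob pattern are placeholders for the files that carry the tensorflow/paddle/mxnet/pytorch columns.

    ---
    configs:
    - config_name: MultilingualTrans
      data_files:
      - split: train
        path: "CodeTrans Datasets/MultilingualTrans/multilingual_train.json"
    - config_name: DLTrans                 # placeholder name for the framework-translation files
      data_files:
      - split: train
        path: "CodeTrans Datasets/DLTrans/*.json"   # hypothetical path pattern
    ---

With per-config file lists like this, the viewer builds each configuration with its own schema instead of trying to cast every JSON file to a single set of columns.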

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.

Preview columns: id (int64), tensorflow (string), paddle (string), mxnet (null), pytorch (null). Each preview row below lists its cell values in that order, one value per line; null marks an empty cell.
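A minimal sketch of reading such rows programmatically with the datasets library, loading one JSON file at a time so only one schema is involved; the data_files path is a placeholder for the file behind this preview, since only the MultilingualTrans path appears in the error above.

    from datasets import load_dataset

    # Placeholder path: point this at the JSON file that holds the
    # tensorflow/paddle/mxnet/pytorch rows shown in the preview.
    ds = load_dataset("json", data_files="path/to/framework_train.json", split="train")

    # Keep only rows where both the TensorFlow and Paddle cells are present.
    pairs = [(row["tensorflow"], row["paddle"]) for row in ds
             if row["tensorflow"] and row["paddle"]]
    print(len(pairs), "tensorflow/paddle pairs")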
1
x = tf.range(12) tf.size(x) X = tf.reshape(x, (3, 4)) tf.zeros((2, 3, 4)) tf.ones((2, 3, 4)) tf.random.normal(shape=[3, 4]) tf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = tf.constant([1.0, 2, 4, 8]) y = tf.constant([2.0, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y tf.exp(x) X = tf.reshape(tf.range(12, dty...
x = paddle.arange(12) x.numel() X = paddle.reshape(x, (3, 4)) paddle.zeros((2, 3, 4)) paddle.ones((2, 3, 4)) paddle.randn((3, 4),'float32') paddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = paddle.to_tensor([1.0, 2, 4, 8]) y = paddle.to_tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x**y paddle.exp(x) ...
null
null
2
import tensorflow as tf X, y = tf.constant(inputs.values), tf.constant(outputs.values)
import warnings warnings.filterwarnings(action='ignore') import paddle X, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)
null
null
3
import tensorflow as tf x = tf.constant(3.0) y = tf.constant(2.0) print(x + y, x * y, x / y, x**y) x = tf.range(4) A = tf.reshape(tf.range(20), (5, 4)) tf.transpose(A) B = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == tf.transpose(B) X = tf.reshape(tf.range(24), (2, 3, 4)) A = tf.reshape(tf.range(20, dtype=tf.flo...
import warnings warnings.filterwarnings(action='ignore') import paddle x = paddle.to_tensor([3.0]) y = paddle.to_tensor([2.0]) x + y, x * y, x / y, x**y x = paddle.arange(4) A = paddle.reshape(paddle.arange(20), (5, 4)) paddle.transpose(A, perm=[1, 0]) B = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == paddle...
null
null
4
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import tensorflow as d2l def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h...
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import paddle as d2l def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h *= ...
null
null
5
import tensorflow as tf x = tf.range(4, dtype=tf.float32) x = tf.Variable(x) with tf.GradientTape() as t: y = 2 * tf.tensordot(x, x, axes=1) x_grad = t.gradient(y, x) x_grad x_grad == 4 * x with tf.GradientTape() as t: y = tf.reduce_sum(x) t.gradient(y, x) with tf.GradientTape() as t: y = x * x t.gradient(y...
import warnings warnings.filterwarnings(action='ignore') import paddle x = paddle.arange(4, dtype='float32') x = paddle.to_tensor(x, stop_gradient=False) y = 2 * paddle.dot(x, x) y.backward() x.grad x.grad == 4 * x x.clear_gradient() y = paddle.sum(x) y.backward() x.grad x.clear_gradient() y = x * x paddle.sum(y).backw...
null
null
6
%matplotlib inline import numpy as np import tensorflow as tf import tensorflow_probability as tfp from d2l import tensorflow as d2l fair_probs = tf.ones(6) / 6 tfp.distributions.Multinomial(1, fair_probs).sample() tfp.distributions.Multinomial(10, fair_probs).sample() counts = tfp.distributions.Multinomial(1000, fair_...
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import numpy as np import paddle fair_probs = [1.0 / 6] * 6 paddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample() counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_prob...
null
null
7
counts = tfp.distributions.Multinomial(10, fair_probs).sample(500) cum_counts = tf.cumsum(counts, axis=0) estimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(...
counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1)) cum_counts = counts.cumsum(axis=0) cum_counts = cum_counts.squeeze(axis=1) estimates = cum_counts / cum_counts.sum(axis=1, keepdim=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i], ...
null
null
8
%matplotlib inline import math import time import numpy as np import tensorflow as tf from d2l import tensorflow as d2l n = 10000 a = tf.ones(n) b = tf.ones(n) c = tf.Variable(tf.zeros(n)) timer = Timer() for i in range(n): c[i].assign(a[i] + b[i]) x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.pl...
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import time import numpy as np import paddle n = 10000 a = paddle.ones([n]) b = paddle.ones([n]) c = paddle.zeros([n]) timer = Timer() for i in range(n): c[i] = a[i] + b[i] x = np.arange(-7, 7, 0.01) param...
null
null
9
%matplotlib inline import random import tensorflow as tf from d2l import tensorflow as d2l def synthetic_data(w, b, num_examples): X = tf.zeros((num_examples, w.shape[0])) X += tf.random.normal(shape=X.shape) y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b y += tf.random.normal(shape=y.shape, stddev=0.01) ...
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle def synthetic_data(w, b, num_examples): X = paddle.normal(0, 1, (num_examples, len(w))) y = paddle.matmul(X, w) + b y += paddle.normal(0, 0.01, y.shape) return X, y.reshape((-1,...
null
null
10
import numpy as np import tensorflow as tf from d2l import tensorflow as d2l true_w = tf.constant([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = tf.data.Dataset.from_tensor_slices(data_arrays) if is_train: ...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import numpy as np import paddle true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = paddle.io.TensorDataset(dat...
null
null
11
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l d2l.use_svg_display() mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data() len(mnist_train[0]), len(mnist_test[0]) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * ...
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import sys import paddle from paddle.vision import transforms d2l.use_svg_display() trans = transforms.ToTensor() mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans) mnist_test = paddle.vision...
null
null
12
import tensorflow as tf from IPython import display from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01)) b = tf.Variable(tf.zeros(num_ou...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from IPython import display batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs)) b = paddle.zeros(shape=...
null
null
13
import tensorflow as tf from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = tf.keras.models.Sequential() net.add(tf.keras.layers.Flatten(input_shape=(28, 28))) weight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01) net.add(tf.k...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.initiali...
null
null
14
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32) y = tf.nn.relu(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.relu(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x'...
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(-8.0, 8.0, 0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.relu(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(paddl...
null
null
15
import tensorflow as tf from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01)) b1 = tf.Variable(tf.zeros(num_hiddens)) ...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = paddle.randn([num_inputs, num_hiddens]) * 0.01 W1.stop_gradient = ...
null
null
16
import tensorflow as tf from d2l import tensorflow as d2l net = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)]) batch_size, lr, num_epochs = 256, 0.1, 10 loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) trainer = tf....
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10)) for layer in net: if type(layer) == nn.Linear: ...
null
null
17
import math import numpy as np import tensorflow as tf from d2l import tensorflow as d2l true_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import numpy as np import paddle from paddle import nn true_w, features, poly_features, labels = [paddle.to_tensor(x, dtype= paddle.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2,...
null
null
18
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synth...
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter ...
null
null
19
import tensorflow as tf from d2l import tensorflow as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return tf.zeros_like(X) if dropout == 0: return X mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout return tf.cast(mask, dtyp...
import warnings warnings.filterwarnings(action='ignore') import random import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return paddle.zeros_like(X) ...
null
null
20
trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1)) with tf.GradientTape() as t: y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), [y.num...
trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='flo...
null
null
21
%matplotlib inline import numpy as np import pandas as pd import tensorflow as tf from d2l import tensorflow as d2l n_train = train_data.shape[0] train_features = tf.constant(all_features[:n_train].values, dtype=tf.float32) test_features = tf.constant(all_features[n_train:].values, dtype=tf.float32) train_labels = tf.c...
%matplotlib inline import warnings import numpy as np import pandas as pd warnings.filterwarnings(action='ignore') import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l n_train = train_data.shape[0] train_features = paddle.to_tensor(all_features...
null
null
22
import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dense(10)) X = tf.random.uniform((2, 20)) net(X) class MLP(tf.keras.Model): def __init__(self): super().__init__() self.hidden = tf.keras.layers.Dense(units=256, acti...
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn from paddle.nn import functional as F net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) X = paddle.rand([2, 20]) net(X) class MLP(nn.Layer): def __init__(self): super().__init__() self.h...
null
null
23
import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu), tf.keras.layers.Dense(1), ]) X = tf.random.uniform((2, 4)) net(X) net.get_weights()[1] def block1(name): return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras....
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1)) X = paddle.rand([2, 4]) net(X) net.state_dict()['2.bias'] def block1(): return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU()) def block...
null
null
24
import tensorflow as tf class CenteredLayer(tf.keras.Model): def __init__(self): super().__init__() def call(self, inputs): return inputs - tf.reduce_mean(inputs) Y = net(tf.random.uniform((4, 8))) tf.reduce_mean(Y) class MyDense(tf.keras.Model): def __init__(self, units): super().__...
import warnings warnings.filterwarnings(action='ignore') import paddle import paddle.nn.functional as F from paddle import nn class CenteredLayer(nn.Layer): def __init__(self): super().__init__() def forward(self, X): return X - X.mean() Y = net(paddle.rand([4, 8])) Y.mean() class MyLinear(nn.La...
null
null
25
import numpy as np import tensorflow as tf x = tf.range(4) np.save('x-file.npy', x) x2 = np.load('x-file.npy', allow_pickle=True) y = tf.zeros(4) np.save('xy-files.npy', [x, y]) x2, y2 = np.load('xy-files.npy', allow_pickle=True) mydict = {'x': x, 'y': y} np.save('mydict.npy', mydict) mydict2 = np.load('mydict.npy', al...
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn from paddle.nn import functional as F x = paddle.arange(4) paddle.save(x, 'x-file') x2 = paddle.load('x-file') y = paddle.zeros([4]) paddle.save([x,y], 'x-file') x2, y2 = paddle.load('x-file') mydict = {'x': x, 'y': y} paddle.s...
null
null
26
import tensorflow as tf tf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1') len(tf.config.experimental.list_physical_devices('GPU')) def try_gpu(i=0): if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1: return tf.device(f'/GPU:{i}') return tf.device('/CPU:0') def try_all_gpus...
import paddle from paddle import nn paddle.device.set_device("cpu"), paddle.CUDAPlace(0), paddle.CUDAPlace(1) paddle.device.cuda.device_count() if paddle.device.cuda.device_count() >= i + 1: return paddle.CUDAPlace(i) return paddle.CPUPlace() def try_all_gpus(): devices = [paddle.CUDAPlace(i) for i ...
null
null
27
import tensorflow as tf from d2l import tensorflow as d2l def corr2d(X, K): h, w = K.shape Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j].assign(tf.reduce_sum( X[i: i + h, j: j + w] * K))...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn def corr2d(X, K): h, w = K.shape Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j...
null
null
28
import tensorflow as tf def comp_conv2d(conv2d, X): X = tf.reshape(X, (1, ) + X.shape + (1, )) Y = conv2d(X) return tf.reshape(Y, Y.shape[1:3]) conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same') X = tf.random.uniform(shape=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(...
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn def comp_conv2d(conv2d, X): X = paddle.reshape(X, [1, 1] + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1) X = paddle.rand((8, 8)) co...
null
null
29
import tensorflow as tf from d2l import tensorflow as d2l def corr2d_multi_in(X, K): return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0) X = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = tf.constant([[[0.0, 1.0], [2.0, ...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = paddle.to_t...
null
null
30
import tensorflow as tf def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1))) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): ...
null
null
31
import tensorflow as tf from d2l import tensorflow as d2l def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn, optimizer net = nn.Sequential( nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(), nn.AvgPool2D(kernel_size=2, stride=2), nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(), nn.AvgPool2D(...
null
null
32
import tensorflow as tf from d2l import tensorflow as d2l def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Conv2D(filters=256, kernel_size=5, pad...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn net = nn.Sequential( nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(), nn.MaxPool2D(kernel_size=3, stride=2), nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(), nn.M...
null
null
33
import tensorflow as tf from d2l import tensorflow as d2l def vgg_block(num_convs, num_channels): blk = tf.keras.models.Sequential() for _ in range(num_convs): blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu')) blk.add(tf.keras.layers.MaxPool2D(pool_size=2,...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def vgg_block(num_convs, in_channels, out_channels): layers = [] for _ in range(num_convs): layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1)) layers.ap...
null
null
34
import tensorflow as tf from d2l import tensorflow as d2l def nin_block(num_channels, kernel_size, strides, padding): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'), tf.keras.layers.Conv2D(num_channels, kerne...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def nin_block(in_channels, out_channels, kernel_size, strides, padding): return nn.Sequential( nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding), nn.ReLU(), ...
null
null
35
import tensorflow as tf from d2l import tensorflow as d2l class Inception(tf.keras.Model): def __init__(self, c1, c2, c3, c4): super().__init__() self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu') self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu') self.p2_2 = t...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn import paddle.nn.functional as F class Inception(nn.Layer): def __init__(self, in_channels, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2D(...
null
null
36
import tensorflow as tf from d2l import tensorflow as d2l def batch_norm(X, gamma, beta, moving_mean, moving_var, eps): inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype) inv *= gamma Y = X * inv + (beta - moving_mean * inv) return Y class BatchNorm(tf.keras.layers.Layer): def __init__(self, **...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True): if not is_training: X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5 else: assert le...
null
null
37
import tensorflow as tf from d2l import tensorflow as d2l class Residual(tf.keras.Model): def __init__(self, num_channels, use_1x1conv=False, strides=1): super().__init__() self.conv1 = tf.keras.layers.Conv2D( num_channels, padding='same', kernel_size=3, strides=strides) self.con...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn from paddle.nn import functional as F class Residual(nn.Layer): def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1): super(Residual, self).__init__(...
null
null
38
import tensorflow as tf from d2l import tensorflow as d2l class ConvBlock(tf.keras.layers.Layer): def __init__(self, num_channels): super(ConvBlock, self).__init__() self.bn = tf.keras.layers.BatchNormalization() self.relu = tf.keras.layers.ReLU() self.conv = tf.keras.layers.Conv2D(f...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn as nn def conv_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2D(input_channels), nn.ReLU(), nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1)) class...
null
null
39
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l T = 1000 time = tf.range(1, T + 1, dtype=tf.float32) x = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = tf.Variable(tf.zeros((T - tau, tau))) for i in ran...
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn T = 1000 time = paddle.arange(1, T + 1, dtype=paddle.float32) x = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) ...
null
null
40
import collections import re from d2l import tensorflow as d2l def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('Error: Unknown word element type:' + token) tokens = ...
import collections import re from d2l import paddle as d2l def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('Error: Unknown word element type:' + token) tokens = toke...
null
null
41
import random import tensorflow as tf from d2l import tensorflow as d2l tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): c...
null
null
42
import tensorflow as tf from d2l import tensorflow as d2l X, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1) H, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1) tf.matmul(X, W_xh) + tf.matmul(H, W_hh) tf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle X, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4)) H, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4)) paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh) paddle.matmul(paddle.concat((X, H), 1), padd...
null
null
43
%matplotlib inline import math import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) train_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True) tf.one_hot(tf.const...
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import paddle from paddle import nn from paddle.nn import functional as F batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) F.one_hot(paddle.to_tensor([0, 2])...
null
null
44
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform') rnn_layer = tf.keras.layers.RNN(rnn_cell, time_major...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn from paddle.nn import functional as F batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = nn.SimpleRNN(len(vocab), num_hidden...
null
null
45
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.random.normal(shape=shape,stddev=0.01...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn.functional as F from paddle import nn batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens): num_inputs = num_outputs ...
null
null
46
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.Variable(tf.random.normal(shape=...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle import paddle.nn.functional as Function from paddle import nn batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens): num_inputs = ...
null
null
47
import os import tensorflow as tf from d2l import tensorflow as d2l def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = tf.reduce_sum( ...
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import os import paddle def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in ...
null
null
48
null
x = paddle.arange(12) x.numel() X = paddle.reshape(x, (3, 4)) paddle.zeros((2, 3, 4)) paddle.ones((2, 3, 4)) paddle.randn((3, 4),'float32') paddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = paddle.to_tensor([1.0, 2, 4, 8]) y = paddle.to_tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x**y paddle.exp(x) ...
null
null
49
null
import warnings warnings.filterwarnings(action='ignore') import paddle X, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values)
null
null
50
null
import warnings warnings.filterwarnings(action='ignore') import paddle x = paddle.to_tensor([3.0]) y = paddle.to_tensor([2.0]) x + y, x * y, x / y, x**y x = paddle.arange(4) A = paddle.reshape(paddle.arange(20), (5, 4)) paddle.transpose(A, perm=[1, 0]) B = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == paddle...
null
null
51
null
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import paddle as d2l def f(x): return 3 * x ** 2 - 4 * x def numerical_lim(f, x, h): return (f(x + h) - f(x)) / h h = 0.1 for i in range(5): print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}') h *= ...
null
null
52
null
import warnings warnings.filterwarnings(action='ignore') import paddle x = paddle.arange(4, dtype='float32') x = paddle.to_tensor(x, stop_gradient=False) y = 2 * paddle.dot(x, x) x.clear_gradient() y = paddle.sum(x) y.backward() x.grad x.clear_gradient() y = x * x paddle.sum(y).backward() x.grad x.clear_gradient() y = ...
null
null
53
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import numpy as np import paddle fair_probs = [1.0 / 6] * 6 paddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample() counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_prob...
null
null
54
null
counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1)) cum_counts = counts.cumsum(axis=0) cum_counts = cum_counts.squeeze(axis=1) estimates = cum_counts / cum_counts.sum(axis=1, keepdim=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i], ...
null
null
55
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import time import numpy as np import paddle n = 10000 a = paddle.ones([n]) b = paddle.ones([n]) c = paddle.zeros([n]) timer = Timer() for i in range(n): c[i] = a[i] + b[i] x = np.arange(-7, 7, 0.01) param...
null
null
56
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import random import paddle def synthetic_data(w, b, num_examples): X = paddle.normal(0, 1, (num_examples, len(w))) y = paddle.matmul(X, w) + b y += paddle.normal(0, 0.01, y.shape) return X, y.reshape((-1,...
null
null
57
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import numpy as np import paddle true_w = paddle.to_tensor([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = paddle.io.TensorDataset(dat...
null
null
58
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import sys import paddle from paddle.vision import transforms d2l.use_svg_display() trans = transforms.ToTensor() mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans) mnist_test = paddle.vision...
null
null
59
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from IPython import display batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs)) b = paddle.zeros(shape=...
null
null
60
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)) def init_weights(m): if type(m) == nn.Linear: nn.initiali...
null
null
61
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(-8.0, 8.0, 0.1, dtype='float32') x.stop_gradient = False y = paddle.nn.functional.relu(x) d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) y.backward(paddl...
null
null
62
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = paddle.randn([num_inputs, num_hiddens]) * 0.01 W1.stop_gradient = ...
null
null
63
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn net = nn.Sequential(nn.Flatten(), nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10)) for layer in net: if type(layer) == nn.Linear: ...
null
null
64
null
import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import math import numpy as np import paddle from paddle import nn true_w, features, poly_features, labels = [paddle.to_tensor(x, dtype= paddle.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2,...
null
null
65
null
%matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle from paddle import nn n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter ...
null
null
66
null
import warnings warnings.filterwarnings(action='ignore') import random import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return paddle.zeros_like(X) ...
null
null
67
null
trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters()) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import warnings from d2l import paddle as d2l warnings.filterwarnings("ignore") import paddle x = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='flo...
null
null
68
null
%matplotlib inline import warnings import numpy as np import pandas as pd warnings.filterwarnings(action='ignore') import paddle from paddle import nn warnings.filterwarnings("ignore", category=DeprecationWarning) from d2l import paddle as d2l n_train = train_data.shape[0] train_features = paddle.to_tensor(all_features...
null
null
69
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn from paddle.nn import functional as F net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10)) X = paddle.rand([2, 20]) net(X) class MLP(nn.Layer): def __init__(self): super().__init__() self.h...
null
null
70
null
import warnings warnings.filterwarnings(action='ignore') import paddle from paddle import nn net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1)) X = paddle.rand([2, 4]) net(X) net.state_dict()['2.bias'] def block1(): return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU()) def block...
null
null
End of preview.
README.md exists but content is empty.
Downloads last month: 67
